# pylint: disable=g-bad-file-header
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Sweeps for MC Dropout agent."""
import dataclasses
from typing import Sequence
from enn import losses
from enn import networks
from neural_testbed import agents
from neural_testbed import base as testbed_base
from neural_testbed.agents.factories import base as factories_base
import numpy as np
import optax
@dataclasses.dataclass
class McDropoutConfig:
"""Configuration for mc dropout agent."""
dropout_rate: float = 0.1 # Drop probability for each hidden unit
length_scale: float = 1. # Length scale used for weight regularization
regularization_tau: float = 1. # tau for scaling the weight regularizer
dropout_input: bool = False # Whether to have dropout for the input layer
exclude_bias_l2: bool = False # Whether to exclude bias from regularization
adaptive_weight_scale: bool = True # Whether to scale with prior
hidden_sizes: Sequence[int] = (100, 100) # Hidden sizes for neural network
num_batches: int = 1000 # Number of SGD steps
learning_rate: float = 1e-3 # Learning rate for adam optimizer
seed: int = 0 # Initialization seed
def make_mc_dropout_agent(
config: McDropoutConfig) -> agents.VanillaEnnAgent:
"""Factory method to create MC dropout agent."""
def make_enn(prior: testbed_base.PriorKnowledge) -> networks.EnnArray:
enn = networks.MLPDropoutENN(
output_sizes=list(config.hidden_sizes) + [prior.num_classes],
dropout_rate=config.dropout_rate,
dropout_input=config.dropout_input,
seed=config.seed,
)
return enn
def make_loss(prior: testbed_base.PriorKnowledge,
enn: networks.EnnArray) -> losses.LossFnArray:
del enn
single_loss = losses.combine_single_index_losses_as_metric(
train_loss=losses.XentLoss(prior.num_classes),
extra_losses={
'acc': losses.AccuracyErrorLoss(prior.num_classes)
},
)
# Averaging over index
loss_fn = losses.average_single_index_loss(single_loss, num_index_samples=1)
# Adding a special weight regularization based on paper "Dropout as a
# Bayesian Approximation: Representing Model Uncertainty in Deep Learning",
# https://github.com/yaringal/DropoutUncertaintyExps/blob/master/net/net.py#L72
scale = (config.length_scale**2) * (1 - config.dropout_rate) / (
2. * prior.num_train * config.regularization_tau)
if config.adaptive_weight_scale:
scale = config.length_scale * np.sqrt(
prior.temperature) * prior.input_dim / prior.num_train
if config.exclude_bias_l2:
predicate = lambda module, name, value: name != 'b'
else:
predicate = lambda module, name, value: True
loss_fn = losses.add_l2_weight_decay(loss_fn, scale, predicate)
return loss_fn
agent_config = agents.VanillaEnnConfig(
enn_ctor=make_enn,
loss_ctor=make_loss,
optimizer=optax.adam(config.learning_rate),
num_batches=config.num_batches,
seed=config.seed,)
return agents.VanillaEnnAgent(agent_config)
def droprate_sweep() -> Sequence[McDropoutConfig]:
"""Generates the dropout sweep over dropping parameters for paper."""
sweep = []
for dropout_rate in [0., 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]:
for dropout_input in [True, False]:
sweep.append(
McDropoutConfig(
dropout_rate=dropout_rate,
dropout_input=dropout_input))
return tuple(sweep)
def l2reg_sweep() -> Sequence[McDropoutConfig]:
"""Generates the dropout sweep over l2 regularization parameters for paper."""
sweep = []
for adaptive_weight_scale in [True, False]:
for length_scale in [1, 3, 10]:
sweep.append(
McDropoutConfig(
adaptive_weight_scale=adaptive_weight_scale,
length_scale=length_scale))
return tuple(sweep)
def network_sweep() -> Sequence[McDropoutConfig]:
"""Generates the dropout sweep over dropping parameters for paper."""
sweep = []
for hidden_sizes in [(50, 50), (100, 100), (50, 50, 50)]:
sweep.append(McDropoutConfig(hidden_sizes=hidden_sizes))
return tuple(sweep)
def combined_sweep() -> Sequence[McDropoutConfig]:
return tuple(droprate_sweep()) + tuple(l2reg_sweep()) + tuple(network_sweep())
def paper_agent() -> factories_base.PaperAgent:
return factories_base.PaperAgent(
default=McDropoutConfig(),
ctor=make_mc_dropout_agent,
sweep=combined_sweep,
)
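# A minimal usage sketch (illustrative only, assuming neural_testbed and its
# dependencies are installed): build the default MC dropout agent and count
# the sweep configurations defined above.
if __name__ == '__main__':
  paper = paper_agent()
  print(f'combined sweep has {len(paper.sweep())} configs')
  agent = make_mc_dropout_agent(McDropoutConfig(dropout_rate=0.2))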
[repo: neural_testbed-master | file: neural_testbed/agents/factories/sweeps/testbed_2d/dropout.py]
# pylint: disable=g-bad-file-header
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Sweep for Random Forest agent."""
from typing import Sequence
from neural_testbed.agents.factories import base as factories_base
from neural_testbed.agents.factories import random_forest
def rf_sweep() -> Sequence[random_forest.RandomForestConfig]:
sweep = []
for n_estimators in [10, 100, 1000]:
for criterion in ['gini', 'entropy']:
sweep.append(random_forest.RandomForestConfig(n_estimators, criterion))
return tuple(sweep)
def paper_agent() -> factories_base.PaperAgent:
return factories_base.PaperAgent(
default=random_forest.RandomForestConfig(),
ctor=random_forest.make_agent,
sweep=rf_sweep,
)
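# A minimal usage sketch (illustrative only): rf_sweep enumerates 3 estimator
# counts x 2 split criteria = 6 configurations.
if __name__ == '__main__':
  for config in rf_sweep():
    print(config)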
[repo: neural_testbed-master | file: neural_testbed/agents/factories/sweeps/testbed_2d/random_forest.py]
# pylint: disable=g-bad-file-header
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Sweeps for K-nearest neighbors agent."""
from typing import Sequence
from neural_testbed.agents.factories import base as factories_base
from neural_testbed.agents.factories import knn
def knn_sweep() -> Sequence[knn.KnnConfig]:
sweep = []
for num_neighbors in [1, 5, 10, 30, 50, 100]:
for weighting in ['uniform', 'distance']:
sweep.append(knn.KnnConfig(num_neighbors, weighting))
return tuple(sweep)
def paper_agent() -> factories_base.PaperAgent:
return factories_base.PaperAgent(
default=knn.KnnConfig(),
ctor=knn.make_agent,
sweep=knn_sweep,
)
[repo: neural_testbed-master | file: neural_testbed/agents/factories/sweeps/testbed_2d/knn.py]
# pylint: disable=g-bad-file-header
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Load a problem_id from sweep with CSV logging."""
from neural_testbed import base as testbed_base
from neural_testbed import logging
from neural_testbed.leaderboard import load
def problem_from_id(problem_id: str,
results_dir: str = '/tmp/neural_testbed',
overwrite: bool = False) -> testbed_base.TestbedProblem:
"""Factory method to load problem from problem_id and wrap it with a csv logger."""
problem = load.problem_from_id(problem_id)
return logging.wrap_problem_csv(problem, problem_id, results_dir, overwrite)
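# A minimal usage sketch (illustrative only, assuming the package is
# installed): load a leaderboard problem with CSV logging attached;
# 'classification_2d/0' is a problem_id from the leaderboard sweep.
if __name__ == '__main__':
  problem = problem_from_id('classification_2d/0')
  print(problem.prior_knowledge)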
[repo: neural_testbed-master | file: neural_testbed/leaderboard/load_csv.py]
# pylint: disable=g-bad-file-header
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Display functions for leaderboard entries."""
from typing import Sequence
from neural_testbed.leaderboard import score
import numpy as np
import pandas as pd
SCORE_COL = 'kl_estimate'
DISPLAY_COLS = (
'agent_name', 'normalized_kl', 'normalized_stderr',
'mean_test_acc', 'mean_train_seconds', 'mean_evaluation_seconds'
)
def _stderr(x):
return np.std(x) / np.sqrt(len(x))
def _extract_tau_data(data: score.LeaderboardData, tau: int) -> pd.DataFrame:
assert tau in data.df.tau.unique()
return data.df[data.df.tau == tau].copy()
def _compute_mean(df: pd.DataFrame, column_name: str):
"""Computes mean running time based on column column_name."""
if column_name not in df.columns:
df[column_name] = 0
mean_df = (df.groupby('agent_name')[column_name]
.agg([np.mean])
.rename({'mean': 'mean_' + column_name}, axis=1)
.reset_index())
return mean_df
def _compute_stderr(df: pd.DataFrame, num_seed_per_class: int = 10):
"""Computes stderr by grouping the problems based on their seeds."""
assert 'seed' in df.columns
df['seed_class'] = df['seed'].apply(lambda x: x % num_seed_per_class)
kl_seed_df = df.groupby(['agent_name',
'seed_class'])['kl_estimate'].mean().reset_index()
stderr_df = kl_seed_df.groupby(['agent_name'
])['kl_estimate'].agg([_stderr]).reset_index()
stderr_df = stderr_df.rename({'_stderr': 'stderr_kl'}, axis=1)
return stderr_df
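# An illustration of the seed-class grouping above (hypothetical toy frame):
# seeds are bucketed modulo num_seed_per_class, KL is averaged within each
# bucket, and the standard error is taken across the bucket means.
#
#   toy = pd.DataFrame({
#       'agent_name': ['a'] * 4,
#       'seed': [0, 1, 10, 11],           # buckets 0, 1, 0, 1
#       'kl_estimate': [1., 3., 2., 4.],  # bucket means 1.5 and 3.5
#   })
#   _compute_stderr(toy)                  # stderr over [1.5, 3.5] ~= 0.71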
def compute_normalization(data: score.LeaderboardData,
agent_name: str = 'baseline',
tau: int = 1) -> float:
df = _extract_tau_data(data, tau)
return df[df.agent_name == agent_name]['kl_estimate'].mean()
def compute_ranking(data: score.LeaderboardData,
num_seed_per_class: int = 10,
tau: int = 1,
kl_limit: float = 1e6) -> pd.DataFrame:
"""Compute the ranking based on the average KL divergence."""
# Subsample data to a specific tau
df = _extract_tau_data(data, tau)
if 'baseline:uniform_class_probs' in data.df.agent_name.unique():
normalizing_score = compute_normalization(
data, 'baseline:uniform_class_probs', tau)
else:
print('WARNING: uniform_class_probs agent not included in data, '
'no normalization is applied.')
normalizing_score = 1
# Calculate the mean KL
rank_df = _compute_mean(df, column_name=SCORE_COL)
# Calculate the std error
stderr_df = _compute_stderr(df, num_seed_per_class=num_seed_per_class)
rank_df = pd.merge(rank_df, stderr_df, on='agent_name', how='left')
# Calculate the mean test acc
testacc_df = _compute_mean(df, column_name='test_acc')
rank_df = pd.merge(rank_df, testacc_df, on='agent_name', how='left')
# Calculate the mean training time
traintime_df = _compute_mean(df, column_name='train_seconds')
rank_df = pd.merge(rank_df, traintime_df, on='agent_name', how='left')
# Calculate the mean evaluation time
evaltime_df = _compute_mean(df, column_name='evaluation_seconds')
rank_df = pd.merge(rank_df, evaltime_df, on='agent_name', how='left')
# TODO(author2): Work out what's going wrong with unhashable hypers e.g. list.
for var in data.sweep_vars:
try:
df[var].unique()
except TypeError:
df[var] = df[var].astype(str)
hyper_df = df[data.sweep_vars].drop_duplicates()
df = pd.merge(rank_df, hyper_df, on='agent_name', how='left')
# Adding in the normalized values
df['normalized_kl'] = df['mean_kl_estimate'] / normalizing_score
df['normalized_stderr'] = df['stderr_kl'] / normalizing_score
out_df = df.sort_values('normalized_kl').reset_index().drop({'index'}, axis=1)
# TODO(author2): Find a better way to limit KL in output plots.
return out_df[out_df.normalized_kl < kl_limit]
def display_ranking_df(
data: score.LeaderboardData,
display_cols: Sequence[str] = DISPLAY_COLS,
num_seed_per_class: int = 10,
tau: int = 1) -> pd.DataFrame:
"""Display the ranking based on the average KL divergence."""
display_cols = list(display_cols)
score_df = compute_ranking(data, num_seed_per_class, tau)
return score_df[display_cols]
[repo: neural_testbed-master | file: neural_testbed/leaderboard/plotting.py]
# pylint: disable=g-bad-file-header
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for neural_testbed.leaderboard.load."""
from absl.testing import absltest
from absl.testing import parameterized
from neural_testbed.leaderboard import load
import numpy as np
class LoadTest(parameterized.TestCase):
@parameterized.parameters([
['classification_2d/0'],
['classification_2d/10'],
['classification_2d/100'],
])
def test_gp_loading(self, problem_id: str):
"""Tests you can load from problem_id and data format matches prior."""
testbed_problem = load.problem_from_id(problem_id)
data = testbed_problem.train_data
prior = testbed_problem.prior_knowledge
assert data.x.shape == (prior.num_train, prior.input_dim)
assert data.y.shape == (prior.num_train, 1)
assert np.all(~np.isnan(data.x))
assert np.all(~np.isnan(data.y))
if __name__ == '__main__':
absltest.main()
[repo: neural_testbed-master | file: neural_testbed/leaderboard/load_test.py]
# pylint: disable=g-bad-file-header
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Load testbed entries from csv file."""
import glob
import os
from typing import Any, Sequence, Tuple
from neural_testbed.leaderboard import entries_csv
from neural_testbed.leaderboard import score
from neural_testbed.leaderboard import sweep
from neural_testbed.logging import csv_logger
import pandas as pd
def _load_entry(entry: entries_csv.Entry) -> Tuple[pd.DataFrame, Sequence[str]]:
"""Loads a single entry from csv logs."""
data = []
results_dir = entry.results_dir
for file_path in glob.glob(os.path.join(results_dir, '*.csv')):
_, name = os.path.split(file_path)
    # Rough-and-ready check that this is actually a neural testbed csv file.
if not name.startswith(csv_logger.GP_PREFIX):
print('Warning - we recommend you use a fresh folder for the results.')
continue
# Then we will assume that the file is actually a neural testbed result
df = pd.read_csv(file_path)
    file_id = os.path.splitext(name)[0].split(csv_logger.INITIAL_SEPARATOR)[1]
problem_id = file_id.replace(csv_logger.SAFE_SEPARATOR, sweep.SEPARATOR)
df['problem_id'] = problem_id
df['results_dir'] = results_dir
data.append(df)
df = pd.concat(data, sort=False)
return df, []
def load_entries(
leaderboard_entries: Any,
leaderboard_sweep: Sequence[str] = sweep.CLASSIFICATION_2D,
verbose: bool = True,
) -> score.LeaderboardData:
"""Loads leaderboard entries and outputs a list of cleaned AgentData."""
return score.LeaderboardData(*score.load_entries(
leaderboard_entries, _load_entry, leaderboard_sweep, verbose))
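# A minimal usage sketch (illustrative only): load CSV results written by the
# csv_logger back into a LeaderboardData object.
if __name__ == '__main__':
  entry = entries_csv.Entry(name='my_agent', results_dir='/tmp/neural_testbed')
  data = load_entries(entry)
  print(data.names)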
[repo: neural_testbed-master | file: neural_testbed/leaderboard/score_csv.py]
# pylint: disable=g-bad-file-header
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Exposing the public methods of leaderboard."""
from neural_testbed.leaderboard.entries_csv import Entry
# Leaderboard loading of testbed problem
from neural_testbed.leaderboard.load import gaussian_data
from neural_testbed.leaderboard.load import make_categorical_kl_estimator
from neural_testbed.leaderboard.load import problem_from_config
from neural_testbed.leaderboard.load import problem_from_id
from neural_testbed.leaderboard.load import problem_with_distribution_shift
from neural_testbed.leaderboard.load_csv import problem_from_id as problem_from_id_csv
# Leaderboard table
from neural_testbed.leaderboard.plotting import display_ranking_df
# Leaderboard scoring of experiments
from neural_testbed.leaderboard.score import AgentData
from neural_testbed.leaderboard.score import combine_leaderboards
from neural_testbed.leaderboard.score import join_metadata
from neural_testbed.leaderboard.score import LeaderboardData
from neural_testbed.leaderboard.score_csv import load_entries as load_entries_csv
# Leaderboard sweep of testbed problems
from neural_testbed.leaderboard.sweep import CLASSIFICATION
from neural_testbed.leaderboard.sweep import CLASSIFICATION_2D
from neural_testbed.leaderboard.sweep import CLASSIFICATION_2D_TEST
from neural_testbed.leaderboard.sweep import CLASSIFICATION_TEST
from neural_testbed.leaderboard.sweep import ENN_PAPER
from neural_testbed.leaderboard.sweep import ENN_PAPER_TEST
from neural_testbed.leaderboard.sweep import ProblemConfig
from neural_testbed.leaderboard.sweep import REGRESSION
from neural_testbed.leaderboard.sweep import REGRESSION_TEST
from neural_testbed.leaderboard.sweep import SETTINGS
from neural_testbed.leaderboard.sweep import ShiftConfig
[repo: neural_testbed-master | file: neural_testbed/leaderboard/__init__.py]
# pylint: disable=g-bad-file-header
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Leaderboard entries are stored here."""
from typing import Sequence
import dataclasses
@dataclasses.dataclass
class Entry:
name: str # A string to describe this leaderboard entry.
results_dir: str # A directory to store the output csv.
# pylint:disable=line-too-long
def get_baselines() -> Sequence[Entry]:
"""Default baselines for the testbed leaderboard."""
return [
Entry('uniform', '/tmp/neural_testbed'), # Uniform baseline
]
[repo: neural_testbed-master | file: neural_testbed/leaderboard/entries_csv.py]
# pylint: disable=g-bad-file-header
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Defines the leaderboard sweep for GP testbed."""
import dataclasses
from typing import Callable, Dict, List, Optional, Sequence
import chex
from neural_testbed import base
from neural_testbed import generative
# problem_ids are strings of the form {sweep_name}{SEPARATOR}{index}.
SEPARATOR = '/'
# DataFrame results are saved to this name in the log.
DATAFRAME = 'neural_testbed_5'
# TODO(author3): Define a better type for test_distribution
# Maps input dimension to XGenerator for testing data (WIP).
_TestDistCtor = Callable[[int], generative.XGenerator]
# Logit constructor maps key to logit_fn
LogitCtor = Callable[[chex.PRNGKey], generative.LogitFn]
@dataclasses.dataclass(frozen=True)
class ProblemConfig:
"""Problem configuration including prior knowledge and some hyperparams."""
# Agent's a priori knowledge about the problem.
prior_knowledge: base.PriorKnowledge
# Random seed controlling all the randomness in the problem.
seed: int
# Test sampling distribution and logit_ctor, used only for classification
logit_ctor: Optional[LogitCtor] = None # If None --> 2 layer MLP
test_distribution: _TestDistCtor = generative.make_gaussian_sampler
# Number of inputs (X's) used for evaluation.
num_test_seeds: int = 1000
# Number of samples generated from ENN during evaluation.
num_enn_samples: int = 1000
# Number of inputs (X's) cached for evaluation.
num_test_cache: int = 1000 # Used only by GPRegression data_sampler.
epistemic_only: bool = False # Used only by GPRegression.
@property
def meta_data(self):
meta = dataclasses.asdict(self)
meta.pop('prior_knowledge')
meta.update(dataclasses.asdict(self.prior_knowledge))
return meta
@dataclasses.dataclass(frozen=True)
class ShiftConfig:
"""Configuration for distributional shift of input data."""
reject_prob: float
fraction_rejected_classes: float
def regression_sweep(num_seed: int = 10,
initial_seed: int = 0) -> Dict[str, ProblemConfig]:
"""Generate hyperparameter sweep for regression.
Args:
    num_seed: number of seeds per configuration of other hyperparameters.
initial_seed: initial value of the seed.
Returns:
Mapping problem_id: gp_settings (for use in gp_load).
"""
configs = []
# TODO(author2): convert to itertools
seed = initial_seed
for input_dim in [1, 10, 100]:
for data_ratio in [1, 10, 100]:
for noise_std in [0.01, 0.1, 1]:
for unused_seed_inc in range(num_seed):
seed += 1
num_train = int(data_ratio * input_dim)
prior_knowledge = base.PriorKnowledge(
input_dim=input_dim,
num_train=num_train,
noise_std=noise_std,
num_classes=1, # Currently fixed and not part of the configs.
tau=1, # Currently regression only supports tau=1
layers=1,
)
configs.append(
ProblemConfig(prior_knowledge, seed, num_enn_samples=100))
return {f'regression{SEPARATOR}{i}': v for i, v in enumerate(configs)}
def regression_test_sweep() -> Dict[str, ProblemConfig]:
"""Reduced sweep for testing regression."""
full_configs = list(regression_sweep(num_seed=1).values())
configs = _filter_unique_configs(
full_configs,
lambda x: ((x.prior_knowledge.noise_std == 0.1) # pylint: disable=g-long-lambda
and (x.prior_knowledge.input_dim == 10)
and (x.prior_knowledge.num_train == 10)
and (x.prior_knowledge.tau == 1)))
return {f'regression_test{SEPARATOR}{i}': v for i, v in enumerate(configs)}
def classification_sweep(num_seed: int = 5,
initial_seed: int = 0) -> Dict[str, ProblemConfig]:
"""Generate hyperparameter sweep for classification problems.
Args:
num_seed: number of seeds per configuration of other hyperparameters.
initial_seed: initial value of the seed.
Returns:
Mapping problem_id: gp_settings (for use in gp_load).
"""
configs = []
# TODO(author2): convert to itertools
for tau in [1, 10]:
seed = initial_seed
for input_dim in [2, 10, 100]:
for data_ratio in [1, 10, 100, 1000]:
for temperature in [0.01, 0.1, 0.5]:
for unused_seed_inc in range(num_seed):
seed += 1
num_train = int(data_ratio * input_dim)
prior_knowledge = base.PriorKnowledge(
input_dim=input_dim,
num_train=num_train,
num_classes=2, # Currently fixed and not part of the configs.
tau=tau,
layers=2,
temperature=temperature,
)
configs.append(
ProblemConfig(
prior_knowledge=prior_knowledge,
seed=seed,
test_distribution=generative.make_polyadic_sampler,
),
)
return {f'classification{SEPARATOR}{i}': v for i, v in enumerate(configs)}
def classification_test_sweep() -> Dict[str, ProblemConfig]:
"""Reduced sweep for testing classification problems."""
full_configs = list(classification_sweep(num_seed=1).values())
configs = _filter_unique_configs(
full_configs,
lambda x: ((x.prior_knowledge.temperature == 0.01) # pylint: disable=g-long-lambda
and (x.prior_knowledge.tau == 1)
and (x.prior_knowledge.input_dim == 2))
)
return {f'classification_test{SEPARATOR}{i}':
v for i, v in enumerate(configs)}
def classification_2d_sweep(num_seed: int = 10,
initial_seed: int = 0) -> Dict[str, ProblemConfig]:
"""Generate hyperparameter sweep for 2d classification problems.
Args:
    num_seed: number of seeds per configuration of other hyperparameters.
initial_seed: initial value of the seed.
Returns:
Mapping problem_id: gp_settings (for use in gp_load).
"""
configs = []
# TODO(author2): convert to itertools
for tau in [1, 10]:
seed = initial_seed
for num_train in [1, 3, 10, 30, 100, 300, 1000]:
for temperature in [0.01, 0.1, 0.5]:
for unused_seed_inc in range(num_seed):
seed += 1
prior_knowledge = base.PriorKnowledge(
input_dim=2,
num_train=num_train,
num_classes=2, # Currently fixed and not part of the configs.
tau=tau,
layers=2,
temperature=temperature,
)
configs.append(ProblemConfig(prior_knowledge, seed))
return {f'classification_2d{SEPARATOR}{i}': v
for i, v in enumerate(configs)}
def classification_2d_test_sweep() -> Dict[str, ProblemConfig]:
"""Reduced sweep for testing 2d classification problems."""
full_configs = list(classification_2d_sweep(num_seed=1).values())
configs = _filter_unique_configs(
full_configs,
lambda x: ((x.prior_knowledge.temperature == 0.01) # pylint: disable=g-long-lambda
and (x.prior_knowledge.tau == 1))
)
return {f'classification_2d_test{SEPARATOR}{i}':
v for i, v in enumerate(configs)}
def enn_paper_sweep() -> Dict[str, ProblemConfig]:
"""Generates sweep for GP regression in ENN paper."""
configs = list(regression_sweep().values())
return {f'enn_paper{SEPARATOR}{i}': dataclasses.replace(problem_config,
epistemic_only=True)
for i, problem_config in enumerate(configs)}
def enn_paper_test_sweep() -> Dict[str, ProblemConfig]:
"""Reduced sweep for testing ENN paper."""
full_configs = list(regression_sweep(num_seed=1).values())
configs = _filter_unique_configs(full_configs,
lambda x: x.prior_knowledge.noise_std == .1)
return {
f'enn_paper_test{SEPARATOR}{i}':
dataclasses.replace(problem_config, epistemic_only=True)
for i, problem_config in enumerate(configs)
}
def _filter_unique_configs(
configs: Sequence[ProblemConfig],
filter_fn: Callable[[ProblemConfig], bool] = lambda _: True,
) -> List[ProblemConfig]: # pytype: disable=annotation-type-mismatch
"""Filters a list of problem_config to their unique occurrences for testing.
Args:
configs: list of ProblemConfig.
filter_fn: optional function to apply only to subset meeting this condition.
Returns:
List of unique occurrences for testing.
"""
observed_configs = set()
new_configs = []
for problem_config in configs:
if filter_fn(problem_config):
if problem_config not in observed_configs:
new_configs.append(problem_config)
observed_configs.add(problem_config)
return new_configs
def _merge_without_overwrite(
sweeps: Sequence[Dict[str, ProblemConfig]]) -> Dict[str, ProblemConfig]:
"""Merges sequence of dictionaries while avoiding overwriting keys."""
settings = {}
for sweep in sweeps:
if set(sweep).intersection(settings):
raise KeyError('Sweeps should not have the same keys!')
settings.update(sweep)
return settings
SETTINGS = _merge_without_overwrite([
regression_sweep(),
regression_test_sweep(),
enn_paper_sweep(),
enn_paper_test_sweep(),
classification_sweep(),
classification_test_sweep(),
classification_2d_sweep(),
classification_2d_test_sweep(),
])
REGRESSION = tuple(regression_sweep().keys())
REGRESSION_TEST = tuple(regression_test_sweep().keys())
ENN_PAPER = tuple(enn_paper_sweep().keys())
ENN_PAPER_TEST = tuple(enn_paper_test_sweep().keys())
CLASSIFICATION_2D = tuple(classification_2d_sweep().keys())
CLASSIFICATION_2D_TEST = tuple(classification_2d_test_sweep().keys())
CLASSIFICATION = tuple(classification_sweep().keys())
CLASSIFICATION_TEST = tuple(classification_test_sweep().keys())
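# A minimal usage sketch (illustrative only): problem_ids such as
# 'classification_2d/0' key into SETTINGS, and each config exposes flattened
# metadata via the meta_data property.
if __name__ == '__main__':
  problem_config = SETTINGS[CLASSIFICATION_2D[0]]
  print(problem_config.meta_data['input_dim'])  # 2 for the 2d sweep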
[repo: neural_testbed-master | file: neural_testbed/leaderboard/sweep.py]
# pylint: disable=g-bad-file-header
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Loading a leaderboard instance for the testbed."""
from typing import Optional, Tuple
from absl import logging
import chex
import haiku as hk
import jax
from neural_testbed import base as testbed_base
from neural_testbed import generative
from neural_testbed import likelihood
from neural_testbed.leaderboard import sweep
def problem_from_id(problem_id: str) -> testbed_base.TestbedProblem:
"""Factory method to load leaderboard problem from problem_id.
  This is a user-facing function whose only job is to translate a problem_id
  into prior knowledge.
Args:
problem_id: a string representing a standard problem in the leaderboard.
Returns:
A testbed problem.
"""
logging.info('Loading problem_id: %s', problem_id)
try:
problem_config = sweep.SETTINGS[problem_id]
except KeyError as missing:
raise ValueError(f'Unrecognised problem_id={problem_id}') from missing
return problem_from_config(problem_config)
def problem_from_config(
problem_config: sweep.ProblemConfig) -> testbed_base.TestbedProblem:
"""Returns a testbed problem given a problem config."""
assert problem_config.prior_knowledge.num_classes > 0
if problem_config.prior_knowledge.num_classes > 1:
return _load_classification(problem_config)
else:
return _load_regression(problem_config)
def problem_with_distribution_shift(
problem_config: sweep.ProblemConfig,
shift_config: sweep.ShiftConfig) -> likelihood.SampleBasedTestbed:
"""Returns a classification problem with input distribution shift."""
return _load_classification(problem_config, shift_config)
def _load_classification(
problem_config: sweep.ProblemConfig,
shift_config: Optional[sweep.ShiftConfig] = None,
) -> likelihood.SampleBasedTestbed:
"""Loads a classification problem from problem_config, optional shift_config."""
rng = hk.PRNGSequence(problem_config.seed)
prior_knowledge = problem_config.prior_knowledge
input_dim = prior_knowledge.input_dim
# Parse the logit_ctor from config
if problem_config.logit_ctor is None:
logit_fn = generative.make_2layer_mlp_logit_fn(
input_dim=input_dim,
temperature=prior_knowledge.temperature,
hidden=50,
num_classes=prior_knowledge.num_classes,
key=next(rng),
)
else:
logit_fn = problem_config.logit_ctor(next(rng))
# Parse the distribution shift
if shift_config is None:
override_train_data = None
else:
override_train_data = generative.make_filtered_gaussian_data(
input_dim=prior_knowledge.input_dim,
logit_fn=logit_fn,
reject_prob=shift_config.reject_prob,
fraction_rejected_classes=shift_config.fraction_rejected_classes,
num_samples=prior_knowledge.num_train,
key=next(rng),
)
# Generate the sample based testbed
data_sampler = generative.ClassificationEnvLikelihood(
logit_fn=logit_fn,
x_train_generator=generative.make_gaussian_sampler(input_dim),
x_test_generator=problem_config.test_distribution(input_dim),
num_train=prior_knowledge.num_train,
key=next(rng),
override_train_data=override_train_data,
tau=prior_knowledge.tau,
)
return likelihood.SampleBasedTestbed(
data_sampler=data_sampler,
sample_based_kl=make_categorical_kl_estimator(problem_config, next(rng)),
prior_knowledge=prior_knowledge,
)
def make_categorical_kl_estimator(
problem_config: sweep.ProblemConfig,
key: chex.PRNGKey) -> likelihood.SampleBasedKL:
"""Make sample based KL estimator for categorial models."""
prior_knowledge = problem_config.prior_knowledge
if prior_knowledge.tau > 10:
sample_based_kl = likelihood.CategoricalClusterKL(
cluster_alg=likelihood.RandomProjection(dimension=7),
num_enn_samples=problem_config.num_enn_samples,
num_test_seeds=problem_config.num_test_seeds,
key=key,
)
else:
sample_based_kl = likelihood.CategoricalKLSampledXSampledY(
num_test_seeds=problem_config.num_test_seeds,
num_enn_samples=problem_config.num_enn_samples,
key=key,
num_classes=prior_knowledge.num_classes,
)
sample_based_kl = likelihood.add_classification_accuracy_ece(
sample_based_kl,
num_test_seeds=int(1_000 / prior_knowledge.tau) + 1,
num_enn_samples=100,
num_classes=prior_knowledge.num_classes,
)
return sample_based_kl
def gaussian_data(key: chex.PRNGKey,
num_train: int,
input_dim: int,
num_test: int) -> Tuple[chex.Array, chex.Array]:
"""Generate Gaussian training and test data."""
train_key, test_key = jax.random.split(key)
x_train = jax.random.normal(train_key, [num_train, input_dim])
x_test = jax.random.normal(test_key, [num_test, input_dim])
return x_train, x_test
def _load_regression(
problem_config: sweep.ProblemConfig) -> testbed_base.TestbedProblem:
"""Loads a regression problem from problem_config."""
rng = hk.PRNGSequence(problem_config.seed)
prior_knowledge = problem_config.prior_knowledge
x_train, x_test = gaussian_data(
key=next(rng),
num_train=prior_knowledge.num_train,
input_dim=prior_knowledge.input_dim,
num_test=problem_config.num_test_cache,
)
if problem_config.epistemic_only:
# Special case used only for the ENN paper.
assert prior_knowledge.tau == 1, 'Only works for tau=1'
data_sampler = generative.GPRegression(
kernel_fn=generative.make_benchmark_kernel(prior_knowledge.input_dim),
x_train=x_train,
x_test=x_test,
key=next(rng),
tau=prior_knowledge.tau,
noise_std=prior_knowledge.noise_std,
)
return generative.TestbedGPRegression(
data_sampler,
prior_knowledge,
key=next(rng),
num_enn_samples=problem_config.num_enn_samples)
data_sampler = generative.GPRegressionEnvLikelihood(
kernel_fn=generative.make_benchmark_kernel(prior_knowledge.input_dim),
x_train=x_train,
x_test=x_test,
key=next(rng),
tau=prior_knowledge.tau,
noise_std=prior_knowledge.noise_std,
)
sample_based_kl = likelihood.GaussianSampleKL(
# This KL estimator cannot handle very large num_test_seed * tau
num_test_seeds=int(problem_config.num_test_seeds
/ prior_knowledge.tau) + 1,
num_enn_samples=problem_config.num_enn_samples,
enn_sigma=prior_knowledge.noise_std,
key=next(rng),
)
return likelihood.SampleBasedTestbed(
data_sampler=data_sampler,
sample_based_kl=sample_based_kl,
prior_knowledge=prior_knowledge,
)
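# A minimal usage sketch (illustrative only, assuming the package is
# installed): load a problem and inspect its training data, mirroring the
# checks in load_test.py.
if __name__ == '__main__':
  problem = problem_from_id('classification_2d/0')
  print(problem.train_data.x.shape, problem.prior_knowledge.num_train)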
[repo: neural_testbed-master | file: neural_testbed/leaderboard/load.py]
# pylint: disable=g-bad-file-header
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for neural_testbed.leaderboard.score_csv."""
import sys
from absl import flags
from absl.testing import absltest
from absl.testing import parameterized
from neural_testbed.leaderboard import entries_csv
from neural_testbed.leaderboard import score
from neural_testbed.leaderboard import score_csv
from neural_testbed.leaderboard import sweep
from neural_testbed.logging import csv_logger
FLAGS = flags.FLAGS
def log_fake_results(problem_id: str, results_dir: str) -> None:
"""Populate a fake set of results."""
logger = csv_logger.Logger(problem_id, results_dir)
logger.write({
'kl_estimate': 10.,
'total_seconds': 2.,
'train_seconds': 1.,
})
class ScoreCsvTest(parameterized.TestCase):
@parameterized.parameters([['cool_agent'], ['uncool_agent']])
def test_logger(self, name: str):
"""Write some fake results to csv and then load them back in."""
try:
flags.FLAGS.test_tmpdir
except flags.UnparsedFlagAccessError:
# Need to initialize flags when running `pytest`.
flags.FLAGS(sys.argv)
results_dir = self.create_tempdir().full_path
for problem_id in sweep.CLASSIFICATION_2D[:10]:
log_fake_results(problem_id=problem_id, results_dir=results_dir)
# Make a fake entry with this given name, and load it back in.
entry = entries_csv.Entry(name, results_dir)
data = score_csv.load_entries(entry)
# Check that the data is the right type
self.assertIsInstance(data, score.LeaderboardData,
'Data is not the right type')
# Check that the agent name has been passed through
    self.assertIn(name, data.names, 'the agent name was not passed through.')
# Check that sweep metadata is joined correctly on problem_id
self.assertIn('problem_id', data.df.columns,
'sweep metadata is not joined correctly on problem_id.')
# Check that we only loaded one agent
self.assertLen(data.agents, 1)
agent_data = data.agents[0]
self.assertIsInstance(agent_data, score.AgentData,
'Agent data is not the right type.')
# Check the quality of this single agent data
self.assertEqual(agent_data.name, name,
'Agent data does not have the correct name.')
    self.assertLess(agent_data.pct_health, 0.5,
                    'Health should be less than 50%.')
if __name__ == '__main__':
absltest.main()
[repo: neural_testbed-master | file: neural_testbed/leaderboard/score_csv_test.py]
# pylint: disable=g-bad-file-header
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Scoring/validation capabilities for leaderboard entries."""
import copy
import dataclasses
from typing import Any, Optional, Sequence, Tuple
from neural_testbed import logging
from neural_testbed.leaderboard import sweep
import numpy as np
import pandas as pd
# Maximum KL value used to fill missing or non-numeric values.
KL_FILL = 1e6
_AGENT_SUFFIX = '_agent'
@dataclasses.dataclass
class AgentData:
"""Contains cleaned data for a single ENN agent."""
df: pd.DataFrame # Data from full evaluation run
name: str = 'agent' # Name for the agent in plots etc
score: float = KL_FILL # Overall score on the testbed
pct_health: float = 0. # 1 for perfect data, 0 for all missing / NaN
validated: bool = False # Has this been validated
xm_link: Optional[str] = None # Link to experiment
report_link: Optional[str] = None # Link to experiment report
@dataclasses.dataclass
class LeaderboardData:
"""Contains cleaned data for a collections of agents."""
agents: Sequence[AgentData] # All of the constituent agents.
sweep_vars: Optional[Sequence[str]] = None
def __post_init__(self):
"""Form a cleaned version of the joined data."""
self.df = _make_leaderboard_dataframe(self.agents, self.sweep_vars)
self.names = [x.name for x in self.agents]
def join_metadata(df: pd.DataFrame) -> pd.DataFrame:
"""Joins data with GP settings based on problem_id."""
assert 'problem_id' in df.columns
metadata = copy.deepcopy(sweep.SETTINGS)
data = []
for problem_id, problem_config in metadata.items():
gp_params = {'problem_id': problem_id}
gp_params.update(problem_config.meta_data)
data.append(gp_params)
gp_df = pd.DataFrame(data)
# TODO(author2): Work out how to handle clash between agent and problem_id
# e.g. if an agent sweeps over temperature and so does the problem!
return pd.merge(df, gp_df, on='problem_id', suffixes=(_AGENT_SUFFIX, ''))
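# An illustration of join_metadata (hypothetical toy frame): joining on
# problem_id attaches the sweep settings, adding columns such as input_dim,
# num_train and tau to each agent result.
#
#   raw = pd.DataFrame({'problem_id': ['classification_2d/0'],
#                       'kl_estimate': [1.2]})
#   join_metadata(raw)  # one row, now with the ProblemConfig.meta_data columns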
def _check_drop_duplicate_logs(df: pd.DataFrame,
verbose: bool = True) -> pd.DataFrame:
"""Check for duplicate logging instances, indicating some kind of error."""
main_df = df.copy()
# Count the number of replicas for each problem_id
count_df = main_df.groupby(['problem_id']).apply(len)
count_df = count_df.reset_index().rename({0: 'replicas'}, axis=1)
count_df = count_df.drop_duplicates(subset=['problem_id'])
duplicates = count_df[count_df.replicas > 1]
  # If some problem_ids have more than one entry, print a warning message.
if len(duplicates) > 0: # pylint:disable=g-explicit-length-test
if verbose:
print('WARNING: multiple logs per problem_id, selecting first entry.')
print(duplicates.head())
# Drop duplicate problem_id in case they got logged several times.
df = df.drop_duplicates('problem_id')
return df
def _fix_legacy_problem_id(df: pd.DataFrame) -> pd.DataFrame:
# TODO(author2): remove need for this fix after renaming gp_id -> problem_id.
if 'gp_id' in df.columns and 'problem_id' not in df.columns:
df['problem_id'] = df['gp_id']
return df
def _clean_single_agent(df_in: pd.DataFrame,
leaderboard_sweep: Sequence[str],
agent_name: str = 'agent',
kl_fill: float = KL_FILL,
negative_tolerance: float = -1e-4,
verbose: bool = True) -> AgentData:
"""Validates and cleans the submission for a single agent."""
df = df_in.copy()
df = _fix_legacy_problem_id(df)
df = join_metadata(df)
df['raw_kl_estimate'] = df['kl_estimate']
problem_ids = df.problem_id.unique()
# Adding data_ratio as a column to df
if 'data_ratio' not in df.columns:
if 'num_train' in df.columns and 'input_dim' in df.columns:
df['data_ratio'] = df['num_train'] / df['input_dim']
# If agent name is already in the data, rename to flag_agent_name
if 'agent_name' in df.columns:
assert len(df.agent_name.unique()) == 1
flag_agent_name = df['agent_name'].iloc[0]
df['flag_agent_name'] = flag_agent_name
# Use this as the agent name if none is passed
if agent_name == 'agent':
agent_name = flag_agent_name
# Set up a unique name for the agent
df['agent_name'] = agent_name
if verbose:
print('\n' + '+' * 80)
print(f'Cleaning data for agent = {agent_name}')
# Drop extra problem_id.
extra_ids = [idx for idx in problem_ids if idx not in leaderboard_sweep]
if extra_ids and verbose:
    print(f'WARNING: agent={agent_name} has {len(extra_ids)} extra '
          f'problem_ids; these will be dropped:')
print(extra_ids)
df = df[~df.problem_id.isin(extra_ids)]
# Check for duplicate logging instances
# TODO(author3): Reflect duplicate entries in pct_health
df = _check_drop_duplicate_logs(df, verbose)
# Fill missing problem_id
missing_ids = [idx for idx in leaderboard_sweep if idx not in problem_ids]
if missing_ids:
fill_dict = {
'agent_name': agent_name,
'problem_id': missing_ids,
'kl_estimate': kl_fill,
}
    # Don't include the problem_id and kl_estimate columns for missing values.
fill_columns = [
col for col in df.columns if col not in ['problem_id', 'kl_estimate']]
for col in fill_columns:
# TODO(author2): Sort out unhashable columns...
try:
num_unique = len(df[col].unique())
except TypeError:
df[col] = df[col].astype(str)
num_unique = len(df[col].unique())
if num_unique == 1:
fill_dict[col] = df[col].iloc[0]
# TODO(author2): Sort out the merging/filling here... not too safe
df = pd.concat([df, pd.DataFrame(fill_dict)])
df = join_metadata(df)
if verbose:
      print(f'WARNING: agent={agent_name} has {len(missing_ids)} missing '
            f'problem_ids; these will be filled with {kl_fill}.')
print(missing_ids)
# Negative KL estimates
negative_kl = df[df.kl_estimate < 0]
num_negative = len(negative_kl)
  # Only count a KL as bad-negative if it is below the negative tolerance.
bad_negative = len(df[df.kl_estimate < negative_tolerance])
if num_negative:
kl_values = negative_kl.kl_estimate
df.loc[df.kl_estimate < 0, 'kl_estimate'] = 0
if verbose:
print(f'WARNING: agent={agent_name} has {num_negative} negative KL, '
'these will be clipped at zero.')
print(f'mean={kl_values.mean()}, min={kl_values.min()}')
# Non-numeric KL estimates
bad_kl = df[~np.isfinite(df.kl_estimate)]
num_bad = len(bad_kl)
if num_bad:
df.loc[~np.isfinite(df.kl_estimate), 'kl_estimate'] = kl_fill
if verbose:
print(f'WARNING: agent={agent_name} has {num_bad} non-finite KL. '
f'These values will be filled with {kl_fill}.\n')
# Aggregate health of the entry
total_bad = num_bad + bad_negative + len(extra_ids) + len(missing_ids)
total_entries = len(df)
df = df.assign(
pct_finite=1 - num_bad/total_entries,
pct_negative=1 - num_negative/total_entries,
pct_extra=len(extra_ids)/total_entries,
pct_missing=len(missing_ids)/total_entries,
pct_health=1 - total_bad/total_entries,
)
data = AgentData(
df=df,
name=agent_name,
score=df.kl_estimate.mean(),
pct_health=1-total_bad/total_entries,
validated=True,
)
return data
def _single_instance_or_list_to_list(var_instances: Any) -> Sequence[Any]:
"""Convert a potentially single-instance to a list of instance."""
try:
_ = len(var_instances)
if isinstance(var_instances, str):
var_instances = [var_instances]
except TypeError:
var_instances = [var_instances]
return var_instances
def _make_variable_postfix(sub_vars: Any, sweep_vars: Sequence[str]) -> str:
"""Join hyperparameters to identify agent, e.g. num_ensemble=1_net=mlp."""
sub_vars = _single_instance_or_list_to_list(sub_vars)
assert len(sub_vars) == len(sweep_vars)
return ','.join([f'{a}={b}' for a, b in zip(sweep_vars, sub_vars)])
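# An illustration of the naming convention (hypothetical values):
#   _make_variable_postfix([1, 'mlp'], ['num_ensemble', 'net'])
#   -> 'num_ensemble=1,net=mlp'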
def _maybe_add_links(agent: AgentData, entry: Any) -> AgentData:
# Internal use only.
return agent
def _load_single_entry(
entry: Any, # TODO(author2) turn this into a typevar for entries
entry_loader: logging.EntryLoader,
leaderboard_sweep: Sequence[str],
verbose: bool = True,
) -> Tuple[Sequence[AgentData], Sequence[str]]:
"""Loads a single leaderboard entry and outputs list of AgentData."""
df, sweep_vars = entry_loader(entry)
if sweep_vars:
# One entry for each of the sweep_vars
data = []
for sub_vars, sub_df in df.groupby(sweep_vars):
post_fix = _make_variable_postfix(sub_vars, sweep_vars)
agent = _clean_single_agent(
sub_df, leaderboard_sweep, agent_name=f'{entry.name}:{post_fix}',
verbose=verbose)
data.append(_maybe_add_links(agent, entry))
else:
# The whole entry is just for one agent
agent = _clean_single_agent(
df, leaderboard_sweep, agent_name=entry.name, verbose=verbose)
data = [_maybe_add_links(agent, entry)]
return data, sweep_vars
def load_entries(
leaderboard_entries: Any, # TODO(author2): sort out this typing.
entry_loader: logging.EntryLoader,
leaderboard_sweep: Sequence[str] = sweep.CLASSIFICATION_2D,
verbose: bool = True,
) -> Tuple[Sequence[AgentData], Sequence[str]]:
"""Loads leaderboard entries and outputs a list of cleaned AgentData."""
leaderboard_entries = _single_instance_or_list_to_list(leaderboard_entries)
data = []
sweep_vars = ['agent_name', 'notes', 'pct_health', 'report_link']
for entry in leaderboard_entries:
sub_data, sub_sweep = _load_single_entry(
entry, entry_loader, leaderboard_sweep, verbose)
data.extend(sub_data)
sweep_vars.extend(sub_sweep)
return data, sweep_vars
def combine_leaderboards(boards: Sequence[LeaderboardData]) -> LeaderboardData:
"""Combine multiple leaderboards into one."""
agents = []
sweep_vars = []
names = []
for board in boards:
sweep_vars.extend(board.sweep_vars)
for agent in board.agents:
agents.append(agent)
if agent.name not in names:
        names.append(agent.name)
else:
raise ValueError(f'Duplicate agent={agent.name} encountered.'
' You must rename agent to combine leaderboards.')
return LeaderboardData(agents, list(set(sweep_vars))) # For unique columns
def _make_leaderboard_dataframe(
agents: Sequence[AgentData],
sweep_vars: Optional[Sequence[str]] = None,
) -> pd.DataFrame:
"""Process leaderboard entries into a unified dataframe."""
data = []
for agent in agents:
data.append(agent.df.assign(report_link=agent.report_link))
df = pd.concat(data)
df['entry_name'] = df.agent_name.apply(lambda x: x.split(':')[0])
df['task'] = df.problem_id.apply(lambda x: x.split('/')[0])
if sweep_vars:
for col in sweep_vars:
try:
df[col] = df[col].fillna('nan') # Fixes bug in pandas groupby NaN.
except KeyError:
df[col] = 'nan' # This column was completely missing
except ValueError:
pass # This column did not want to be coerced to 'nan'
return df
[repo: neural_testbed-master | file: neural_testbed/leaderboard/score.py]
# pylint: disable=g-bad-file-header
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Example running an ENN on Thompson bandit task."""
from absl import app
from absl import flags
from neural_testbed.agents.factories.sweeps import testbed_2d as factories
from neural_testbed.bandit import agents
from neural_testbed.bandit import thompson
# ENN training
flags.DEFINE_string('agent_id', 'ensemble+', 'Which benchmark agent to run.')
# Bandit problem
flags.DEFINE_integer('input_dim', 2, 'Input dimension')
flags.DEFINE_float('temperature', 0.1, 'Temperature')
flags.DEFINE_integer('num_actions', 50, 'Number of actions')
flags.DEFINE_integer('num_steps', 10_000, 'Number of timesteps')
flags.DEFINE_integer('seed', 0, 'Bandit seed')
flags.DEFINE_integer('steps_per_obs', 1, 'sgds per observation')
FLAGS = flags.FLAGS
def main(_):
# Override this config for different ENNs... must be a VanillaEnnAgent
paper_agent = factories.get_paper_agent(FLAGS.agent_id)
# Convert testbed agent to sequential decision agent
config, l2_weight_decay = agents.make_config_l2_for_bandit(
paper_agent=paper_agent,
temperature=FLAGS.temperature,
seed=FLAGS.seed,
)
# Run the bandit experiment with appropriate logging
experiment = thompson.ThompsonEnnBandit(
enn_config=config,
input_dim=FLAGS.input_dim,
num_actions=FLAGS.num_actions * FLAGS.input_dim,
temperature=FLAGS.temperature,
seed=FLAGS.seed,
steps_per_obs=FLAGS.steps_per_obs,
l2_weight_decay=l2_weight_decay,
)
log_freq = int(FLAGS.num_steps / 100)
if log_freq == 0:
log_freq = 1
experiment.run(FLAGS.num_steps, log_freq)
if __name__ == '__main__':
app.run(main)
[repo: neural_testbed-master | file: neural_testbed/bandit/run.py]
# pylint: disable=g-bad-file-header
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
[repo: neural_testbed-master | file: neural_testbed/bandit/__init__.py]
# pylint: disable=g-bad-file-header
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Code to adjust testbed agents for sequential decision problems.
The focus of this code is to add functionality for a sensible decay of "prior
effect" as the number of training points grow.
"""
import typing
from typing import Tuple
import chex
from enn import base as enn_base
from enn import datasets
from enn import losses
from enn import networks
import haiku as hk
import jax.numpy as jnp
from neural_testbed import agents
from neural_testbed import base as testbed_base
from neural_testbed.agents.factories import base as factories_base
from neural_testbed.agents.factories import bbb
def make_config_l2_for_bandit(
paper_agent: factories_base.PaperAgent,
temperature: float = 1,
seed: int = 0,
) -> Tuple[agents.VanillaEnnConfig, float]:
"""Converts agent to sequential decision form and appropriate l2 decay."""
# Extract agent config
config = paper_agent.default
  # Extract the l2 weight decay parameter from the agent default, then zero it
  # out in the config so that l2 decay is not applied twice.
if hasattr(config, 'l2_weight_decay'):
l2_weight_decay = config.l2_weight_decay
config.l2_weight_decay = 0
elif hasattr(config, 'dropout_rate'):
l2_weight_decay = config.length_scale
config.length_scale = 0
else:
l2_weight_decay = 0
# Rescale l2 weight decay by temperature, and potentially by ensemble size
l2_weight_decay *= 2 * temperature
if hasattr(config, 'num_ensemble'):
l2_weight_decay = l2_weight_decay / config.num_ensemble
# Override seed and form agent
config.seed = seed
agent = paper_agent.ctor(config)
assert isinstance(agent, agents.VanillaEnnAgent)
agent = typing.cast(agents.VanillaEnnAgent, agent)
# If the agent is bbb then we should override the loss_fn
if isinstance(config, bbb.BBBConfig):
agent.config.loss_ctor = _make_bbb_bandit_loss(config)
return agent.config, l2_weight_decay
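# A worked example of the rescaling above (hypothetical numbers): an ensemble
# agent with l2_weight_decay=1.0, num_ensemble=10 and temperature=0.1 yields
# a returned decay of 1.0 * 2 * 0.1 / 10 = 0.02.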
def _make_bbb_bandit_loss(config: bbb.BBBConfig) -> agents.LossCtor:
"""BBB loss with decaying prior through time for sequential decisions."""
def loss_ctor(prior: testbed_base.PriorKnowledge,
enn: networks.EnnArray) -> losses.LossFnArray:
del enn
log_likelihood_fn = losses.get_categorical_loglike_fn(prior.num_classes)
prior_kl_fn = losses.get_analytical_diagonal_linear_model_prior_kl_fn(
1, config.sigma_1)
def elbo_loss(
apply: networks.ApplyArray,
params: hk.Params,
state: hk.State,
batch: datasets.ArrayBatch,
index: enn_base.Index,
) -> enn_base.LossOutput:
"""Elbo loss with decay per num_steps stored in the batch."""
out, state = apply(params, state, batch.x, index)
log_likelihood = log_likelihood_fn(out, batch)
prior_kl = prior_kl_fn(out, params, index)
chex.assert_equal_shape([log_likelihood, prior_kl])
# Rescaling by num_steps and temperature
prior_kl *= 2 * jnp.sqrt(prior.temperature) / batch.extra['num_steps']
return prior_kl - log_likelihood, (state, {})
return losses.average_single_index_loss(elbo_loss, config.num_index_samples)
return loss_ctor
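# Usage sketch (illustrative; not part of the original module). Assuming
# `paper_agent` is a factories_base.PaperAgent exposing a `.default` config
# and a `.ctor`, as consumed above:
#
#   enn_config, l2_decay = make_config_l2_for_bandit(
#       paper_agent, temperature=0.1, seed=1)
#
# The returned `enn_config` drives the sequential-decision experiment, while
# `l2_decay` is applied separately so the prior effect can be annealed as
# training data accumulates.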
|
neural_testbed-master
|
neural_testbed/bandit/agents.py
|
# pylint: disable=g-bad-file-header
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for bsuite.baselines.replay.
Branched from bsuite: https://github.com/deepmind/bsuite.
"""
from absl.testing import absltest
from neural_testbed.bandit import replay as replay_lib
import numpy as np
class BasicReplayTest(absltest.TestCase):
def test_end_to_end(self):
shapes = (10, 10, 3), ()
capacity = 5
def generate_sample():
return [np.random.randint(0, 256, size=(10, 10, 3), dtype=np.uint8),
np.random.uniform(size=())]
replay = replay_lib.Replay(capacity=capacity)
# Does it crash if we sample when there's barely any data?
sample = generate_sample()
replay.add(sample)
samples = replay.sample(size=2)
for sample, shape in zip(samples, shapes):
self.assertEqual(sample.shape, (2,) + shape)
# Fill to capacity.
for _ in range(capacity - 1):
replay.add(generate_sample())
samples = replay.sample(size=3)
for sample, shape in zip(samples, shapes):
self.assertEqual(sample.shape, (3,) + shape)
replay.add(generate_sample())
samples = replay.sample(size=capacity)
for sample, shape in zip(samples, shapes):
self.assertEqual(sample.shape, (capacity,) + shape)
if __name__ == '__main__':
absltest.main()
|
neural_testbed-master
|
neural_testbed/bandit/replay_test.py
|
# pylint: disable=g-bad-file-header
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""A simple, uniformly sampled replay buffer.
Branched from bsuite: https://github.com/deepmind/bsuite.
"""
from typing import Any, Optional, Sequence
import numpy as np
class Replay:
"""Uniform replay buffer. Allocates all required memory at initialization."""
_data: Optional[Sequence[np.ndarray]]
_capacity: int
_num_added: int
def __init__(self, capacity: int):
"""Initializes a new `Replay`.
Args:
capacity: The maximum number of items allowed in the replay. Adding
items to a replay that is at maximum capacity will overwrite the oldest
items.
"""
self._data = None
self._capacity = capacity
self._num_added = 0
def add(self, items: Sequence[Any]):
"""Adds a single sequence of items to the replay.
Args:
items: Sequence of items to add. Does not handle batched or nested items.
"""
if self._data is None:
self._preallocate(items)
for slot, item in zip(self._data, items):
slot[self._num_added % self._capacity] = item
self._num_added += 1
def sample(self, size: int) -> Sequence[np.ndarray]:
"""Returns a transposed/stacked minibatch. Each array has shape [B, ...]."""
indices = np.random.randint(self.size, size=size)
return [slot[indices] for slot in self._data]
  def reset(self):
"""Resets the replay."""
self._data = None
@property
def size(self) -> int:
return min(self._capacity, self._num_added)
@property
def fraction_filled(self) -> float:
return self.size / self._capacity
def _preallocate(self, items: Sequence[Any]):
"""Assume flat structure of items."""
as_array = []
for item in items:
if item is None:
raise ValueError('Cannot store `None` objects in replay.')
as_array.append(np.asarray(item))
self._data = [np.zeros(dtype=x.dtype, shape=(self._capacity,) + x.shape)
for x in as_array]
def __repr__(self):
return 'Replay: size={}, capacity={}, num_added={}'.format(
self.size, self._capacity, self._num_added)
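if __name__ == '__main__':
  # Minimal usage sketch (not part of the original module): store flat
  # (observation, reward) pairs, then draw a stacked minibatch.
  demo_replay = Replay(capacity=4)
  for t in range(6):  # Adding beyond capacity overwrites the oldest items.
    demo_replay.add([np.ones([3]) * t, float(t)])
  observations, rewards = demo_replay.sample(size=2)
  assert observations.shape == (2, 3)
  assert rewards.shape == (2,)
  print(demo_replay)  # -> Replay: size=4, capacity=4, num_added=6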
|
neural_testbed-master
|
neural_testbed/bandit/replay.py
|
# pylint: disable=g-bad-file-header
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for neural_testbed.experiments.dropout.run."""
from absl import flags
from absl.testing import absltest
from absl.testing import parameterized
from neural_testbed.bandit import run
FLAGS = flags.FLAGS
PAPER_AGENTS = ['mlp', 'bbb', 'dropout', 'ensemble', 'ensemble+', 'hypermodel']
class RunTest(parameterized.TestCase):
@parameterized.parameters([[x] for x in PAPER_AGENTS])
def test_neural_testbed(self, agent_id: str):
FLAGS.agent_id = agent_id
FLAGS.input_dim = 2
FLAGS.num_steps = 2
FLAGS.num_actions = 2
run.main(None)
if __name__ == '__main__':
absltest.main()
|
neural_testbed-master
|
neural_testbed/bandit/run_test.py
|
# pylint: disable=g-bad-file-header
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Thompson sampling evaluation of ENN agent on bandit task."""
import functools
from typing import Dict, Optional, Tuple
from acme.utils import loggers
import chex
from enn import base as enn_base
from enn import datasets
from enn import losses
from enn import networks
import haiku as hk
import jax
import jax.numpy as jnp
from neural_testbed import agents
from neural_testbed import base as testbed_base
from neural_testbed import generative
from neural_testbed.bandit import replay
from neural_testbed.leaderboard import sweep
import optax
class ThompsonEnnBandit:
"""Experiment of Thompson sampling bandit."""
def __init__(
self,
enn_config: agents.VanillaEnnConfig,
input_dim: int,
num_actions: int,
logit_ctor: Optional[sweep.LogitCtor] = None,
temperature: float = 1,
steps_per_obs: int = 1,
logger: Optional[loggers.Logger] = None,
batch_size: int = 128,
l2_weight_decay: float = 1,
replay_capacity: int = 10_000,
learning_rate: float = 1e-3,
seed: int = 0,
):
"""Initialize a Thompson Sampling experiment."""
# Initializing the agent internals
prior = testbed_base.PriorKnowledge(
input_dim=input_dim,
num_train=100,
num_classes=2,
tau=1,
layers=2,
temperature=temperature,
)
self.enn = enn_config.enn_ctor(prior)
loss_fn = enn_config.loss_ctor(prior, self.enn)
loss_fn = functools.partial(loss_fn, self.enn)
def predicate(module_name: str, name: str, value) -> bool:
del name, value
return 'prior' not in module_name
def loss_with_decay(
params: hk.Params,
state: hk.State,
batch: datasets.ArrayBatch,
key: chex.PRNGKey) -> enn_base.LossOutput:
# Adding annealing l2 weight decay manually
data_loss, (state, metrics) = loss_fn(params, state, batch, key)
l2_weight = losses.l2_weights_with_predicate(params, predicate)
metrics['l2_weight'] = l2_weight
decay_loss = l2_weight_decay * l2_weight / batch.extra['num_steps']
return data_loss + decay_loss, (state, metrics)
self._loss_with_decay = jax.jit(loss_with_decay)
optimizer = optax.adam(learning_rate)
# Forward network at random index
def forward(params: hk.Params,
inputs: chex.Array,
key: chex.PRNGKey) -> chex.Array:
index = self.enn.indexer(key)
unused_state = {}
out, unused_state = self.enn.apply(params, unused_state, inputs, index)
return out
self._forward = jax.jit(forward)
# Perform an SGD step on a batch of data
def sgd_step(
params: hk.Params,
opt_state: optax.OptState,
batch: datasets.ArrayBatch,
key: chex.PRNGKey,
) -> Tuple[hk.Params, optax.OptState]:
unused_state = {}
grads, _ = jax.grad(
loss_with_decay, has_aux=True)(params, unused_state, batch, key)
updates, new_opt_state = optimizer.update(grads, opt_state)
new_params = optax.apply_updates(params, updates)
return new_params, new_opt_state
self._sgd_step = jax.jit(sgd_step)
# Generating the underlying function
self.rng = hk.PRNGSequence(seed)
self.actions = jax.random.normal(next(self.rng), [num_actions, input_dim])
# Create the logit_fn
if logit_ctor is None:
logit_fn = generative.make_2layer_mlp_logit_fn(
input_dim=input_dim,
temperature=temperature,
hidden=50,
num_classes=2,
key=next(self.rng),
)
else:
logit_fn = logit_ctor(next(self.rng))
logits = logit_fn(self.actions)
# Vector of probabilities of rewards for each action
self.probs = jax.nn.softmax(logits)[:, 1]
chex.assert_shape(self.probs, [num_actions])
self.max_prob = jnp.max(self.probs)
# Initializing the network
index = self.enn.indexer(next(self.rng))
self.params, self.network_state = self.enn.init(
next(self.rng), self.actions, index)
self.opt_state = optimizer.init(self.params)
self._steps_per_obs = steps_per_obs
self._temperature = temperature
self._batch_size = batch_size
self.l2_weight_decay = l2_weight_decay
self.replay = replay.Replay(capacity=replay_capacity)
self.logger = (
logger or loggers.make_default_logger('experiment', time_delta=0))
self.num_steps = 0
self.total_regret = 0
def select_action(params: hk.Params,
key: chex.PRNGKey) -> Dict[str, chex.Array]:
net_key, noise_key, selection_key = jax.random.split(key, 3)
net_out = forward(params, self.actions, net_key)
logits = networks.parse_net_output(net_out)
probs = jax.nn.softmax(logits)[:, 1]
action = _random_argmax(probs, selection_key)
chosen_prob = self.probs[action]
reward = jax.random.bernoulli(noise_key, chosen_prob)
regret = self.max_prob - chosen_prob
return { # pytype: disable=bad-return-type # numpy-scalars
'action': action,
'reward': reward,
'regret': regret,
'chosen_prob': chosen_prob, # for debugging
}
self._select_action = jax.jit(select_action)
def run(self, num_steps: int, log_freq: int = 1):
"""Run a TS experiment for num_steps."""
for _ in range(num_steps):
self.num_steps += 1
regret = self.step()
self.total_regret += regret
if self.num_steps % log_freq == 0:
self.logger.write({
'total_regret': self.total_regret,
't': self.num_steps,
'ave_regret': self.total_regret / self.num_steps,
'regret': regret,
})
for _ in range(self._steps_per_obs):
if self.num_steps >= 1:
self.params, self.opt_state = self._sgd_step(
self.params, self.opt_state, self._get_batch(), next(self.rng))
def step(self) -> float:
"""Select action, update replay and return the regret."""
results = self._select_action(self.params, next(self.rng))
self.replay.add([
self.actions[results['action']],
jnp.ones([1]) * results['reward'],
jnp.ones([1], dtype=jnp.int64) * self.num_steps,
])
return float(results['regret'])
def _get_batch(self) -> datasets.ArrayBatch:
actions, rewards, indices = self.replay.sample(self._batch_size)
return datasets.ArrayBatch( # pytype: disable=wrong-arg-types # numpy-scalars
x=actions,
y=rewards,
data_index=indices,
extra={'num_steps': self.num_steps},
)
def _random_argmax(
vals: chex.Array, key: chex.PRNGKey, scale: float = 1e-7
) -> int:
"""Select argmax with additional random noise."""
noise = jax.random.uniform(key, vals.shape)
return jnp.argmax(vals + scale * noise, axis=0)
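# Usage sketch (illustrative; not part of the original module). Assuming
# `enn_config` is an agents.VanillaEnnConfig providing `enn_ctor` and
# `loss_ctor`, as consumed in __init__ above:
#
#   bandit = ThompsonEnnBandit(
#       enn_config=enn_config,
#       input_dim=2,
#       num_actions=10,
#       temperature=0.1,
#   )
#   bandit.run(num_steps=1000, log_freq=10)  # Logs total and average regret.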
|
neural_testbed-master
|
neural_testbed/bandit/thompson.py
|
# pylint: disable=g-bad-file-header
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Neural testbed library for uncertainty evaluation."""
|
neural_testbed-master
|
neural_testbed/opensource/__init__.py
|
# pylint: disable=g-bad-file-header
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for efficient_agent.neural_testbed.generative.nt_kernels."""
import itertools
from absl.testing import absltest
from absl.testing import parameterized
import haiku as hk
import jax
import jax.config
import jax.numpy as jnp
from neural_testbed.generative import nt_kernels
# TODO(author1): move this config update to an explicit initialize function.
jax.config.update('jax_enable_x64', True)
class NtKernelsTest(parameterized.TestCase):
@parameterized.parameters([[x] for x in range(10)])
def test_benchmark_kernel(self, seed: int):
# Generate benchmark kernel
kernel_fn = nt_kernels.make_benchmark_kernel()
rng = hk.PRNGSequence(seed)
# Evaluate at random x in 1D
x = jax.random.normal(next(rng), [1000, 1])
kernel = kernel_fn(x, x, 'nngp')
adjusted_kernel = kernel + 1e-6 * jnp.eye(len(kernel))
# Check that posterior sample non-nan
for _ in range(10):
sample = jax.random.multivariate_normal(
next(rng), jnp.zeros(len(kernel)), adjusted_kernel)
assert jnp.all(~jnp.isnan(sample))
@parameterized.parameters(
itertools.product(range(10), [1, 10], ['nngp', 'ntk']))
def test_kernel_matrix(self, seed: int, input_dim: int, method: str):
"""Checks that the kernel matrix is symmetric and positive semi-definite."""
def is_symmetric(x: jnp.ndarray, rtol: float = 1e-05, atol: float = 1e-08):
return jnp.allclose(x, x.T, rtol=rtol, atol=atol)
def is_pos_semi_definite(x: jnp.ndarray):
return jnp.all(jnp.linalg.eigvals(x) >= -1e-10)
# Generate benchmark kernel
kernel_fn = nt_kernels.make_benchmark_kernel()
rng = hk.PRNGSequence(seed)
# Evaluate at random x
x = jax.random.normal(next(rng), [100, input_dim])
kernel = kernel_fn(x, x, method)
# Check that the kernel is symmetric, positive semi-definite
assert is_symmetric(kernel)
assert is_pos_semi_definite(kernel)
if __name__ == '__main__':
absltest.main()
|
neural_testbed-master
|
neural_testbed/generative/nt_kernels_test.py
|
# pylint: disable=g-bad-file-header
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Specific neural tangent kernels."""
import dataclasses
from typing import Any, List, Optional, Tuple, TypeVar, Union
from jax import random
import neural_tangents as nt
from neural_tangents import stax
import numpy as np
from typing_extensions import Protocol
T = TypeVar('T')
PyTree = Any
NTTree = Union[List[T], Tuple[T, ...], T]
Shapes = NTTree[Tuple[int, ...]]
"""A shape - a tuple of integers, or an :class:`NTTree` of such tuples.
"""
# Layer Definition.
class InitFn(Protocol):
"""A type alias for initialization functions.
Initialization functions construct parameters for neural networks given a
random key and an input shape. Specifically, they produce a tuple giving the
output shape and a PyTree of parameters.
"""
def __call__(
self,
rng: random.KeyArray,
input_shape: Shapes,
**kwargs
) -> Tuple[Shapes, PyTree]:
...
class ApplyFn(Protocol):
"""A type alias for apply functions.
Apply functions do computations with finite-width neural networks. They are
functions that take a PyTree of parameters and an array of inputs and produce
an array of outputs.
"""
def __call__(
self,
params: PyTree,
inputs: NTTree[np.ndarray],
*args,
**kwargs
) -> NTTree[np.ndarray]:
...
KernelOrInput = Union[NTTree[nt.Kernel], NTTree[np.ndarray]]
Get = Union[Tuple[str, ...], str, None]
class LayerKernelFn(Protocol):
"""A type alias for pure kernel functions.
A pure kernel function takes a PyTree of Kernel object(s) and produces a
PyTree of Kernel object(s). These functions are used to define new layer
types.
"""
def __call__(
self,
k: NTTree[nt.Kernel]
) -> NTTree[nt.Kernel]:
...
class AnalyticKernelFn(Protocol):
"""A type alias for analytic kernel functions.
A kernel function that computes an analytic kernel. Takes either a
:class:`~neural_tangents.Kernel` or :class:`jax.numpy.ndarray` inputs and a
`get` argument that specifies what quantities should be computed by the
kernel. Returns either a :class:`~neural_tangents.Kernel` object or
:class:`jax.numpy.ndarray`-s for kernels specified by `get`.
"""
def __call__(
self,
x1: KernelOrInput,
x2: Optional[NTTree[np.ndarray]] = None,
get: Get = None,
**kwargs
) -> Union[NTTree[nt.Kernel], NTTree[np.ndarray]]:
...
InternalLayer = Tuple[InitFn, ApplyFn, LayerKernelFn]
class KernelCtor(Protocol):
"""Interface for generating a kernel for a given input dimension."""
def __call__(self, input_dim: int) -> AnalyticKernelFn:
"""Generates a kernel for a given input dimension."""
@dataclasses.dataclass
class MLPKernelCtor(KernelCtor):
"""Generates a GP kernel corresponding to an infinitely-wide MLP."""
num_hidden_layers: int
activation: InternalLayer
def __post_init__(self):
assert self.num_hidden_layers >= 1, 'Must have at least one hidden layer.'
def __call__(self, input_dim: int = 1) -> AnalyticKernelFn:
"""Generates a kernel for a given input dimension."""
    limit_width = 50  # Finite-width placeholder; the analytic kernel is the infinite-width limit.
layers = [
stax.Dense(limit_width, W_std=1, b_std=1 / np.sqrt(input_dim))
]
for _ in range(self.num_hidden_layers - 1):
layers.append(self.activation)
layers.append(stax.Dense(limit_width, W_std=1, b_std=0))
layers.append(self.activation)
layers.append(stax.Dense(1, W_std=1, b_std=0))
_, _, kernel = stax.serial(*layers)
return kernel
def make_benchmark_kernel(input_dim: int = 1) -> AnalyticKernelFn:
"""Creates the benchmark kernel used in leaderboard = 2-layer ReLU."""
kernel_ctor = MLPKernelCtor(num_hidden_layers=2, activation=stax.Relu())
return kernel_ctor(input_dim)
def make_linear_kernel(input_dim: int = 1) -> AnalyticKernelFn:
"""Generate a linear GP kernel for testing putposes."""
layers = [
stax.Dense(1, W_std=1, b_std=1 / np.sqrt(input_dim)),
]
_, _, kernel = stax.serial(*layers)
return kernel
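if __name__ == '__main__':
  # Smoke test (not part of the original module): evaluate the benchmark
  # kernel on a few random 1D inputs and check the Gram matrix shape.
  demo_key = random.PRNGKey(0)
  demo_x = random.normal(demo_key, [5, 1])
  demo_kernel_fn = make_benchmark_kernel(input_dim=1)
  demo_kernel = demo_kernel_fn(demo_x, demo_x, 'nngp')
  print(demo_kernel.shape)  # -> (5, 5)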
|
neural_testbed-master
|
neural_testbed/generative/nt_kernels.py
|
# pylint: disable=g-bad-file-header
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""GP inference in a classification setting with respect to the environment likelihood."""
from typing import Tuple
import chex
from enn import metrics
import haiku as hk
import jax
import jax.numpy as jnp
from neural_testbed import base as testbed_base
from neural_testbed import likelihood
from neural_testbed.generative import nt_kernels
class GPClassificationEnvLikelihood(likelihood.GenerativeDataSampler):
"""GP with softmax output, neural_tangent kernel, environment-based inference."""
def __init__(self,
kernel_fn: nt_kernels.AnalyticKernelFn,
x_train: chex.Array,
x_test: chex.Array,
key: chex.PRNGKey,
tau: int = 1,
num_classes: int = 2,
temperature: float = 1,
kernel_ridge: float = 1e-6,
ntk: bool = False):
# Checking the dimensionality of our data coming in.
num_train, input_dim = x_train.shape
num_test_x_cache, input_dim_test = x_test.shape
assert input_dim == input_dim_test
rng = hk.PRNGSequence(key)
self._tau = tau
self._input_dim = input_dim
self._x_train = jnp.array(x_train)
self._x_test = jnp.array(x_test)
self._num_train = num_train
self._num_test_x_cache = num_test_x_cache
self._num_classes = num_classes
# Generate environment function across combined_x = [x_train, x_test]
mean = jnp.zeros(num_train + num_test_x_cache)
get_kernel = 'ntk' if ntk else 'nngp'
combined_x = jnp.vstack([self._x_train, self._x_test])
kernel = kernel_fn(combined_x, x2=None, get=get_kernel)
kernel += kernel_ridge * jnp.eye(len(kernel))
def sample_environment_probs(key: chex.PRNGKey) -> chex.Array:
"""Samples environment class probabilities for the data."""
      sample_logit = lambda k: jax.random.multivariate_normal(k, mean, kernel)
sample_all_class_logits = jax.vmap(sample_logit, out_axes=1)
logits = sample_all_class_logits(jax.random.split(key, num_classes))
return jax.nn.softmax(logits / temperature) # [data, classes]
# Class probabilities for each data point.
self._probabilities = sample_environment_probs(next(rng)) # [data, classes]
chex.assert_shape(self._probabilities, [
self._num_train + self._num_test_x_cache,
self._num_classes,
])
# Generate training data.
def sample_output(probs: chex.Array, key: chex.PRNGKey) -> chex.Array:
return jax.random.choice(key, num_classes, p=probs)
train_probs = self._probabilities[:num_train]
train_keys = jax.random.split(next(rng), num_train)
batched_sample = jax.jit(jax.vmap(sample_output))
y_train = batched_sample(train_probs, train_keys)[:, None]
self._train_data = testbed_base.Data(x=self._x_train, y=y_train)
self._test_probs = self._probabilities[num_train:]
@property
def train_data(self) -> testbed_base.Data:
return self._train_data
@property
def test_x(self) -> chex.Array:
return self._x_test
@property
def probabilities(self) -> chex.Array:
return self._probabilities
def test_data(self, key: chex.PRNGKey) -> Tuple[testbed_base.Data, float]:
"""Generates test data and evaluates log likelihood w.r.t. environment.
    The test data that is output will contain tau examples.
We wanted to "pass" tau here... but ran into jax.jit issues.
Args:
key: Random number generator key.
Returns:
Tuple of data (with tau examples) and log-likelihood under posterior.
"""
def sample_test_data(key: chex.PRNGKey) -> Tuple[testbed_base.Data, float]:
x_key, y_key = jax.random.split(key, 2)
# Be careful about the shapes of these arrays:
chex.assert_shape(
self._test_probs, [self._num_test_x_cache, self._num_classes])
chex.assert_shape(
self._x_test, [self._num_test_x_cache, self._input_dim])
# Sample tau x's from the testing cache for evaluation.
test_x_indices = jax.random.randint(
x_key, [self._tau], 0, self._num_test_x_cache)
# For these x indices, find class probabilities.
probs = self._test_probs[test_x_indices, :]
chex.assert_shape(probs, [self._tau, self._num_classes])
# For these x indices, find the corresponding x test.
x_test = self._x_test[test_x_indices, :]
chex.assert_shape(x_test, [self._tau, self._input_dim])
def sample_output(key: chex.PRNGKey, p: chex.Array) -> chex.Array:
"""Samples a single output for a single key, for single class probs."""
return jax.random.choice(key, self._num_classes, shape=(1,), p=p)
y_keys = jax.random.split(y_key, self._tau)
y_test = jax.vmap(sample_output)(y_keys, probs)
data = testbed_base.Data(x=x_test, y=y_test)
chex.assert_shape(data.x, [self._tau, self._input_dim])
chex.assert_shape(data.y, [self._tau, 1])
# Compute the log likelihood with respect to the environment
log_likelihood = metrics.categorical_log_likelihood(probs, y_test)
return data, log_likelihood
return jax.jit(sample_test_data)(key)
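if __name__ == '__main__':
  # Smoke test (not part of the original module): build a small environment
  # and draw one test batch of tau examples.
  x_key, env_key, test_key = jax.random.split(jax.random.PRNGKey(0), 3)
  demo_env = GPClassificationEnvLikelihood(
      kernel_fn=nt_kernels.make_benchmark_kernel(),
      x_train=jax.random.normal(x_key, [10, 2]),
      x_test=jax.random.normal(test_key, [20, 2]),
      key=env_key,
      tau=1,
  )
  demo_data, demo_ll = demo_env.test_data(jax.random.PRNGKey(1))
  print(demo_data.x.shape, demo_data.y.shape, float(demo_ll))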
|
neural_testbed-master
|
neural_testbed/generative/gp_classification_envlikelihood.py
|
# pylint: disable=g-bad-file-header
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Functions to sanity-check output in 1D plots."""
from typing import Dict
import chex
import haiku as hk
import jax
from neural_testbed import base as testbed_base
from neural_testbed import likelihood
from neural_testbed.generative import classification_envlikelihood
from neural_testbed.generative import gp_regression
from neural_testbed.generative import nt_kernels
import numpy as np
import pandas as pd
import plotnine as gg
def set_gg_theme():
"""Sets the global ggplot theme."""
try:
# TODO(author2): Understand why this is causing errors in testing.
gg.theme_set(gg.theme_bw(base_size=16, base_family='serif'))
gg.theme_update(figure_size=(12, 8), panel_spacing=0.5)
except RuntimeError:
pass
def sanity_plots(
true_model: testbed_base.TestbedProblem,
enn_sampler: testbed_base.EpistemicSampler,
) -> Dict[str, gg.ggplot]:
"""Sanity check plots for output of GP testbed output."""
set_gg_theme()
if hasattr(true_model, 'problem'):
true_model = true_model.problem # Removing logging wrappers
prior = true_model.prior_knowledge
# Specialized plotting for the 2D classification infra.
if prior.num_classes == 2 and prior.input_dim == 2:
# TODO(author2): annotate true_model as classification.
if not hasattr(true_model, 'data_sampler'):
      raise ValueError('2D plotting requires the model to expose a data_sampler.')
problem = true_model.data_sampler
return generate_2d_plots(problem, enn_sampler) # pytype:disable=wrong-arg-types
else:
return {'enn': sanity_1d(true_model, enn_sampler)}
def sanity_1d(true_model: testbed_base.TestbedProblem,
enn_sampler: testbed_base.EpistemicSampler) -> gg.ggplot:
"""Sanity check to plot 1D representation of the GP testbed output."""
set_gg_theme()
if hasattr(true_model, 'problem'):
true_model = true_model.problem # Removing logging wrappers
if not hasattr(true_model, 'data_sampler'):
return gg.ggplot()
if true_model.prior_knowledge.num_classes == 1:
gp_model = true_model.data_sampler
if not isinstance(gp_model, gp_regression.GPRegression):
print('WARNING: no plot implemented')
return gg.ggplot()
return plot_1d_regression(gp_model, enn_sampler)
else:
if not isinstance(true_model, likelihood.SampleBasedTestbed):
raise ValueError('Unrecognised testbed for classification plot.')
return plot_1d_classification(true_model, enn_sampler)
def _gen_samples(enn_sampler: testbed_base.EpistemicSampler,
x: chex.Array,
num_samples: int,
categorical: bool = False) -> pd.DataFrame:
"""Generate posterior samples at x (not implemented for all posterior)."""
# Generate the samples
data = []
rng = hk.PRNGSequence(jax.random.PRNGKey(seed=0))
for seed in range(num_samples):
net_out = enn_sampler(x, next(rng))
y = jax.nn.softmax(net_out)[:, 1] if categorical else net_out[:, 0]
data.append(pd.DataFrame({'x': x[:, 0], 'y': y, 'seed': seed}))
sample_df = pd.concat(data)
# Aggregate the samples for plotting
def pct_95(x):
return np.percentile(x, 95)
def pct_5(x):
return np.percentile(x, 5)
enn_df = (sample_df.groupby('x')['y']
.agg([np.mean, np.std, pct_5, pct_95]).reset_index())
enn_df = enn_df.rename({'mean': 'y'}, axis=1)
enn_df['method'] = 'enn'
return enn_df
def plot_1d_regression(gp_model: gp_regression.GPRegression,
enn_sampler: testbed_base.EpistemicSampler,
num_samples: int = 100) -> gg.ggplot:
"""Plots 1D regression with confidence intervals."""
# Training data
train_data = gp_model.train_data
df = pd.DataFrame({'x': train_data.x[:, 0], 'y': train_data.y[:, 0]})
# Posterior data
posterior_df = pd.DataFrame({
'x': gp_model.x_test[:, 0],
'y': gp_model.test_mean[:, 0],
'std': np.sqrt(np.diag(gp_model.test_cov)),
})
posterior_df['method'] = 'gp'
# ENN data
enn_df = _gen_samples(enn_sampler, gp_model.x_test, num_samples)
p = (gg.ggplot(pd.concat([posterior_df, enn_df]))
+ gg.aes(x='x', y='y', ymin='y-std', ymax='y+std', group='method')
+ gg.geom_ribbon(gg.aes(fill='method'), alpha=0.25)
+ gg.geom_line(gg.aes(colour='method'), size=2)
+ gg.geom_point(gg.aes(x='x', y='y'), data=df, size=4, inherit_aes=False)
+ gg.scale_colour_manual(['#e41a1c', '#377eb8'])
+ gg.scale_fill_manual(['#e41a1c', '#377eb8'])
)
return p
def plot_1d_classification(true_model, # TODO(author2): add typing
enn_sampler: testbed_base.EpistemicSampler,
num_samples: int = 100) -> gg.ggplot:
"""Plots 1D classification with ENN samples."""
x, y = true_model.train_data
# Pulling out the training data
df = pd.DataFrame({'x': x[:, 0], 'y': y[:, 0]})
# Generate samples from the ENN at 1000 randomly generated test datapoints.
def gen_test(key: chex.PRNGKey) -> testbed_base.Data:
data, _ = true_model.data_sampler.test_data(key)
return testbed_base.Data(x=data.x[0, :], y=data.y[0, :])
data_keys = jax.random.split(jax.random.PRNGKey(seed=0), 1000)
data = jax.jit(jax.vmap(gen_test))(data_keys)
enn_df = _gen_samples(enn_sampler, data.x, num_samples, categorical=True)
# Calculate the true function distribution
x = true_model.data_sampler.test_x
_, input_dim = x.shape
prob_df = pd.DataFrame({
'x': x[:, 0],
'y': true_model.data_sampler.probabilities[:, 1],
})
prob_df['std'] = 0
prob_df['method'] = 'true_function'
p = (gg.ggplot(pd.concat([prob_df, enn_df]))
+ gg.aes(x='x', y='y', ymin='pct_5', ymax='pct_95', group='method')
+ gg.geom_hline(yintercept=0, alpha=0.2, linetype='dashed')
+ gg.geom_hline(yintercept=1, alpha=0.2, linetype='dashed')
+ gg.geom_ribbon(gg.aes(fill='method'), alpha=0.25)
+ gg.geom_line(gg.aes(colour='method'), size=2)
+ gg.geom_point(gg.aes(x='x', y='y'), data=df[df.y == 1],
size=5, colour='#377eb8', inherit_aes=False)
+ gg.geom_point(gg.aes(x='x', y='y'), data=df[df.y == 0],
size=5, colour='#e41a1c', inherit_aes=False)
+ gg.scale_colour_manual(['green', 'black'])
+ gg.scale_fill_manual(['green', 'black'])
+ gg.ylab('probability of class 1')
+ gg.xlab(f'x[0] of {input_dim}-dimensional input.')
)
return p
def investigate_1d_regression_model(
kernel_fn: nt_kernels.AnalyticKernelFn = nt_kernels.make_benchmark_kernel(),
num_train: int = 5) -> gg.ggplot:
"""Plots the 1D posterior for random training data in regression model.
This plot is effectively a poor-man's test... just to be able to visually
inspect the qualitative behaviour of the 1D regression posterior.
Args:
kernel_fn: kernel function defining the GP.
num_train: number of training points.
Returns:
gg.ggplot investigation of 1D posterior.
"""
x_test = np.random.randn(1000, 1)
data_sampler = gp_regression.GPRegression(
kernel_fn,
x_train=np.random.randn(num_train, 1),
x_test=x_test,
key=jax.random.PRNGKey(13),
noise_std=0.3,
tau=100,
)
train_data = data_sampler.train_data
df = pd.DataFrame({'x': train_data.x[:, 0], 'y': train_data.y[:, 0]})
plt_df = pd.DataFrame({
'x': x_test[:, 0],
'mean': data_sampler._test_mean[:, 0], # pylint:disable=protected-access
'std': np.sqrt(np.diag(data_sampler._test_cov)), # pylint:disable=protected-access
})
p = (gg.ggplot(plt_df)
+ gg.aes(x='x')
+ gg.geom_line(gg.aes(y='mean', ymin='mean-std', ymax='mean+std'),
colour='red', size=2)
+ gg.geom_ribbon(gg.aes(y='mean', ymin='mean-std', ymax='mean+std'),
alpha=0.25, fill='red')
+ gg.geom_point(gg.aes(y='y'), data=df, size=3))
return p
############################################################
# Specialized plots for 2D problems
BLUE = '#084594'
RED = '#e41a1c'
def gen_2d_grid(plot_range: float) -> np.ndarray:
"""Generates a 2D grid for data in a certain_range."""
data = []
x_range = np.linspace(-plot_range, plot_range)
for x1 in x_range:
for x2 in x_range:
data.append((x1, x2))
return np.vstack(data)
def _gen_samples_2d(enn_sampler: testbed_base.EpistemicSampler,
x: chex.Array,
num_samples: int,
categorical: bool = False) -> pd.DataFrame:
"""Generate posterior samples at x (not implemented for all posterior)."""
# Generate the samples
data = []
rng = hk.PRNGSequence(jax.random.PRNGKey(seed=0))
for seed in range(num_samples):
net_out = enn_sampler(x, next(rng))
y = jax.nn.softmax(net_out)[:, 1] if categorical else net_out[:, 0]
df = pd.DataFrame({'x0': x[:, 0], 'x1': x[:, 1], 'y': y, 'seed': seed})
data.append(df)
return pd.concat(data)
def _agg_samples_2d(sample_df: pd.DataFrame) -> pd.DataFrame:
"""Aggregate ENN samples for plotting."""
def pct_95(x):
return np.percentile(x, 95)
def pct_5(x):
return np.percentile(x, 5)
enn_df = (sample_df.groupby(['x0', 'x1'])['y']
.agg([np.mean, np.std, pct_5, pct_95]).reset_index())
enn_df = enn_df.rename({'mean': 'y'}, axis=1)
enn_df['method'] = 'enn'
return enn_df
def _gen_problem_2d(
problem: classification_envlikelihood.ClassificationEnvLikelihood,
x: chex.Array,
) -> pd.DataFrame:
"""Generate underlying problem dataset."""
assert x.shape[1] == 2
logits = problem._logit_fn(x) # pylint:disable=protected-access
test_probs = jax.nn.softmax(logits)[:, 1]
np_data = np.hstack([x, test_probs[:, None]])
problem_df = pd.DataFrame(np_data, columns=['x0', 'x1', 'y'])
problem_df['method'] = 'true_function'
return problem_df
def _make_train_2d(
problem: classification_envlikelihood.ClassificationEnvLikelihood):
data = problem.train_data
return pd.DataFrame(np.hstack([data.x, data.y]), columns=['x0', 'x1', 'y'])
def _plot_default_2d(problem_df: pd.DataFrame,
enn_df: pd.DataFrame,
train_df: pd.DataFrame) -> gg.ggplot:
"""Side-by-side plot comparing ENN and true function."""
p = (gg.ggplot(pd.concat([problem_df, enn_df]))
+ gg.aes(x='x0', y='x1', fill='y')
+ gg.geom_tile()
+ gg.geom_point(data=train_df, size=3, stroke=1.5, alpha=0.7)
+ gg.scale_fill_gradient2(BLUE, 'white', RED, midpoint=0.5)
+ gg.facet_wrap('method')
+ gg.theme(figure_size=(12, 5))
+ gg.ggtitle('Comparing ENN and true probabilities')
)
return p
def _plot_expanded_2d(problem_df: pd.DataFrame,
enn_df: pd.DataFrame,
train_df: pd.DataFrame) -> gg.ggplot:
"""Side-by-side plot comparing ENN and true function with pct_5, pct_95."""
plt_df = pd.melt(enn_df, id_vars=['x0', 'x1'],
value_vars=['y', 'pct_5', 'pct_95'])
plt_df['variable'] = plt_df.variable.apply(lambda x: 'enn:' + x)
problem_df['value'] = problem_df['y']
problem_df['variable'] = 'true_function'
p = (gg.ggplot(pd.concat([problem_df, plt_df]))
+ gg.aes(x='x0', y='x1', fill='value')
+ gg.geom_tile()
+ gg.geom_point(gg.aes(fill='y'), data=train_df, size=3, stroke=1.5,
alpha=0.7)
+ gg.scale_fill_gradient2(BLUE, 'white', RED, midpoint=0.5)
+ gg.facet_wrap('variable')
+ gg.theme(figure_size=(12, 10))
+ gg.ggtitle('Comparing ENN and true probabilities'))
return p
def _plot_error_2d(problem_df: pd.DataFrame, enn_df: pd.DataFrame,
train_df: pd.DataFrame) -> gg.ggplot:
"""Single plot of error in ENN."""
plt_df = pd.merge(
enn_df, problem_df, on=['x0', 'x1'], suffixes=('_enn', '_problem'))
p = (gg.ggplot(plt_df)
+ gg.aes(x='x0', y='x1', fill='y_problem - y_enn')
+ gg.scale_fill_gradient2(BLUE, 'white', RED, midpoint=0)
+ gg.geom_tile()
+ gg.geom_point(gg.aes(x='x0', y='x1', fill='y'), data=train_df, size=3,
stroke=1.5, inherit_aes=False, show_legend=False)
+ gg.theme(figure_size=(7, 5))
+ gg.ggtitle('Error in ENN mean estimation')
)
return p
def _plot_std_2d(enn_df: pd.DataFrame,
train_df: pd.DataFrame) -> gg.ggplot:
"""Single plot of standard deviation in ENN predications."""
p = (gg.ggplot(enn_df)
+ gg.aes(x='x0', y='x1', fill='std')
+ gg.scale_fill_gradient2('white', '#005a32', '#ffff33', midpoint=0.1)
+ gg.geom_tile()
+ gg.geom_point(gg.aes(x='x0', y='x1', colour='y'), data=train_df,
size=3, inherit_aes=False, show_legend=False, alpha=0.7)
+ gg.scale_colour_gradient(BLUE, RED, limits=[0, 1])
+ gg.theme(figure_size=(7, 5))
       + gg.ggtitle('Standard deviation in ENN predictions')
)
return p
def _plot_enn_samples_2d(sample_df: pd.DataFrame,
train_df: pd.DataFrame) -> gg.ggplot:
"""Plot realizations of enn samples."""
p = (gg.ggplot(sample_df)
+ gg.aes(x='x0', y='x1', fill='y')
+ gg.geom_tile()
+ gg.geom_point(data=train_df, size=3, stroke=1.5)
+ gg.scale_fill_gradient2(BLUE, 'white', RED, midpoint=0.5)
+ gg.facet_wrap('seed', labeller='label_both')
+ gg.theme(figure_size=(18, 12), panel_spacing=0.1)
+ gg.ggtitle('ENN sample realizations')
)
return p
def generate_2d_plots(
true_model: classification_envlikelihood.ClassificationEnvLikelihood,
enn_sampler: testbed_base.EpistemicSampler,
num_samples: int = 20) -> Dict[str, gg.ggplot]:
"""Generates a sequence of plots for debugging."""
x = gen_2d_grid(3)
sample_df = _gen_samples_2d(enn_sampler, x, num_samples, categorical=True)
enn_df = _agg_samples_2d(sample_df)
problem_df = _gen_problem_2d(true_model, x)
train_df = _make_train_2d(true_model)
return {
'enn': _plot_default_2d(problem_df, enn_df, train_df),
'more_enn': _plot_expanded_2d(problem_df, enn_df, train_df),
'err_enn': _plot_error_2d(problem_df, enn_df, train_df),
'std_enn': _plot_std_2d(enn_df, train_df),
'sample_enn': _plot_enn_samples_2d(sample_df, train_df),
}
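if __name__ == '__main__':
  # Visual sanity check (not part of the original module): render the 1D GP
  # posterior investigation plot using its default benchmark kernel.
  demo_plot = investigate_1d_regression_model(num_train=5)
  print(demo_plot)  # plotnine draws the figure when the object is printed.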
|
neural_testbed-master
|
neural_testbed/generative/plotting.py
|
# pylint: disable=g-bad-file-header
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for neural_testbed.generative.gp_regression."""
import itertools
from absl.testing import absltest
from absl.testing import parameterized
import haiku as hk
import jax.config
from neural_testbed.generative import gp_regression
from neural_testbed.generative import nt_kernels
import numpy as np
# This is necessary to prevent nans.
# TODO(author6): Look into fixing the nans and removing this.
jax.config.update('jax_enable_x64', True)
class GPRegressionTest(parameterized.TestCase):
@parameterized.parameters(itertools.product([3, 10], [1, 3], [1, 3]))
def test_valid_data(self, num_train: int, input_dim: int, tau: int):
np.random.seed(0)
noise_std = 0.1
rng = hk.PRNGSequence(0)
gp_model = gp_regression.GPRegression(
kernel_fn=nt_kernels.make_benchmark_kernel(),
x_train=np.random.randn(num_train, input_dim),
x_test=np.random.randn(10, input_dim),
key=next(rng),
tau=tau,
noise_std=noise_std,
)
# Check that the training data is reasonable.
train_data = gp_model.train_data
assert train_data.x.shape == (num_train, input_dim)
assert train_data.y.shape == (num_train, 1)
assert np.all(~np.isnan(train_data.x))
assert np.all(~np.isnan(train_data.y))
# Check that the testing data is reasonable.
for _ in range(2):
test_data, log_likelihood = gp_model.test_data(next(rng))
assert np.isfinite(log_likelihood)
assert test_data.x.shape == (tau, input_dim)
assert test_data.y.shape == (tau, 1)
assert np.all(~np.isnan(test_data.x))
assert np.all(~np.isnan(test_data.y))
@parameterized.parameters(itertools.product([1, 10, 20], [10, 20]))
def test_not_all_test_data_same_x(self, num_train: int, num_test: int):
"""Generates testing data and checks not all the same x value."""
np.random.seed(0)
num_test_seeds = 10
input_dim = 2
rng = hk.PRNGSequence(0)
gp_model = gp_regression.GPRegression(
kernel_fn=nt_kernels.make_benchmark_kernel(),
x_train=np.random.randn(num_train, input_dim),
x_test=np.random.randn(num_test, input_dim),
key=next(rng),
tau=1,
)
num_distinct_x = 0
reference_data, _ = gp_model.test_data(key=next(rng))
for _ in range(num_test_seeds):
test_data, _ = gp_model.test_data(key=next(rng))
if not np.all(np.isclose(test_data.x, reference_data.x)):
num_distinct_x += 1
assert num_distinct_x > 0
if __name__ == '__main__':
absltest.main()
|
neural_testbed-master
|
neural_testbed/generative/gp_regression_test.py
|
# pylint: disable=g-bad-file-header
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for neural_testbed.generative.gp_regression_envlikelihood."""
import itertools
from absl.testing import absltest
from absl.testing import parameterized
import haiku as hk
from neural_testbed.generative import gp_regression_envlikelihood
from neural_testbed.generative import nt_kernels
import numpy as np
class GPRegressionTest(parameterized.TestCase):
@parameterized.parameters(itertools.product([3, 10], [1, 3], [1, 3]))
def test_valid_data(self, num_train: int, input_dim: int, tau: int):
np.random.seed(0)
noise_std = 0.1
rng = hk.PRNGSequence(0)
gp_model = gp_regression_envlikelihood.GPRegressionEnvLikelihood(
kernel_fn=nt_kernels.make_benchmark_kernel(),
x_train=np.random.randn(num_train, input_dim),
x_test=np.random.randn(10, input_dim),
key=next(rng),
tau=tau,
noise_std=noise_std,
)
# Check that the training data is reasonable.
train_data = gp_model.train_data
assert train_data.x.shape == (num_train, input_dim)
assert train_data.y.shape == (num_train, 1)
assert np.all(~np.isnan(train_data.x))
assert np.all(~np.isnan(train_data.y))
# Check that the testing data is reasonable.
for _ in range(3):
test_data, log_likelihood = gp_model.test_data(next(rng))
assert np.isfinite(log_likelihood)
assert test_data.x.shape == (tau, input_dim)
assert test_data.y.shape == (tau, 1)
assert np.all(~np.isnan(test_data.x))
assert np.all(~np.isnan(test_data.y))
@parameterized.parameters(itertools.product([1, 10, 100], [10, 20]))
def test_not_all_test_data_same_x(self, num_train: int, num_test: int):
"""Generates testing data and checks not all the same x value."""
np.random.seed(0)
num_test_seeds = 10
input_dim = 2
rng = hk.PRNGSequence(0)
gp_model = gp_regression_envlikelihood.GPRegressionEnvLikelihood(
kernel_fn=nt_kernels.make_benchmark_kernel(),
x_train=np.random.randn(num_train, input_dim),
x_test=np.random.randn(num_test, input_dim),
key=next(rng),
tau=1,
)
num_distinct_x = 0
reference_data, _ = gp_model.test_data(key=next(rng))
for _ in range(num_test_seeds):
test_data, _ = gp_model.test_data(key=next(rng))
if not np.all(np.isclose(test_data.x, reference_data.x)):
num_distinct_x += 1
assert num_distinct_x > 0
if __name__ == '__main__':
absltest.main()
|
neural_testbed-master
|
neural_testbed/generative/gp_regression_envlikelihood_test.py
|
# pylint: disable=g-bad-file-header
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Exposing the public methods of generative data models."""
# Classification w.r.t environment likelihood
from neural_testbed.generative.classification_envlikelihood import ClassificationEnvLikelihood
from neural_testbed.generative.classification_envlikelihood import LogitFn
from neural_testbed.generative.classification_envlikelihood import make_gaussian_sampler
from neural_testbed.generative.classification_envlikelihood import make_polyadic_sampler
from neural_testbed.generative.classification_envlikelihood import make_weibull_sampler
from neural_testbed.generative.classification_envlikelihood import XGenerator
# Factories
from neural_testbed.generative.factories import make_2layer_mlp_logit_fn
from neural_testbed.generative.factories import make_filtered_gaussian_data
# Classification with GP likelihood
from neural_testbed.generative.gp_classification_envlikelihood import GPClassificationEnvLikelihood
# Regression
from neural_testbed.generative.gp_regression import GPRegression
from neural_testbed.generative.gp_regression import TestbedGPRegression
# Regression w.r.t environment likelihood
from neural_testbed.generative.gp_regression_envlikelihood import GPRegressionEnvLikelihood
# Neural tangents kernels
from neural_testbed.generative.nt_kernels import KernelCtor
from neural_testbed.generative.nt_kernels import make_benchmark_kernel
from neural_testbed.generative.nt_kernels import make_linear_kernel
from neural_testbed.generative.nt_kernels import MLPKernelCtor
# Plotting
from neural_testbed.generative.plotting import generate_2d_plots
from neural_testbed.generative.plotting import sanity_1d
from neural_testbed.generative.plotting import sanity_plots
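# Usage sketch (illustrative; not part of the original module): downstream
# code can rely on these re-exports rather than deep module paths, e.g.
#
#   from neural_testbed import generative
#   kernel_fn = generative.make_benchmark_kernel()
#   plots = generative.sanity_plots(true_model, enn_sampler)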
|
neural_testbed-master
|
neural_testbed/generative/__init__.py
|
# pylint: disable=g-bad-file-header
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""GP inference in a regression setting with respect to the environment likelihood."""
from typing import Tuple
import chex
import haiku as hk
import jax
import jax.numpy as jnp
from neural_testbed import base as testbed_base
from neural_testbed import likelihood
from neural_testbed.generative import nt_kernels
class GPRegressionEnvLikelihood(likelihood.GenerativeDataSampler):
"""GP with gaussian noise output."""
def __init__(self,
kernel_fn: nt_kernels.AnalyticKernelFn,
x_train: chex.Array,
x_test: chex.Array,
key: chex.PRNGKey,
tau: int = 1,
noise_std: float = 1,
kernel_ridge: float = 1e-6,
ntk: bool = False):
# Checking the dimensionality of our data coming in.
num_train, input_dim = x_train.shape
num_test_x_cache, input_dim_test = x_test.shape
assert input_dim == input_dim_test
rng = hk.PRNGSequence(key)
self._tau = tau
self._input_dim = input_dim
self._x_train = jnp.array(x_train)
self._x_test = jnp.array(x_test)
self._num_train = num_train
self._num_test_x_cache = num_test_x_cache
self._noise_std = noise_std
self._kernel_ridge = kernel_ridge
# Generate environment function across combined_x = [x_train, x_test]
mean = jnp.zeros(num_train + num_test_x_cache)
get_kernel = 'ntk' if ntk else 'nngp'
combined_x = jnp.vstack([self._x_train, self._x_test])
kernel = kernel_fn(combined_x, x2=None, get=get_kernel)
kernel += kernel_ridge * jnp.eye(len(kernel))
y_function = jax.random.multivariate_normal(next(rng), mean, kernel)
chex.assert_shape(y_function, [num_train + num_test_x_cache,])
# Form the training data
y_noise = jax.random.normal(next(rng), [num_train, 1]) * noise_std
y_train = y_function[:num_train, None] + y_noise
self._train_data = testbed_base.Data(x_train, y_train)
chex.assert_shape(y_train, [num_train, 1])
# Form the testing data
self._y_test_function = y_function[-num_test_x_cache:]
chex.assert_shape(self._y_test_function, [num_test_x_cache,])
@property
def x_test(self) -> chex.Array:
return self._x_test
@property
def train_data(self) -> testbed_base.Data:
return self._train_data
def test_data(self, key: chex.PRNGKey) -> Tuple[testbed_base.Data, float]:
"""Generates test data and evaluates log likelihood w.r.t. environment.
    The test data that is output will contain tau examples.
We wanted to "pass" tau here... but ran into jax.jit issues.
Args:
key: Random number generator key.
Returns:
Tuple of data (with tau examples) and log-likelihood under posterior.
"""
def sample_test_data(key: chex.PRNGKey) -> Tuple[testbed_base.Data, float]:
x_key, y_key = jax.random.split(key, 2)
# Sample tau x's from the testing cache for evaluation
test_x_indices = jax.random.randint(
x_key, [self._tau], 0, self._num_test_x_cache)
x_test = self._x_test[test_x_indices]
chex.assert_shape(x_test, [self._tau, self._input_dim])
# Sample y_function for the test data
y_function = self._y_test_function[test_x_indices]
y_noise = jax.random.normal(y_key, [self._tau, 1]) * self._noise_std
y_test = y_function[:, None] + y_noise
data = testbed_base.Data(x_test, y_test)
chex.assert_shape(y_test, [self._tau, 1])
# Compute the log likelihood with respect to the environment
err = y_noise
chex.assert_shape(err, [self._tau, 1])
cov = self._noise_std ** 2 * jnp.eye(self._tau)
chex.assert_shape(cov, [self._tau, self._tau])
log_likelihood = likelihood.gaussian_log_likelihood(err, cov)
return data, log_likelihood
return jax.jit(sample_test_data)(key)
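if __name__ == '__main__':
  # Smoke test (not part of the original module): a tiny 1D regression
  # environment; test_data returns tau examples plus their log likelihood.
  x_key, env_key, test_key = jax.random.split(jax.random.PRNGKey(0), 3)
  demo_env = GPRegressionEnvLikelihood(
      kernel_fn=nt_kernels.make_benchmark_kernel(),
      x_train=jax.random.normal(x_key, [10, 1]),
      x_test=jax.random.normal(test_key, [20, 1]),
      key=env_key,
      tau=3,
      noise_std=0.1,
  )
  demo_data, demo_ll = demo_env.test_data(jax.random.PRNGKey(1))
  print(demo_data.x.shape, demo_data.y.shape, float(demo_ll))  # (3, 1) (3, 1)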
|
neural_testbed-master
|
neural_testbed/generative/gp_regression_envlikelihood.py
|
# pylint: disable=g-bad-file-header
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Convenient factory methods to help build generative models."""
from typing import Callable
import chex
import haiku as hk
import jax
import jax.numpy as jnp
from neural_testbed import base as testbed_base
from neural_testbed.generative import classification_envlikelihood as class_env
def make_2layer_mlp_logit_fn(
input_dim: int,
temperature: float,
hidden: int,
num_classes: int,
key: chex.PRNGKey,
activation: Callable[[chex.Array], chex.Array] = jax.nn.relu,
) -> class_env.LogitFn:
"""Factory method to create a generative model around a 2-layer MLP."""
# Generating the logit function
def net_fn(x: chex.Array) -> chex.Array:
"""Defining the generative model MLP."""
y = hk.Linear(
output_size=hidden,
b_init=hk.initializers.RandomNormal(1./jnp.sqrt(input_dim)),
)(x)
y = activation(y)
y = hk.Linear(hidden)(y)
y = activation(y)
return hk.Linear(num_classes)(y)
transformed = hk.without_apply_rng(hk.transform(net_fn))
dummy_input = jnp.zeros([1, input_dim])
params = transformed.init(key, dummy_input)
def forward(x: chex.Array) -> chex.Array:
return transformed.apply(params, x) / temperature
logit_fn = jax.jit(forward)
return logit_fn
def make_filtered_gaussian_data(
input_dim: int,
logit_fn: class_env.LogitFn,
reject_prob: float,
fraction_rejected_classes: float,
num_samples: int,
key: chex.PRNGKey,
max_itr: int = 30) -> testbed_base.Data:
"""Make a gaussian sampler that filters samples based on class labels."""
# TODO(author2): WARNING - you cannot jit this function!
def sample_gaussian_data(num_samples, key):
data, _ = class_env.sample_gaussian_data(
logit_fn=logit_fn,
x_generator=class_env.make_gaussian_sampler(input_dim),
num_train=num_samples,
key=key,)
return data
rng = hk.PRNGSequence(key)
dummy_logits = logit_fn(jnp.zeros([10, input_dim]))
num_classes = dummy_logits.shape[1]
num_rejected_classes = int(fraction_rejected_classes * num_classes)
if num_rejected_classes == 0 or reject_prob == 0:
return sample_gaussian_data(num_samples, next(rng))
rejected_classes = jax.random.randint(
next(rng), shape=(num_rejected_classes,), minval=0, maxval=num_classes)
x_all = []
y_all = []
itr = 0
total_samples = 0
samples_per_itr = num_samples * 2
while (total_samples < num_samples) and (itr < max_itr):
data = sample_gaussian_data(samples_per_itr, next(rng))
x, y = data.x, data.y
mask_reject = jnp.isin(y.squeeze(), rejected_classes)
uniform_probs = jax.random.uniform(next(rng), shape=(samples_per_itr,))
mask_reject = mask_reject & (uniform_probs < reject_prob)
x = x[~mask_reject]
y = y[~mask_reject]
x_all.append(x)
y_all.append(y)
itr += 1
total_samples += jnp.sum(~mask_reject)
if total_samples < num_samples:
    raise ValueError('Failed to sample the required number of data points.')
x_samples = jnp.concatenate(x_all)
y_samples = jnp.concatenate(y_all)
return testbed_base.Data(x_samples[:num_samples], y_samples[:num_samples])
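if __name__ == '__main__':
  # Smoke test (not part of the original module): build a logit function and
  # evaluate it on a batch of zero inputs.
  demo_logit_fn = make_2layer_mlp_logit_fn(
      input_dim=2,
      temperature=0.5,
      hidden=16,
      num_classes=3,
      key=jax.random.PRNGKey(0),
  )
  demo_logits = demo_logit_fn(jnp.zeros([4, 2]))
  print(demo_logits.shape)  # -> (4, 3)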
|
neural_testbed-master
|
neural_testbed/generative/factories.py
|
# pylint: disable=g-bad-file-header
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""GP regression testbed problem.
Uses the neural_tangent library to compute the posterior mean and covariance
for regression problem in closed form.
"""
import dataclasses
from typing import Tuple
import chex
import haiku as hk
import jax
import jax.numpy as jnp
import neural_tangents as nt
from neural_testbed import base as testbed_base
from neural_testbed import likelihood
from neural_testbed.generative import nt_kernels
class GPRegression(likelihood.GenerativeDataSampler):
"""GP with gaussian noise output."""
def __init__(self,
kernel_fn: nt_kernels.AnalyticKernelFn,
x_train: chex.Array,
x_test: chex.Array,
key: chex.PRNGKey,
tau: int = 1,
noise_std: float = 1,
kernel_ridge: float = 1e-6,
ntk: bool = False):
# Checking the dimensionality of our data coming in.
num_train, input_dim = x_train.shape
num_test_x_cache, input_dim_test = x_test.shape
assert input_dim == input_dim_test
rng = hk.PRNGSequence(key)
self._tau = tau
self._input_dim = input_dim
self._x_train = jnp.array(x_train)
self._x_test = jnp.array(x_test)
self._num_train = num_train
self._num_test_x_cache = num_test_x_cache
self._noise_std = noise_std
self._kernel_ridge = kernel_ridge
# Form the training data
mean = jnp.zeros(num_train)
k_train_train = kernel_fn(self._x_train, x2=None, get='nngp')
k_train_train += kernel_ridge * jnp.eye(num_train)
y_function = jax.random.multivariate_normal(next(rng), mean, k_train_train)
y_noise = jax.random.normal(next(rng), [num_train, 1]) * noise_std
y_train = y_function[:, None] + y_noise
self._train_data = testbed_base.Data(x_train, y_train)
chex.assert_shape(y_train, [num_train, 1])
# Form the posterior prediction at cached test data
predict_fn = nt.predict.gradient_descent_mse_ensemble(
kernel_fn, x_train, y_train, diag_reg=(noise_std**2))
self._test_mean, self._test_cov = predict_fn(
t=None, x_test=self._x_test, get='nngp', compute_cov=True)
self._test_cov += kernel_ridge * jnp.eye(num_test_x_cache)
chex.assert_shape(self._test_mean, [num_test_x_cache, 1])
chex.assert_shape(self._test_cov, [num_test_x_cache, num_test_x_cache])
@property
def x_test(self) -> chex.Array:
return self._x_test
@property
def test_mean(self) -> chex.Array:
return self._test_mean
@property
def test_cov(self) -> chex.Array:
return self._test_cov
@property
def train_data(self) -> testbed_base.Data:
return self._train_data
def test_data(self, key: chex.PRNGKey) -> Tuple[testbed_base.Data, float]:
"""Generates test data and evaluates log likelihood under posterior.
The test data that is output will be of length tau examples.
We wanted to "pass" tau here but ran into jax.jit issues.
Args:
key: Random number generator key.
Returns:
Tuple of data (with tau examples) and log-likelihood under posterior.
"""
def sample_test_data(key: chex.PRNGKey) -> Tuple[testbed_base.Data, float]:
x_key, fn_key, y_key = jax.random.split(key, 3)
# Sample tau x's from the testing cache for evaluation
test_x_indices = jax.random.randint(
x_key, [self._tau], 0, self._num_test_x_cache)
x_test = self._x_test[test_x_indices]
chex.assert_shape(x_test, [self._tau, self._input_dim])
# Sample the true function from the posterior mean
nngp_mean = self._test_mean[test_x_indices, 0]
chex.assert_shape(nngp_mean, [self._tau])
nngp_cov = self._test_cov[jnp.ix_(test_x_indices, test_x_indices)]
chex.assert_shape(nngp_cov, [self._tau, self._tau])
sampled_fn = jax.random.multivariate_normal(fn_key, nngp_mean, nngp_cov)
y_noise = jax.random.normal(y_key, [self._tau, 1]) * self._noise_std
y_test = sampled_fn[:, None] + y_noise
data = testbed_base.Data(x_test, y_test)
chex.assert_shape(y_test, [self._tau, 1])
# Compute the log likelihood (under both posterior and noise)
err = y_test - nngp_mean[:, None]
chex.assert_shape(err, [self._tau, 1])
cov = nngp_cov + self._noise_std ** 2 * jnp.eye(self._tau)
chex.assert_shape(cov, [self._tau, self._tau])
log_likelihood = likelihood.gaussian_log_likelihood(err, cov)
return data, log_likelihood
return jax.jit(sample_test_data)(key)
@dataclasses.dataclass
class TestbedGPRegression(testbed_base.TestbedProblem):
"""Wraps GPRegression sampler for testbed with exact posterior inference."""
data_sampler: GPRegression
prior: testbed_base.PriorKnowledge
key: chex.PRNGKey
num_enn_samples: int = 100
std_ridge: float = 1e-3
@property
def train_data(self) -> testbed_base.Data:
return self.data_sampler.train_data
@property
def prior_knowledge(self) -> testbed_base.PriorKnowledge:
return self.prior
def evaluate_quality(
self,
enn_sampler: testbed_base.EpistemicSampler) -> testbed_base.ENNQuality:
"""Computes KL estimate on mean functions for tau=1 only."""
# Extract useful quantities from the gp sampler.
x_test = self.data_sampler.x_test
num_test = x_test.shape[0]
posterior_mean = self.data_sampler.test_mean[:, 0]
posterior_std = jnp.sqrt(jnp.diag(self.data_sampler.test_cov))
posterior_std += self.std_ridge
# Compute the mean and std of ENN posterior
batched_sampler = jax.jit(jax.vmap(enn_sampler, in_axes=[None, 0]))
enn_keys = jax.random.split(self.key, self.num_enn_samples)
enn_samples = batched_sampler(x_test, enn_keys)
enn_samples = enn_samples[:, :, 0]
chex.assert_shape(enn_samples, [self.num_enn_samples, num_test])
enn_mean = jnp.mean(enn_samples, axis=0)
enn_std = jnp.std(enn_samples, axis=0) + self.std_ridge
# Compute the KL divergence between this and reference posterior
batched_kl = jax.jit(jax.vmap(_kl_gaussian))
kl_estimates = batched_kl(posterior_mean, posterior_std, enn_mean, enn_std)
chex.assert_shape(kl_estimates, [num_test])
kl_estimate = jnp.mean(kl_estimates)
return testbed_base.ENNQuality(kl_estimate)
def _kl_gaussian(
mean_1: float, std_1: float, mean_2: float, std_2: float) -> float:
"""Computes the KL(P_1 || P_2) for P_1,P_2 univariate Gaussian."""
log_term = jnp.log(std_2 / std_1)
frac_term = (std_1 ** 2 + (mean_1 - mean_2) ** 2) / (2 * std_2 ** 2)
return log_term + frac_term - 0.5 # pytype: disable=bad-return-type # jax-types
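# Sanity-check sketch for _kl_gaussian (illustrative only): KL between
# identical Gaussians is 0, and KL(N(0, 1) || N(1, 1)) = 1/2 since the log
# term vanishes and the fractional term is (1 + 1) / 2.
#
#   assert _kl_gaussian(0., 1., 0., 1.) == 0.
#   assert jnp.isclose(_kl_gaussian(0., 1., 1., 1.), 0.5)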
|
neural_testbed-master
|
neural_testbed/generative/gp_regression.py
|
# pylint: disable=g-bad-file-header
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for efficient_agent.neural_testbed.generative.plotting."""
from absl.testing import absltest
from absl.testing import parameterized
import chex
import jax
import jax.numpy as jnp
from neural_testbed.generative import plotting
from neural_testbed.leaderboard import load
from neural_testbed.leaderboard import sweep
def regression_sampler(x: chex.Array, key: chex.PRNGKey) -> chex.Array:
mean = jax.random.normal(key)
return jnp.ones([x.shape[0], 1]) * mean
def classification_sampler(x: chex.Array,
key: chex.PRNGKey) -> chex.Array:
del key
return jnp.zeros([x.shape[0], 2])
class PlottingTest(parameterized.TestCase):
@parameterized.parameters([[x] for x in sweep.CLASSIFICATION_2D_TEST])
def test_2d_classification(self, problem_id: str):
"""Check that the 1d classification plot doesn't fail."""
problem = load.problem_from_id(problem_id)
_ = plotting.sanity_plots(problem, classification_sampler)
@parameterized.parameters([[x] for x in sweep.REGRESSION_TEST])
def test_1d_regression(self, problem_id: str):
"""Check that the 1d regression plot doesn't fail."""
problem = load.problem_from_id(problem_id)
_ = plotting.sanity_plots(problem, regression_sampler)
@parameterized.parameters([[x] for x in sweep.ENN_PAPER_TEST])
def test_1d_enn_paper(self, problem_id: str):
"""Check that the 1d enn_paper plot doesn't fail."""
problem = load.problem_from_id(problem_id)
_ = plotting.sanity_plots(problem, regression_sampler)
if __name__ == '__main__':
absltest.main()
|
neural_testbed-master
|
neural_testbed/generative/plotting_test.py
|
# pylint: disable=g-bad-file-header
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Classification-based testbed based around a logit_fn and x_generator."""
from typing import Callable, Optional, Tuple
import chex
from enn import metrics
import haiku as hk
import jax
import jax.numpy as jnp
from neural_testbed import base as testbed_base
from neural_testbed import likelihood
LogitFn = Callable[[chex.Array], chex.Array] # x -> logits
XGenerator = Callable[[chex.PRNGKey, int], chex.Array] # key, num_samples -> x
class ClassificationEnvLikelihood(likelihood.GenerativeDataSampler):
"""Classification-based environment-based inference."""
def __init__(self,
logit_fn: LogitFn,
x_train_generator: XGenerator,
x_test_generator: XGenerator,
num_train: int,
key: chex.PRNGKey,
override_train_data: Optional[testbed_base.Data] = None,
tau: int = 1):
rng = hk.PRNGSequence(key)
self._logit_fn = logit_fn
self._tau = tau
self._x_test_generator = x_test_generator
self._num_train = num_train
# Optionally override training data where you want to allow for training
# data that was *not* generated by the x_generator, logit_fn.
if override_train_data is None:
self._train_data, _ = sample_gaussian_data(
logit_fn, x_train_generator, num_train, next(rng))
else:
assert num_train == override_train_data.x.shape[0]
assert num_train == override_train_data.y.shape[0]
self._train_data = override_train_data
# Generate canonical x_test for DEBUGGING ONLY!!!
num_test = 1000
self._x_test = self._x_test_generator(next(rng), num_test)
    test_logits = self._logit_fn(self._x_test)  # [num_test, n_class]
chex.assert_shape(test_logits, [num_test, None])
self._test_probs = jax.nn.softmax(test_logits)
@property
def train_data(self) -> testbed_base.Data:
return self._train_data
@property
def test_x(self) -> chex.Array:
"""Canonical test data for debugging only.
This is not the test data x returned by the test data method.
"""
return self._x_test
@property
def probabilities(self) -> chex.Array:
"""Return probabilities of classes for canonical test x.
Use only for debugging/plotting purposes in conjunction with the test_x
method. The test_data method does not use the same test_x.
"""
return self._test_probs
def test_data(self, key: chex.PRNGKey) -> Tuple[testbed_base.Data, float]:
"""Generates test data and evaluates log likelihood w.r.t. environment.
The test data that is output will be of length tau examples.
We wanted to "pass" tau here... but ran into jax.jit issues.
Args:
key: Random number generator key.
Returns:
Tuple of data (with tau examples) and log-likelihood under posterior.
"""
def sample_test(k: chex.PRNGKey) -> Tuple[testbed_base.Data, float]:
return sample_gaussian_data(
self._logit_fn, self._x_test_generator, self._tau, key=k)
return jax.jit(sample_test)(key)
def make_gaussian_sampler(input_dim: int) -> XGenerator:
def gaussian_generator(key: chex.PRNGKey, num_samples: int) -> chex.Array:
return jax.random.normal(key, [num_samples, input_dim])
return gaussian_generator
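# Illustrative sketch: an XGenerator maps (key, num_samples) to x of shape
# [num_samples, input_dim], e.g.
#
#   sampler = make_gaussian_sampler(input_dim=3)
#   x = sampler(jax.random.PRNGKey(0), 5)  # -> x of shape [5, 3].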
# TODO(author2): Migrate to experimental directory.
def make_weibull_sampler(input_dim: int) -> XGenerator:
"""Returns Weibull sampler around initial reference point."""
# TODO(author3): Expose concentration and scale as parameters
concentration = jnp.log10(jnp.log2(10))
scale = 1 / (jnp.log(10)**(1 / concentration))
def weibull_generator(key: chex.PRNGKey, tau: int) -> chex.Array:
key_ref, key_dist, key_perturb = jax.random.split(key, 3)
x_ref = jax.random.normal(key_ref, [input_dim])
distances = jnp.concatenate([
jnp.zeros(1),
jax.random.weibull_min(key_dist, scale, concentration, shape=[tau - 1])
])
chex.assert_shape(distances, [tau])
perturbations = jax.random.normal(key_perturb, [tau, input_dim])
x_test = x_ref + jnp.einsum('ij, i -> ij', perturbations, distances)
chex.assert_shape(x_test, [tau, input_dim])
return x_test
return weibull_generator
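# Illustrative sketch (assumed values): tau points clustered around a random
# reference point, with Weibull-distributed distances; the first row is the
# reference point itself (distance 0).
#
#   sampler = make_weibull_sampler(input_dim=2)
#   x = sampler(jax.random.PRNGKey(0), 5)  # -> x of shape [5, 2].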
def make_polyadic_sampler(input_dim: int, kappa: int = 2) -> XGenerator:
"""Samples with local structure centered around kappa N(0, 1) anchor points.
  To make this work in jax we implement this by first sampling kappa anchor
  points and then randomly sampling the tau batch points from these kappa
  anchors (with replacement).
Args:
input_dim: input dimension.
kappa: number of anchor reference points. If tau is less than kappa we
default to sampling tau points.
Returns:
Polyadic sampling XGenerator.
"""
def polyadic_generator(key: chex.PRNGKey, tau: int) -> chex.Array:
anchor_key, sample_key = jax.random.split(key)
# Sample anchor points
anchor_x = jax.random.normal(anchor_key, [kappa, input_dim])
# Index into these points
sample_idx = jax.random.randint(sample_key, [tau], 0, kappa)
repeat_x = anchor_x[sample_idx]
chex.assert_shape(repeat_x, [tau, input_dim])
return repeat_x
return polyadic_generator
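# Illustrative sketch (assumed values): with kappa=2 anchors, a batch of
# tau=4 points contains at most 2 distinct rows.
#
#   sampler = make_polyadic_sampler(input_dim=2, kappa=2)
#   x = sampler(jax.random.PRNGKey(0), 4)  # -> x of shape [4, 2].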
def sample_gaussian_data(logit_fn: LogitFn,
x_generator: XGenerator,
num_train: int,
key: chex.PRNGKey) -> Tuple[testbed_base.Data, float]:
"""Generates training data for given problem."""
x_key, y_key = jax.random.split(key, 2)
# Checking the dimensionality of our data coming in.
x_train = x_generator(x_key, num_train)
input_dim = x_train.shape[1]
chex.assert_shape(x_train, [num_train, input_dim])
# Generate environment function across x_train
train_logits = logit_fn(x_train) # [n_train, n_class]
num_classes = train_logits.shape[-1] # Obtain from logit_fn.
chex.assert_shape(train_logits, [num_train, num_classes])
train_probs = jax.nn.softmax(train_logits)
# Generate training data.
def sample_output(probs: chex.Array, key: chex.PRNGKey) -> chex.Array:
return jax.random.choice(key, num_classes, shape=(1,), p=probs)
y_keys = jax.random.split(y_key, num_train)
y_train = jax.vmap(sample_output)(train_probs, y_keys)
data = testbed_base.Data(x=x_train, y=y_train)
# Compute the log likelihood with respect to the environment
log_likelihood = metrics.categorical_log_likelihood(train_probs, y_train)
return data, log_likelihood
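# Illustrative sketch (the logit_fn is a toy assumption) tying the pieces
# together:
#
#   logit_fn = lambda x: jnp.stack([x[:, 0], -x[:, 0]], axis=1)  # 2 classes.
#   data, log_likelihood = sample_gaussian_data(
#       logit_fn, make_gaussian_sampler(input_dim=2), num_train=8,
#       key=jax.random.PRNGKey(0))
#   assert data.x.shape == (8, 2) and data.y.shape == (8, 1)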
|
neural_testbed-master
|
neural_testbed/generative/classification_envlikelihood.py
|
# pylint: disable=g-bad-file-header
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for neural_testbed.generative.gp_classification_envlikelihood."""
import itertools
from absl.testing import absltest
from absl.testing import parameterized
import haiku as hk
from neural_testbed.generative import gp_classification_envlikelihood
from neural_testbed.generative import nt_kernels
import numpy as np
class GPClassificationEnsembleTest(parameterized.TestCase):
@parameterized.parameters(itertools.product([3, 10], [1, 3], [1, 3]))
def test_valid_data(self, num_train: int, input_dim: int, tau: int):
np.random.seed(0)
num_classes = 2
rng = hk.PRNGSequence(0)
gp_model = gp_classification_envlikelihood.GPClassificationEnvLikelihood(
kernel_fn=nt_kernels.make_benchmark_kernel(),
x_train=np.random.randn(num_train, input_dim),
x_test=np.random.randn(10, input_dim),
key=next(rng),
tau=tau,
num_classes=num_classes,
)
# Check that the training data is reasonable.
train_data = gp_model.train_data
assert train_data.x.shape == (num_train, input_dim)
assert train_data.y.shape == (num_train, 1)
assert np.all(~np.isnan(train_data.x))
assert np.all(~np.isnan(train_data.y))
# Check that the testing data is reasonable.
for _ in range(3):
test_data, log_likelihood = gp_model.test_data(next(rng))
assert np.isfinite(log_likelihood)
assert test_data.x.shape == (tau, input_dim)
assert test_data.y.shape == (tau, 1)
assert np.all(~np.isnan(test_data.x))
assert np.all(~np.isnan(test_data.y))
@parameterized.parameters(itertools.product([1, 10, 100], [10, 20]))
def test_not_all_test_data_same_x(self, num_train: int, num_test: int):
"""Generates testing data and checks not all the same x value."""
np.random.seed(0)
num_test_seeds = 10
input_dim = 2
rng = hk.PRNGSequence(0)
gp_model = gp_classification_envlikelihood.GPClassificationEnvLikelihood(
kernel_fn=nt_kernels.make_benchmark_kernel(),
x_train=np.random.randn(num_train, input_dim),
x_test=np.random.randn(num_test, input_dim),
key=next(rng),
tau=1,
num_classes=2,
)
num_distinct_x = 0
reference_data, _ = gp_model.test_data(key=next(rng))
for _ in range(num_test_seeds):
test_data, _ = gp_model.test_data(key=next(rng))
if not np.all(np.isclose(test_data.x, reference_data.x)):
num_distinct_x += 1
assert num_distinct_x > 0
@parameterized.parameters(itertools.product([10], [1], [10]))
def test_valid_labels(self, num_train: int, input_dim: int, num_seeds: int):
"""Checks that for at most 20% of problems, the labels are degenerate."""
num_classes = 2
num_test = 1
rng = hk.PRNGSequence(0)
labels_means = []
for i in range(num_seeds):
np.random.seed(i)
gp_model = gp_classification_envlikelihood.GPClassificationEnvLikelihood(
kernel_fn=nt_kernels.make_benchmark_kernel(),
x_train=np.random.randn(num_train, input_dim),
x_test=np.random.randn(num_test, input_dim),
key=next(rng),
tau=1,
num_classes=num_classes,
)
train_data = gp_model.train_data
labels_means.append(np.mean(train_data.y.copy()))
degenerate_cases = labels_means.count(0.) + labels_means.count(1.)
# Check that for at most 20% of problems, the labels are degenerate
assert degenerate_cases / num_seeds <= 0.2
if __name__ == '__main__':
absltest.main()
|
neural_testbed-master
|
neural_testbed/generative/gp_classification_envlikelihood_test.py
|
# pylint: disable=g-bad-file-header
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for neural_testbed.generative.classification_envlikelihood."""
import functools
import itertools
from absl.testing import absltest
from absl.testing import parameterized
import haiku as hk
import jax
from neural_testbed.generative import classification_envlikelihood
import numpy as np
class MLPClassificationEnsembleTest(parameterized.TestCase):
@parameterized.parameters(itertools.product([3, 10], [1, 3], [1, 3]))
def test_valid_data(self, num_train: int, input_dim: int, tau: int):
np.random.seed(0)
num_class = 2
rng = hk.PRNGSequence(0)
x_train_generator = lambda k, n: jax.random.normal(k, [n, input_dim])
x_test_generator = classification_envlikelihood.make_gaussian_sampler(
input_dim)
fn_transformed = hk.without_apply_rng(hk.transform(
lambda x: hk.nets.MLP([10, 10, num_class])(x))) # pylint: disable=[unnecessary-lambda]
params = fn_transformed.init(next(rng), np.zeros(shape=(input_dim,)))
logit_fn = lambda x: fn_transformed.apply(params, x)
mlp_model = classification_envlikelihood.ClassificationEnvLikelihood(
logit_fn=logit_fn,
x_train_generator=x_train_generator,
x_test_generator=x_test_generator,
num_train=num_train,
key=next(rng),
tau=tau,
)
# Check that the training data is reasonable.
train_data = mlp_model.train_data
assert train_data.x.shape == (num_train, input_dim)
assert train_data.y.shape == (num_train, 1)
assert np.all(~np.isnan(train_data.x))
assert np.all(~np.isnan(train_data.y))
# Check that the testing data is reasonable.
for _ in range(3):
test_data, log_likelihood = mlp_model.test_data(next(rng))
assert np.isfinite(log_likelihood)
assert test_data.x.shape == (tau, input_dim)
assert test_data.y.shape == (tau, 1)
assert np.all(~np.isnan(test_data.x))
assert np.all(~np.isnan(test_data.y))
@parameterized.parameters(itertools.product([1, 10, 100]))
def test_not_all_test_data_same_x(self, num_train: int):
"""Generates testing data and checks not all the same x value."""
np.random.seed(0)
num_test_seeds = 10
input_dim = 2
num_class = 2
tau = 1
rng = hk.PRNGSequence(0)
x_train_generator = lambda k, n: jax.random.normal(k, [n, input_dim])
x_test_generator = classification_envlikelihood.make_gaussian_sampler(
input_dim)
fn_transformed = hk.without_apply_rng(hk.transform(
lambda x: hk.nets.MLP([10, 10, num_class])(x))) # pylint: disable=[unnecessary-lambda]
params = fn_transformed.init(next(rng), np.zeros(shape=(input_dim,)))
logit_fn = lambda x: fn_transformed.apply(params, x)
mlp_model = classification_envlikelihood.ClassificationEnvLikelihood(
logit_fn=logit_fn,
x_train_generator=x_train_generator,
x_test_generator=x_test_generator,
num_train=num_train,
key=next(rng),
tau=tau,
)
num_distinct_x = 0
reference_data, _ = mlp_model.test_data(key=next(rng))
for _ in range(num_test_seeds):
test_data, _ = mlp_model.test_data(key=next(rng))
if not np.all(np.isclose(test_data.x, reference_data.x)):
num_distinct_x += 1
assert num_distinct_x > 0
@parameterized.parameters(itertools.product([10], [1], [10]))
def test_valid_labels(self, num_train: int, input_dim: int, num_seeds: int):
"""Checks that for at most 20% of problems, the labels are degenerate."""
num_class = 2
tau = 1
rng = hk.PRNGSequence(0)
x_train_generator = lambda k, n: jax.random.normal(k, [n, input_dim])
x_test_generator = classification_envlikelihood.make_gaussian_sampler(
input_dim)
fn_transformed = hk.without_apply_rng(hk.transform(
lambda x: hk.nets.MLP([10, 10, num_class])(x))) # pylint: disable=[unnecessary-lambda]
labels_means = []
for _ in range(num_seeds):
params = fn_transformed.init(next(rng), np.zeros(shape=(input_dim,)))
logit_fn = functools.partial(fn_transformed.apply, params)
mlp_model = classification_envlikelihood.ClassificationEnvLikelihood(
logit_fn=logit_fn,
x_train_generator=x_train_generator,
x_test_generator=x_test_generator,
num_train=num_train,
key=next(rng),
tau=tau,
)
train_data = mlp_model.train_data
labels_means.append(np.mean(train_data.y.copy()))
degenerate_cases = labels_means.count(0.) + labels_means.count(1.)
# Check that for at most 20% of problems, the labels are degenerate
assert degenerate_cases / num_seeds <= 0.2
@parameterized.parameters(itertools.product([1, 10], [1, 10], [1, 2]))
def test_local_generator(self, input_dim: int, tau: int, kappa: int):
"""Checks that the local generator produces valid testing points."""
local_sampler = classification_envlikelihood.make_polyadic_sampler(
input_dim, kappa)
for seed in range(10):
test_x = local_sampler(jax.random.PRNGKey(seed), tau)
assert test_x.shape == (tau, input_dim)
assert np.all(~np.isnan(test_x))
if __name__ == '__main__':
absltest.main()
|
neural_testbed-master
|
neural_testbed/generative/classification_envlikelihood_test.py
|
# pylint: disable=g-bad-file-header
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""In-memory logging system."""
from typing import Any, Mapping
from neural_testbed import base as testbed_base
from neural_testbed.logging import base as logging_base
import pandas as pd
def wrap_problem(
problem: testbed_base.TestbedProblem) -> testbed_base.TestbedProblem:
return logging_base.LoggingWrapper(problem, Logger())
class Logger(logging_base.Logger):
"""Saves data to python memory."""
def __init__(self):
"""Initializes a new python in-memory logger."""
self._data = []
def write(self, data: Mapping[str, Any]):
"""Adds a row to the internal list of data and saves to CSV."""
self._data.append(data)
@property
def df(self) -> pd.DataFrame:
return pd.DataFrame(self._data)
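# Illustrative usage sketch (logged values are assumptions):
#
#   logger = Logger()
#   logger.write({'kl_estimate': 0.1, 'total_seconds': 2.0})
#   print(logger.df)  # One-row pandas DataFrame of the logged entries.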
|
neural_testbed-master
|
neural_testbed/logging/memory_logger.py
|
# pylint: disable=g-bad-file-header
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Exposing the public methods of logging."""
# Logging
from neural_testbed.logging.base import EntryLoader
from neural_testbed.logging.base import Logger
from neural_testbed.logging.base import LoggingWrapper
# Logging csv
from neural_testbed.logging.csv_logger import wrap_problem as wrap_problem_csv
# Logging to python memory
from neural_testbed.logging.memory_logger import wrap_problem as wrap_problem_memory
|
neural_testbed-master
|
neural_testbed/logging/__init__.py
|
# pylint: disable=g-bad-file-header
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""CSV based logging system."""
import os
from typing import Any, Mapping
from neural_testbed import base as testbed_base
from neural_testbed.leaderboard import sweep
from neural_testbed.logging import base as logging_base
import pandas as pd
SAFE_SEPARATOR = '-'
INITIAL_SEPARATOR = '_-_'
GP_PREFIX = 'problem_id' + INITIAL_SEPARATOR
def wrap_problem(problem: testbed_base.TestbedProblem,
problem_id: str,
results_dir: str,
overwrite: bool = False) -> testbed_base.TestbedProblem:
logger = Logger(problem_id, results_dir, overwrite)
return logging_base.LoggingWrapper(problem, logger)
class Logger(logging_base.Logger):
"""Saves data to a CSV file via Pandas.
  In this simplified logger, each problem_id logs to a unique CSV file,
  indexed by problem_id. These files are saved to a single results_dir per
  experiment.
We strongly suggest that you use a *fresh* folder for each testbed run.
This logger, along with the corresponding load functionality, serves as a
simple, minimal example for users who need to implement logging to a different
storage system.
"""
def __init__(self,
problem_id: str,
results_dir: str = '/tmp/neural_testbed',
overwrite: bool = False):
"""Initializes a new CSV logger."""
if not os.path.exists(results_dir):
try:
os.makedirs(results_dir)
except OSError: # concurrent processes can makedir at same time
pass
# The default '/' symbol is dangerous for file systems!
safe_problem_id = problem_id.replace(sweep.SEPARATOR, SAFE_SEPARATOR)
filename = f'{GP_PREFIX}{safe_problem_id}.csv'
save_path = os.path.join(results_dir, filename)
if os.path.exists(save_path) and not overwrite:
raise ValueError(
f'File {save_path} already exists. Specify a different '
'directory, or set overwrite=True to overwrite existing data.')
self._data = []
self._save_path = save_path
def write(self, data: Mapping[str, Any]):
"""Adds a row to the internal list of data and saves to CSV."""
self._data.append(data)
df = pd.DataFrame(self._data)
df.to_csv(self._save_path, index=False)
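# Illustrative usage sketch (problem_id and results_dir are assumptions):
#
#   logger = Logger(problem_id='classification_2d/0',
#                   results_dir='/tmp/my_testbed_run')
#   logger.write({'kl_estimate': 0.1})
#   # Assuming sweep.SEPARATOR is '/', this writes
#   # /tmp/my_testbed_run/problem_id_-_classification_2d-0.csv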
|
neural_testbed-master
|
neural_testbed/logging/csv_logger.py
|
# pylint: disable=g-bad-file-header
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""An abstract base class for loggers."""
import abc
import time
from typing import Any, Callable, Dict, Mapping, Sequence, Tuple
from acme.utils import loggers
import chex
from neural_testbed import base as testbed_base
import pandas as pd
# TODO(author2): turn this into a typevar typed on the entry Type
Entry = Any
# Returns a dataframe with the results and a sequence of sweep_vars
EntryLoader = Callable[[Entry], Tuple[pd.DataFrame, Sequence[str]]]
class Logger(abc.ABC):
"""A logger has a `write` method."""
@abc.abstractmethod
def write(self, data: Mapping[str, Any]):
"""Writes `data` to destination (file, terminal, database, etc)."""
class LoggingWrapper(testbed_base.TestbedProblem):
"""Wraps a testbed problem with a logger."""
def __init__(self,
problem: testbed_base.TestbedProblem,
logger: Logger):
self._problem = problem
self._logger = logger
self._start = time.time()
self._train_start = self._start
@property
def train_data(self) -> testbed_base.Data:
self._train_start = time.time()
return self._problem.train_data
def evaluate_quality(
self,
enn_sampler: testbed_base.EpistemicSampler) -> testbed_base.ENNQuality:
# Before evaluating enn, we record the time at the end of training.
train_end = time.time()
enn_quality = self._problem.evaluate_quality(enn_sampler)
results = {
'kl_estimate': float(enn_quality.kl_estimate),
'total_seconds': time.time() - self._start,
'train_seconds': train_end - self._train_start,
'evaluation_seconds': time.time() - train_end,
}
if enn_quality.extra:
extra_results = clean_results(enn_quality.extra)
results.update({
key: value for key, value in extra_results.items()
})
self._logger.write(results)
return enn_quality
@property
def prior_knowledge(self) -> testbed_base.PriorKnowledge:
return self._problem.prior_knowledge
@property
def problem(self) -> testbed_base.TestbedProblem:
problem = self._problem
if hasattr(problem, 'problem'):
return problem.problem
return problem
def clean_results(results: Dict[str, Any]) -> Dict[str, Any]:
"""Cleans the results for logging (can't log jax arrays)."""
def clean_result(value: Any) -> Any:
value = loggers.to_numpy(value)
if isinstance(value, chex.ArrayNumpy) and value.size == 1:
value = float(value)
return value
for key, value in results.items():
results[key] = clean_result(value)
return results
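# Illustrative sketch of clean_results (the input is an assumption): size-1
# arrays are converted to python floats so simple loggers can serialize them.
#
#   import numpy as np
#   cleaned = clean_results({'kl': np.array([0.1])})
#   assert isinstance(cleaned['kl'], float)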
|
neural_testbed-master
|
neural_testbed/logging/base.py
|
# Copyright 2018 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Visualise bundle adjustment results.
Example to run:
python run_visualise.py --filename KV4jIAq3WJo_155_165.pkl
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import cPickle as pickle
import errno
import os
import subprocess
import sys
from absl import flags
import cv2
import matplotlib.pyplot as plt
import numpy as np
import plot_utils
import skvideo.io
from third_party.activity_net.download import download_clip
import third_party.hmr.renderer as vis_util
# Input
flags.DEFINE_string('filename', '', 'The annotation pickle file')
flags.DEFINE_string('smpl_face_path', 'smpl_faces.npy',
'Path to smpl model face file.')
# Output
flags.DEFINE_string(
    'output_dir', 'results', 'Where to write results to. '
    'Directory automatically created.')
def mkdir(dirname):
"""Create directory if it does not exist."""
try:
os.makedirs(dirname)
except OSError as exc:
if exc.errno != errno.EEXIST:
raise
def im_save_cv(image, filename):
"""Write image with OpenCV, converting from BGR to RGB format."""
cv2.imwrite(filename, image[:, :, (2, 1, 0)])
def visualize(img,
joints,
vertices,
camera,
image_name,
output_dir,
renderer=None,
color_id=0):
"""Renders the result in original image coordinate frame.
Args:
img: The image
joints: 2D keypoints, in the image coordinate frame.
vertices: Vertices of the SMPL mesh.
camera: Camera predicted.
image_name: Name of image for saving.
output_dir: Directory to save results to
renderer: Renderer object to use.
color_id: 0 is blue, and 1 is light pink. For the visualisation. The
colours are defined in the renderer.
"""
cam_for_render = camera * img.shape[0]
vert_shifted = np.copy(vertices)
# Approximate an orthographic camera:
# move points away and adjust the focal length to zoom in.
vert_shifted[:, -1] = vert_shifted[:, -1] + 100.
cam_for_render[0] *= 100.
rend_img_overlay = renderer(
vert_shifted,
cam=cam_for_render,
img=img,
do_alpha=True,
color_id=color_id)
rend_img = renderer(
vert_shifted,
cam=cam_for_render,
img_size=img.shape[:2],
color_id=color_id)
rend_img_vp1 = renderer.rotated(
vert_shifted,
60,
cam=cam_for_render,
img_size=img.shape[:2],
color_id=color_id)
rend_img_vp2 = renderer.rotated(
vert_shifted,
-60,
cam=cam_for_render,
img_size=img.shape[:2],
color_id=color_id)
save_name = os.path.join(output_dir, image_name + '.jpg')
fig = plot_utils.plot_summary_figure(img, joints, rend_img_overlay, rend_img,
rend_img_vp1, rend_img_vp2, save_name)
plt.close(fig)
def transform_keypoints_to_image(keypoints, img):
"""Transform keypoints from range [0, 1] to image coordinates."""
keypoints[:, :, 0] *= img.shape[0]
  # The saved keypoints are scaled by image height.
  keypoints[:, :, 1] *= img.shape[0]
return keypoints
def parse_filename(filename):
"""Parse filename of the pickle file."""
name = os.path.basename(filename)
name = name.replace('.pkl', '')
tokens = name.split('_')
end_time = int(tokens[-1])
start_time = int(tokens[-2])
video_id = '_'.join(tokens[0:-2])
return video_id, start_time, end_time
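# Example (using the filename from the module docstring):
# 'KV4jIAq3WJo_155_165.pkl' parses to video_id='KV4jIAq3WJo',
# start_time=155, end_time=165.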
def get_frame_rate(video_path):
"""Get frame rate of the video from its metadata."""
meta_data = skvideo.io.ffprobe(video_path)
if 'video' in meta_data.keys():
meta_data = meta_data['video']
if '@avg_frame_rate' in meta_data:
frame_rate = eval(meta_data['@avg_frame_rate'])
else:
frame_rate = None
return frame_rate
def video_from_images(directory, save_name):
"""Create video from images saved in directory using ffmpeg."""
command = [
'ffmpeg', '-framerate', '25', '-pattern_type',
'glob -i \'{}/*.jpg\''.format(directory), '-c:v', 'libx264', '-pix_fmt',
'yuv420p', '-loglevel', 'panic', save_name
]
command = ' '.join(command)
try:
_ = subprocess.check_output(
command, shell=True, stderr=subprocess.STDOUT)
  except subprocess.CalledProcessError:
    pass  # A failed ffmpeg call is non-fatal; the frames remain on disk.
def load_pickle(filename):
"""Read pickle file."""
with open(filename) as fp:
data = pickle.load(fp)
return data
def main(config):
data = load_pickle(config.filename)
video_id, start_time, end_time = parse_filename(config.filename)
video_path = '/tmp/' + video_id + '.mp4'
status, message = download_clip(video_id, video_path, start_time, end_time)
if not status:
print('Video not downloaded')
print(message)
sys.exit()
video = skvideo.io.vread(video_path)
frame_rate = get_frame_rate(video_path)
if not frame_rate:
print('Error. Could not determine frame rate of video')
sys.exit()
output_dir = os.path.join(config.output_dir, video_id)
mkdir(output_dir)
keypoints = transform_keypoints_to_image(data['2d_keypoints'],
video[0].squeeze())
renderer = vis_util.SMPLRenderer(face_path=config.smpl_face_path)
for i in range(data['time'].size):
idx = int(round(data['time'][i] * frame_rate))
if idx >= video.shape[0]:
break
img = video[idx].squeeze()
image_name = '{:>04}'.format(i)
visualize(
img,
joints=keypoints[i].squeeze(),
vertices=data['vertices'][i].squeeze(),
camera=data['camera'][i].squeeze(),
image_name=image_name,
output_dir=output_dir,
renderer=renderer)
if i % 20 == 0:
print('Processed {:3d} / {:3d}'.format(i + 1, data['time'].size))
video_from_images(output_dir, os.path.join(output_dir, video_id + '.mp4'))
if __name__ == '__main__':
config_ = flags.FLAGS
config_(sys.argv)
main(config_)
|
Temporal-3D-Pose-Kinetics-master
|
run_visualise.py
|
# Copyright 2018 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Create plots with Matplotlib to visualise the result."""
import StringIO
import matplotlib.pyplot as plt
import numpy as np
HMR_JOINT_NAMES = [
'right_ankle',
'right_knee',
'right_hip',
'left_hip',
'left_knee',
'left_ankle',
'right_wrist',
'right_elbow',
'right_shoulder',
'left_shoulder',
'left_elbow',
'left_wrist',
'neck',
'head_top',
'nose',
'left_eye',
'right_eye',
'left_ear',
'right_ear',
]
MSCOCO_JOINT_NAMES = [
'nose', 'left_eye', 'right_eye', 'left_ear', 'right_ear', 'left_shoulder',
'right_shoulder', 'left_elbow', 'right_elbow', 'left_wrist', 'right_wrist',
'left_hip', 'right_hip', 'left_knee', 'right_knee', 'left_ankle',
'right_ankle'
]
coco_to_hmr = []
for name in MSCOCO_JOINT_NAMES:
index = HMR_JOINT_NAMES.index(name)
coco_to_hmr.append(index)
PARENTS_COCO_PLUS = [
1, 2, 8, 9, 3, 4, 7, 8, 12, 12, 9, 10, 14, -1, 13, -1, -1, 15, 16
]
COLOURS = []
for name in HMR_JOINT_NAMES:
if name.startswith('left'):
c = 'r'
elif name.startswith('right'):
c = 'g'
else:
c = 'm'
COLOURS.append(c)
def plot_keypoints_2d(image,
joints_2d,
ax=None,
show_plot=False,
title='',
is_coco_format=False):
"""Plot 2d keypoints overlaid on image."""
if ax is None:
fig = plt.figure()
ax = fig.add_subplot(111)
if hasattr(ax, 'set_axis_off'):
ax.set_axis_off()
if is_coco_format:
kp = np.zeros((len(HMR_JOINT_NAMES), 2))
kp[coco_to_hmr, :] = joints_2d
joints_2d = kp
if image is not None:
ax.imshow(image)
joint_colour = 'c' if not is_coco_format else 'b'
s = 30 * np.ones(joints_2d.shape[0])
for i in range(joints_2d.shape[0]):
x, y = joints_2d[i, :]
if x == 0 and y == 0:
s[i] = 0
  ax.scatter(
      joints_2d[:, 0].squeeze(),
      joints_2d[:, 1].squeeze(),
      s=s,  # Size 0 hides joints at (0, 0), which mark missing detections.
      c=joint_colour)
for idx_i, idx_j in enumerate(PARENTS_COCO_PLUS):
if idx_j >= 0:
pair = [idx_i, idx_j]
x, y = joints_2d[pair, 0], joints_2d[pair, 1]
if x[0] > 0 and y[0] > 0 and x[1] > 0 and y[1] > 0:
ax.plot(x.squeeze(), y.squeeze(), c=COLOURS[idx_i], linewidth=1.5)
ax.set_xlim([0, image.shape[1]])
ax.set_ylim([image.shape[0], 0])
if title:
ax.set_title(title)
if show_plot:
plt.show()
return ax
def plot_summary_figure(img,
joints_2d,
rend_img_overlay,
rend_img,
rend_img_vp1,
rend_img_vp2,
save_name=None):
"""Create plot to visulise results."""
fig = plt.figure(1, figsize=(20, 12))
plt.clf()
plt.subplot(231)
plt.imshow(img)
plt.title('Input')
plt.axis('off')
ax_skel = plt.subplot(232)
ax_skel = plot_keypoints_2d(img, joints_2d, ax_skel)
plt.title('Joint Projection')
plt.axis('off')
plt.subplot(233)
plt.imshow(rend_img_overlay)
plt.title('3D Mesh overlay')
plt.axis('off')
plt.subplot(234)
plt.imshow(rend_img)
plt.title('3D mesh')
plt.axis('off')
plt.subplot(235)
plt.imshow(rend_img_vp1)
plt.title('Other viewpoint (+60 degrees)')
plt.axis('off')
plt.subplot(236)
plt.imshow(rend_img_vp2)
plt.title('Other viewpoint (-60 degrees)')
plt.axis('off')
plt.draw()
if save_name is not None:
buf = StringIO.StringIO()
plt.savefig(buf, format='jpg')
buf.seek(0)
with open(save_name, 'w') as fp:
fp.write(buf.read(-1))
else:
plt.show()
return fig
|
Temporal-3D-Pose-Kinetics-master
|
plot_utils.py
|
Temporal-3D-Pose-Kinetics-master
|
third_party/__init__.py
|
|
Temporal-3D-Pose-Kinetics-master
|
third_party/activity_net/__init__.py
|
|
"""Render meshes using OpenDR.
Code is from:
https://github.com/akanazawa/hmr/blob/master/src/util/renderer.py
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import cv2
import numpy as np
from opendr.camera import ProjectPoints
from opendr.renderer import ColoredRenderer
from opendr.lighting import LambertianPointLight
colors = {
# colorbline/print/copy safe:
'light_blue': [0.65098039, 0.74117647, 0.85882353],
'light_pink': [.9, .7, .7], # This is used to do no-3d
}
class SMPLRenderer(object):
"""Utility class to render SMPL models."""
def __init__(self, img_size=224, flength=500., face_path='smpl_faces.npy'):
self.faces = np.load(face_path)
self.w = img_size
self.h = img_size
self.flength = flength
def __call__(self,
verts,
cam=None,
img=None,
do_alpha=False,
far=None,
near=None,
color_id=0,
img_size=None):
# cam is 3D [f, px, py]
if img is not None:
h, w = img.shape[:2]
elif img_size is not None:
h = img_size[0]
w = img_size[1]
else:
h = self.h
w = self.w
if cam is None:
cam = [self.flength, w / 2., h / 2.]
use_cam = ProjectPoints(
f=cam[0] * np.ones(2),
rt=np.zeros(3),
t=np.zeros(3),
k=np.zeros(5),
c=cam[1:3])
if near is None:
near = np.maximum(np.min(verts[:, 2]) - 25, 0.1)
if far is None:
far = np.maximum(np.max(verts[:, 2]) + 25, 25)
imtmp = render_model(
verts,
self.faces,
w,
h,
use_cam,
do_alpha=do_alpha,
img=img,
far=far,
near=near,
color_id=color_id)
return (imtmp * 255).astype('uint8')
def rotated(self,
verts,
deg,
cam=None,
axis='y',
img=None,
do_alpha=True,
far=None,
near=None,
color_id=0,
img_size=None):
if axis == 'y':
around = cv2.Rodrigues(np.array([0, math.radians(deg), 0]))[0]
elif axis == 'x':
around = cv2.Rodrigues(np.array([math.radians(deg), 0, 0]))[0]
else:
around = cv2.Rodrigues(np.array([0, 0, math.radians(deg)]))[0]
center = verts.mean(axis=0)
new_v = np.dot((verts - center), around) + center
return self.__call__(
new_v,
cam,
img=img,
do_alpha=do_alpha,
far=far,
near=near,
img_size=img_size,
color_id=color_id)
def _create_renderer(w=640,
h=480,
rt=np.zeros(3),
t=np.zeros(3),
f=None,
c=None,
k=None,
near=.5,
far=10.):
f = np.array([w, w]) / 2. if f is None else f
c = np.array([w, h]) / 2. if c is None else c
k = np.zeros(5) if k is None else k
rn = ColoredRenderer()
rn.camera = ProjectPoints(rt=rt, t=t, f=f, c=c, k=k)
rn.frustum = {'near': near, 'far': far, 'height': h, 'width': w}
return rn
def _rotateY(points, angle):
"""Rotate the points by a specified angle."""
ry = np.array([[np.cos(angle), 0., np.sin(angle)], [0., 1., 0.],
[-np.sin(angle), 0., np.cos(angle)]])
return np.dot(points, ry)
def simple_renderer(rn,
verts,
faces,
yrot=np.radians(120),
color=colors['light_pink']):
# Rendered model color
rn.set(v=verts, f=faces, vc=color, bgcolor=np.ones(3))
albedo = rn.vc
# Construct Back Light (on back right corner)
rn.vc = LambertianPointLight(
f=rn.f,
v=rn.v,
num_verts=len(rn.v),
light_pos=_rotateY(np.array([-200, -100, -100]), yrot),
vc=albedo,
light_color=np.array([1, 1, 1]))
# Construct Left Light
rn.vc += LambertianPointLight(
f=rn.f,
v=rn.v,
num_verts=len(rn.v),
light_pos=_rotateY(np.array([800, 10, 300]), yrot),
vc=albedo,
light_color=np.array([1, 1, 1]))
# Construct Right Light
rn.vc += LambertianPointLight(
f=rn.f,
v=rn.v,
num_verts=len(rn.v),
light_pos=_rotateY(np.array([-500, 500, 1000]), yrot),
vc=albedo,
light_color=np.array([.7, .7, .7]))
return rn.r
def get_alpha(imtmp, bgval=1.):
h, w = imtmp.shape[:2]
alpha = (~np.all(imtmp == bgval, axis=2)).astype(imtmp.dtype)
b_channel, g_channel, r_channel = cv2.split(imtmp)
im_RGBA = cv2.merge(
(b_channel, g_channel, r_channel, alpha.astype(imtmp.dtype)))
return im_RGBA
def append_alpha(imtmp):
alpha = np.ones_like(imtmp[:, :, 0]).astype(imtmp.dtype)
if np.issubdtype(imtmp.dtype, np.uint8):
alpha = alpha * 255
b_channel, g_channel, r_channel = cv2.split(imtmp)
im_RGBA = cv2.merge((b_channel, g_channel, r_channel, alpha))
return im_RGBA
def render_model(verts,
faces,
w,
h,
cam,
near=0.5,
far=25,
img=None,
do_alpha=False,
color_id=None):
rn = _create_renderer(
w=w, h=h, near=near, far=far, rt=cam.rt, t=cam.t, f=cam.f, c=cam.c)
# Uses img as background, otherwise white background.
if img is not None:
rn.background_image = img / 255. if img.max() > 1 else img
if color_id is None:
color = colors['light_blue']
else:
    color_list = list(colors.values())
color = color_list[color_id % len(color_list)]
imtmp = simple_renderer(rn, verts, faces, color=color)
# If white bg, make transparent.
if img is None and do_alpha:
imtmp = get_alpha(imtmp)
elif img is not None and do_alpha:
imtmp = append_alpha(imtmp)
return imtmp
|
Temporal-3D-Pose-Kinetics-master
|
third_party/hmr/renderer.py
|
Temporal-3D-Pose-Kinetics-master
|
third_party/hmr/__init__.py
|
|
# pylint: disable=g-bad-file-header
# Copyright 2019 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
# python2 python3
"""Start demo GUI for Spriteworld task configs.
To play a task, run this on the task config:
```bash
python run_demo.py --config=$path_to_task_config$
```
Be aware that this demo overrides the action space and renderer for ease of
playing, so those will be different from what are specified in the task config.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import importlib
from absl import app
from absl import flags
from spriteworld import demo_ui
FLAGS = flags.FLAGS
flags.DEFINE_string('config', 'spriteworld.configs.cobra.clustering',
'Module name of task config to use.')
flags.DEFINE_string('mode', 'train', 'Task mode, "train" or "test".')
flags.DEFINE_boolean('task_hsv_colors', True,
'Whether the task config uses HSV as color factors.')
flags.DEFINE_integer('render_size', 256,
'Height and width of the output image.')
flags.DEFINE_integer('anti_aliasing', 10, 'Renderer anti-aliasing factor.')
def main(_):
config = importlib.import_module(FLAGS.config)
config = config.get_config(FLAGS.mode)
demo_ui.setup_run_ui(config, FLAGS.render_size, FLAGS.task_hsv_colors,
FLAGS.anti_aliasing)
if __name__ == '__main__':
app.run(main)
|
spriteworld-master
|
run_demo.py
|
# pylint: disable=g-bad-file-header
# Copyright 2019 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
# python2 python3
"""Template for running an agent on Spriteworld tasks.
This script runs an agent on a Spriteworld task. The agent takes random actions
and does not learn, so this serves only as an example of how to run an agent in
the environment, logging task success and mean rewards.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import importlib
from absl import app
from absl import flags
from absl import logging
import numpy as np
from six.moves import range
from spriteworld import environment
from spriteworld import renderers
FLAGS = flags.FLAGS
flags.DEFINE_integer('num_episodes', 100, 'Number of training episodes.')
flags.DEFINE_string('config',
'spriteworld.configs.cobra.goal_finding_new_position',
'Module name of task config to use.')
flags.DEFINE_string('mode', 'train', 'Task mode, "train" or "test".')
class RandomAgent(object):
"""Agent that takes random actions."""
def __init__(self, env):
"""Construct random agent."""
self._env = env
def step(self, timestep):
# observation is a dictionary with renderer outputs to be used for training
observation = timestep.observation
del observation
del timestep
action = self._env.action_space.sample()
return action
def main(argv):
del argv
config = importlib.import_module(FLAGS.config)
config = config.get_config(FLAGS.mode)
config['renderers']['success'] = renderers.Success() # Used for logging
env = environment.Environment(**config)
agent = RandomAgent(env)
# Loop over episodes, logging success and mean reward per episode
for episode in range(FLAGS.num_episodes):
timestep = env.reset()
rewards = []
while not timestep.last():
action = agent.step(timestep)
timestep = env.step(action)
rewards.append(timestep.reward)
logging.info('Episode %d: Success = %r, Reward = %s.', episode,
timestep.observation['success'], np.nanmean(rewards))
if __name__ == '__main__':
app.run(main)
|
spriteworld-master
|
example_run_loop.py
|
# pylint: disable=g-bad-file-header
# Copyright 2019 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
# python2 python3
"""Installation script for setuptools."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from setuptools import find_packages
from setuptools import setup
setup(
name='spriteworld',
version='1.0.2',
description=('Spriteworld is a python-based reinforcement learning '
'environment consisting of a 2-dimensional arena with objects '
'that can be freely moved.'),
author='DeepMind',
url='https://github.com/deepmind/spriteworld/',
license='Apache License, Version 2.0',
keywords=[
'ai',
'reinforcement-learning',
'python',
'machine learning',
'objects',
],
packages=find_packages(
exclude=['*.tests', '*.tests.*', 'tests.*', 'tests']),
install_requires=[
'absl-py',
'dm_env',
'enum34',
'matplotlib',
'mock',
'numpy',
'pillow',
'scikit-learn',
'six',
],
tests_require=[
'nose',
'absl-py',
],
extras_require={
'gym': ['gym'],
},
test_suite='nose.collector',
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Console',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: Apache Software License',
'Operating System :: POSIX :: Linux',
'Operating System :: Microsoft :: Windows',
'Operating System :: MacOS :: MacOS X',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
],
)
|
spriteworld-master
|
setup.py
|
# Copyright 2019 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
# python2 python3
"""Factor distribution library.
This library contains classes for defining distributions of sprite factors.
A number of set-theoretic operations are supported, with which it is possible to
define factor distributions that are arbitrarily nested mixtures, intersections,
products, and differences of single-factor continuous/discrete distributions.
A factor specification is called a "spec", which is a dictionary of sprite
factors, hence can have keys such as "size", "shape", "x_pos", etc. However, the
classes in this file are general and make no reference to the particular factor
names used by Spriteworld sprites.
All distributions inherit from AbstractDistribution. They have a "sample()"
method, which returns a spec. The keys of this spec can be accessed by the
"keys" property. Distributions also have a "contains(spec)" method, which checks
if the argument "spec" is in the support of the distribution.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import functools
import numpy as np
import six
# Maximum number of tries used for rejection sampling from Intersection and
# SetMinus distributions
_MAX_TRIES = int(1e5)
@six.add_metaclass(abc.ABCMeta)
class AbstractDistribution(object):
"""Abstract class from which all distributions should inherit."""
@abc.abstractmethod
def sample(self, rng=None):
"""Sample a spec from this distribution. Returns a dictionary.
Args:
      rng: Random number generator. Fed into self._get_rng(); if None,
        defaults to np.random.
"""
@abc.abstractmethod
def contains(self, spec):
"""Return whether distribution contains spec dictionary."""
@abc.abstractmethod
def to_str(self, indent):
"""Recursive string description of this distribution."""
def __str__(self):
return self.to_str(indent=0)
def _get_rng(self, rng=None):
"""Get random number generator, defaulting to np.random."""
return np.random if rng is None else rng
@abc.abstractproperty
def keys(self):
"""The set of keys in specs sampled from this distribution."""
class Continuous(AbstractDistribution):
"""Continuous 1-dimensional uniform distribution."""
def __init__(self, key, minval, maxval, dtype='float32'):
"""Construct continuous 1-dimensional uniform distribution.
Args:
key: String factor name. self.sample() returns {key: _}.
minval: Scalar minimum value.
maxval: Scalar maximum value.
dtype: String numpy dtype.
"""
self.key = key
self.minval = minval
self.maxval = maxval
self.dtype = dtype
def sample(self, rng=None):
"""Sample value in [self.minval, self.maxval) and return dict."""
rng = self._get_rng(rng)
out = rng.uniform(low=self.minval, high=self.maxval)
out = np.cast[self.dtype](out)
return {self.key: out}
def contains(self, spec):
"""Check if spec[self.key] is in [self.minval, self.maxval)."""
if self.key not in spec:
raise KeyError('key {} is not in spec {}, but must be to evaluate '
'containment.'.format(self.key, spec))
else:
return spec[self.key] >= self.minval and spec[self.key] < self.maxval
def to_str(self, indent):
    s = '<Continuous: key={}, minval={}, maxval={}, dtype={}>'.format(
self.key, self.minval, self.maxval, self.dtype)
return indent * ' ' + s
@property
def keys(self):
return set([self.key])
class Discrete(AbstractDistribution):
"""Discrete distribution."""
def __init__(self, key, candidates, probs=None):
"""Construct discrete distribution.
Args:
key: String. Factor name.
candidates: Iterable. Discrete values to sample from.
probs: None or iterable of floats summing to 1. Candidate sampling
probabilities. If None, candidates are sampled uniformly.
"""
self.candidates = candidates
self.key = key
self.probs = probs
def sample(self, rng=None):
rng = self._get_rng(rng)
out = self.candidates[rng.choice(len(self.candidates), p=self.probs)]
return {self.key: out}
def contains(self, spec):
if self.key not in spec:
raise KeyError('key {} is not in spec {}, but must be to evaluate '
'containment.'.format(self.key, spec))
else:
return spec[self.key] in self.candidates
def to_str(self, indent):
s = '<Discrete: key={}, candidates={}, probs={}>'.format(
self.key, self.candidates, self.probs)
return indent * ' ' + s
@property
def keys(self):
return set([self.key])
class Mixture(AbstractDistribution):
"""Mixture of distributions."""
def __init__(self, components, probs=None):
"""Construct mixture of distributions.
This is a mixture distribution, not a union, so if the components overlap,
their overlap will be sampled more than the non-overlapping regions.
Args:
components: Iterable of component distributions. Must all have the same
key sets.
probs: None or iterable of floats summing to 1. Sampling probabilities for
the components.
"""
self.components = components
if probs is None:
self.probs = np.ones(len(components)) / len(components)
else:
self.probs = np.array(probs)
self._keys = components[0].keys
for c in components[1:]:
if c.keys != self._keys:
raise ValueError(
'All components must have the same key sets. However detected key '
'sets {} and {}'.format(self._keys, c.keys))
def sample(self, rng=None):
rng = self._get_rng(rng)
sample_index = rng.choice(len(self.components), p=self.probs)
sample = self.components[sample_index].sample(rng=rng)
return sample
def contains(self, spec):
return any(c.contains(spec) for c in self.components)
def to_str(self, indent):
components_strings = [x.to_str(indent + 2) for x in self.components]
s = (indent * ' ' + '<Mixture:\n' +
(indent + 1) * ' ' + 'components=[\n{},\n' +
(indent + 1) * ' ' + '],\n' +
(indent + 1) * ' ' + 'probs={}>').format(
',\n'.join(components_strings), self.probs)
return s
@property
def keys(self):
return self._keys
class Intersection(AbstractDistribution):
"""Intersection of component distributions."""
def __init__(self, components, index_for_sampling=0):
"""Construct intersection of component distributions.
Samples are generated by sampling from one of the components and then doing
rejection with the others, so if the component being sampled has some
non-uniformity (e.g. a mixture with non-uniform probs), that non-uniformity
will be inherited by the intersection.
Args:
components: Iterable of distributions.
index_for_sampling: Int. Index of the component to use for sampling. All
other components will be used to reject its samples. For efficiency, the
user should ensure index_for_sampling corresponds to the smallest
component distribution.
"""
self.components = components
self.index_for_sampling = index_for_sampling
self._keys = components[0].keys
for c in components[1:]:
if c.keys != self._keys:
raise ValueError(
'All components must have the same key sets. However detected key '
'sets {} and {}'.format(self._keys, c.keys))
def sample(self, rng=None):
rng = self._get_rng(rng)
tries = 0
while tries < _MAX_TRIES:
tries += 1
sample = self.components[self.index_for_sampling].sample(rng=rng)
if all(c.contains(sample) for c in self.components):
return sample
    raise ValueError('Maximum number of tries exceeded when trying to sample '
'from {}.'.format(str(self)))
def contains(self, spec):
return all(c.contains(spec) for c in self.components)
def to_str(self, indent):
components_strings = [x.to_str(indent + 2) for x in self.components]
s = (indent * ' ' + '<Intersection:\n' +
(indent + 1) * ' ' + 'components=[\n{},\n' +
(indent + 1) * ' ' + '],\n' +
(indent + 1) * ' ' + 'index_for_sampling={}>').format(
',\n'.join(components_strings), self.index_for_sampling)
return s
@property
def keys(self):
return self._keys
class Product(AbstractDistribution):
"""Product distribution."""
def __init__(self, components):
"""Construct product distribution.
This is used to create distributions over larger numbers of factors by
taking the product of components. The components must have disjoint key
sets.
Args:
components: Iterable of distributions.
"""
self.components = components
self._keys = functools.reduce(set.union, [set(c.keys) for c in components])
num_keys = sum(len(c.keys) for c in components)
if len(self._keys) < num_keys:
raise ValueError(
'All components must have different keys, yet there are {} '
'overlapping keys.'.format(num_keys - len(self._keys)))
def sample(self, rng=None):
rng = self._get_rng(rng)
sample = {}
for c in self.components:
sample.update(c.sample(rng=rng))
return sample
def contains(self, spec):
return all(c.contains(spec) for c in self.components)
def to_str(self, indent):
components_strings = [x.to_str(indent + 2) for x in self.components]
s = (indent * ' ' + '<Product:\n' +
(indent + 1) * ' ' + 'components=[\n{},\n' +
(indent + 1) * ' ' + ']>').format(
',\n'.join(components_strings))
return s
@property
def keys(self):
return self._keys
class SetMinus(AbstractDistribution):
"""Setminus of distributions."""
def __init__(self, base, hold_out):
"""Construct setminus of distributions..
This uses rejection sampling to take the difference of two distributions.
Args:
base: Distribution from which candidate samples are drawn.
hold_out: Distribution used to reject samples from base.
"""
self.base = base
self.hold_out = hold_out
self._keys = base.keys
if not hold_out.keys.issubset(self._keys):
raise ValueError(
          'Keys {} of hold_out are not a subset of keys {} of SetMinus base '
'distribution.'
.format(hold_out.keys, base.keys))
def sample(self, rng=None):
rng = self._get_rng(rng)
tries = 0
while tries < _MAX_TRIES:
tries += 1
sample = self.base.sample(rng=rng)
if not self.hold_out.contains(sample):
return sample
    raise ValueError('Maximum number of tries exceeded when trying to sample '
'from {}.'.format(str(self)))
def contains(self, spec):
return self.base.contains(spec) and not self.hold_out.contains(spec)
def to_str(self, indent):
s = (indent * ' ' + '<SetMinus:\n' +
(indent + 1) * ' ' + 'base=\n{},\n' +
(indent + 1) * ' ' + 'hold_out=\n{}>').format(
self.base.to_str(indent + 2), self.hold_out.to_str(indent + 2))
return s
@property
def keys(self):
return self._keys
class Selection(AbstractDistribution):
"""Filter a source distribution."""
def __init__(self, base, filtering):
"""Construct selection of a base distribution given a filter.
Given a base Distribution and a filter Distribution, returns samples of
the base which are compatible with the filter.
This is related to Intersection, but does not expect the base and filters
to have the same keys. Instead, the filters should be subsets of the base.
This is the same as SetMinus, except the filter accepts instead of rejects
samples.
Args:
base: Distribution from which candidate samples are drawn.
filtering: Distribution used to select samples from base.
"""
self.base = base
self.filtering = filtering
self._keys = base.keys
if not filtering.keys.issubset(self._keys):
raise ValueError(
          'Keys {} of filtering are not a subset of keys {} of Selection base '
'distribution.'.format(filtering.keys, base.keys))
def sample(self, rng=None):
rng = self._get_rng(rng)
tries = 0
while tries < _MAX_TRIES:
tries += 1
sample = self.base.sample(rng=rng)
if self.filtering.contains(sample):
return sample
raise ValueError(
        'Maximum number of tries exceeded when trying to sample from {}.'
.format(str(self)))
def contains(self, spec):
return self.base.contains(spec) and self.filtering.contains(spec)
def to_str(self, indent):
s = (indent * ' ' + '<Selection:\n' + (indent + 1) * ' ' +
'base=\n{},\n' + (indent + 1) * ' ' + 'filtering=\n{}>').format(
self.base.to_str(indent + 2), self.filtering.to_str(indent + 2))
return s
@property
def keys(self):
return self._keys
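# Usage sketch: compose single-factor distributions into an arbitrarily
# nested distribution, then sample and check specs. The factor names below
# are illustrative; these classes accept any keys.
if __name__ == '__main__':
  position = Product([
      Continuous('x', 0.1, 0.9),
      Continuous('y', 0.1, 0.9),
  ])
  shape = Discrete('shape', ['square', 'triangle', 'circle'])
  # All shapes except 'circle', via rejection sampling.
  factors = SetMinus(
      base=Product([position, shape]),
      hold_out=Discrete('shape', ['circle']),
  )
  spec = factors.sample()  # E.g. {'x': 0.42, 'y': 0.77, 'shape': 'square'}
  assert factors.contains(spec)
  print(factors)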
|
spriteworld-master
|
spriteworld/factor_distributions.py
|
# Copyright 2019 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
# python2 python3
"""Generators for producing lists of sprites based on factor distributions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
import numpy as np
from spriteworld import sprite
def generate_sprites(factor_dist, num_sprites=1):
"""Create callable that samples sprites from a factor distribution.
Args:
factor_dist: The factor distribution from which to sample. Should be an
instance of factor_distributions.AbstractDistribution.
num_sprites: Int or callable returning int. Number of sprites to generate
per call.
Returns:
_generate: Callable that returns a list of Sprites.
"""
def _generate():
n = num_sprites() if callable(num_sprites) else num_sprites
sprites = [sprite.Sprite(**factor_dist.sample()) for _ in range(n)]
return sprites
return _generate
def chain_generators(*sprite_generators):
"""Chain generators by concatenating output sprite sequences.
Essentially an 'AND' operation over sprite generators. This is useful when one
wants to control the number of samples from the modes of a multimodal sprite
distribution.
Note that factor_distributions.Mixture provides weighted mixture
distributions, so chain_generators() is typically only used when one wants to
force the different modes to each have a non-zero number of sprites.
Args:
*sprite_generators: Callable sprite generators.
Returns:
_generate: Callable returning a list of sprites.
"""
def _generate():
return list(
itertools.chain(*[generator() for generator in sprite_generators]))
return _generate
def sample_generator(sprite_generators, p=None):
"""Sample one element from a set of sprite generators.
Essentially an 'OR' operation over sprite generators. This returns a callable
that samples a generator from sprite_generators and calls it.
Note that if sprite_generators each return 1 sprite, this functionality can be
achieved with factor_distributions.Mixture, so sample_generator is typically
used when sprite_generators each return multiple sprites. Effectively it
allows dependent sampling from a multimodal factor distribution.
Args:
sprite_generators: Iterable of callable sprite generators.
p: Probabilities associated with each generator. If None, assumes uniform
distribution.
Returns:
_generate: Callable sprite generator.
"""
def _generate():
sample_index = np.random.choice(len(sprite_generators), p=p)
sampled_generator = sprite_generators[sample_index]
return sampled_generator()
return _generate
def shuffle(sprite_generator):
"""Randomize the order of sprites sample from sprite_generator.
This is useful because sprites are z-layered with occlusion according to their
order, so if sprite_generator is the output of chain_generators(), then
sprites from some component distributions will always be behind sprites from
others.
An alternate design would be to let the environment handle sprite ordering,
but this design is preferable because the order can be controlled more finely.
For example, this allows the user to specify one sprite (e.g. the agent's
body) to always be in the foreground while all the others are randomly
ordered.
Args:
sprite_generator: Callable return a list of sprites.
Returns:
_generate: Callable sprite generator.
"""
def _generate():
sprites = sprite_generator()
order = np.arange(len(sprites))
np.random.shuffle(order)
return [sprites[i] for i in order]
return _generate
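# Usage sketch: build an episode generator with one target sprite and a
# random number of distractors, shuffled into a random z-order. Assumes
# spriteworld.factor_distributions is importable.
if __name__ == '__main__':
  from spriteworld import factor_distributions as distribs
  distrib = distribs.Product([
      distribs.Continuous('x', 0.1, 0.9),
      distribs.Continuous('y', 0.1, 0.9),
  ])
  target = generate_sprites(distrib, num_sprites=1)
  distractors = generate_sprites(
      distrib, num_sprites=lambda: np.random.randint(1, 4))
  init_sprites = shuffle(chain_generators(target, distractors))
  print(len(init_sprites()))  # Between 2 and 4 sprites.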
|
spriteworld-master
|
spriteworld/sprite_generators.py
|
# Copyright 2019 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
# python2 python3
"""Shapes for Spriteworld.
Contains functions that generate np.arrays containing vertex arrays for various
sprite shapes.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from six.moves import range
def _polar2cartesian(r, theta):
return r * np.array([np.cos(theta), np.sin(theta)])
def polygon(num_sides, theta_0=0.):
"""Generate the vertices of a regular polygon.
Args:
num_sides: Int. Number of sides of the polygon.
theta_0: Float. Initial angle to start the vertices from.
Returns:
path: Array of vertices of the polygon, normalized so it has area 1.
"""
theta = 2 * np.pi / num_sides
path = np.array(
[_polar2cartesian(1, i * theta + theta_0) for i in range(num_sides)])
area = num_sides * np.sin(theta / 2) * np.cos(theta / 2)
path = np.array(path) / np.sqrt(area)
return path
def star(num_sides, point_height=1, theta_0=0.):
"""Generate the vertices of a regular star shape.
Args:
num_sides: Int. Number of sides (i.e. number of points) in the star.
point_height: Scalar. Height of each point of the star, relative to the
radius of the star's inscribed circle.
theta_0: Float. Initial angle to start the vertices from.
Returns:
path: Array of vertices of the star, normalized so the star has area 1.
"""
point_to_center = 1 + point_height
theta = 2 * np.pi / num_sides
path = np.empty([2 * num_sides, 2])
for i in range(num_sides):
path[2 * i] = _polar2cartesian(1, i * theta + theta_0)
path[2 * i + 1] = _polar2cartesian(point_to_center,
(i + 0.5) * theta + theta_0)
area = point_to_center * num_sides * np.sin(theta / 2)
path = np.array(path) / np.sqrt(area)
return path
def spokes(num_sides, spoke_height=1, theta_0=0.):
"""Generate the vertices of a regular rectangular spoke shape.
This is like a star, except the points are rectangular. For example, if
num_sides = 4, it will look like this:
O O
O O O O
O O O
O O
O O
O O
O O O
O O O O
O O
Args:
num_sides: Int. Number of sides (i.e. number of points) in the star.
spoke_height: Scalar. Height of each spoke, relative to the radius of the
spoke shape's inscribed circle.
theta_0: Float. Initial angle to start the vertices from.
Returns:
path: Array of vertices of the spoke shape, normalized so the spoke shape
has area 1.
"""
theta = 2 * np.pi / num_sides
path = np.empty([3 * num_sides, 2])
spoke = _polar2cartesian(spoke_height, -0.5 * theta + theta_0)
for i in range(num_sides):
vertex = _polar2cartesian(1, i * theta + theta_0)
path[3 * i] = spoke + vertex
path[3 * i + 1] = vertex
spoke = _polar2cartesian(spoke_height, (i + 0.5) * theta + theta_0)
path[3 * i + 2] = spoke + vertex
area = num_sides * np.sin(theta / 2) * (2 + np.cos(theta / 2))
path = np.array(path) / np.sqrt(area)
return path
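# Usage sketch: every generator in this file returns vertices normalized to
# unit area, so different shapes render at comparable sizes. The shoelace
# formula below verifies the normalization for a pentagon.
if __name__ == '__main__':
  path = polygon(num_sides=5)
  x, y = path[:, 0], path[:, 1]
  # Shoelace formula for the area of a simple polygon.
  area = 0.5 * np.abs(np.dot(x, np.roll(y, 1)) - np.dot(y, np.roll(x, 1)))
  print(area)  # Approximately 1.0.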
|
spriteworld-master
|
spriteworld/shapes.py
|
# Copyright 2019 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
# python2 python3
"""Tasks for Spriteworld.
Each class in this file defines a task; namely, it contains a reward function and a
success function for Spriteworld.
The reward function maps an iterable of sprites to a float. The success function
maps an iterable of sprites to a bool.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import numpy as np
import six
from sklearn import metrics
@six.add_metaclass(abc.ABCMeta)
class AbstractTask(object):
"""Abstract class from which all tasks should inherit."""
@abc.abstractmethod
def reward(self, sprites):
"""Compute reward for the given configuration of sprites.
This reward is evaluated per-step by the Spriteworld environment. See
    Environment.step() in environment.py for usage. Hence if this is a smooth
    function, the agent will receive a shaped reward. Sparse rewards awarded only at
the end of an episode can be implemented by returning non-zero reward only
for a desired goal configuration of sprites (see sub-classes below for
examples).
Args:
sprites: Iterable of sprite instances.
Returns:
Float reward for the given configuration of sprites.
"""
@abc.abstractmethod
def success(self, sprites):
"""Compute whether the task has been successfully solved.
Args:
sprites: Iterable of sprite instances.
Returns:
Boolean. Whether or not the given configuration of sprites successfully
solves the task.
"""
class NoReward(AbstractTask):
"""Used for environments that have no task. Reward is always 0."""
def __init__(self):
pass
def reward(self, unused_sprites):
"""Calculate reward from sprites."""
return 0.0
def success(self, unused_sprites):
return False
class FindGoalPosition(AbstractTask):
"""Used for tasks that require moving some sprites to a goal position."""
def __init__(self,
filter_distrib=None,
goal_position=(0.5, 0.5),
terminate_distance=0.05,
terminate_bonus=0.0,
weights_dimensions=(1, 1),
sparse_reward=False,
raw_reward_multiplier=50):
"""Construct goal-finding task.
This task rewards the agent for bringing all sprites with factors contained
in a filter distribution to a goal position. Rewards are offset to be
negative, except for a termination bonus when the goal is reached.
Args:
filter_distrib: None or instance of
factor_distributions.AbstractDistribution. If None, all sprites must be
brought to the goal position. If not None, only sprites with factors
contained in this distribution must be brought to the goal position.
goal_position: Position of the goal.
terminate_distance: Distance from goal position at which to clip reward.
If all sprites are within this distance, terminate episode.
terminate_bonus: Extra bonus for getting all sprites within
terminate_distance.
weights_dimensions: Weights modifying the contributions of the (x,
y)-dimensions to the distance to goal computation.
sparse_reward: Boolean (default False), whether to provide dense rewards
or only reward at the end of an episode.
raw_reward_multiplier: Multiplier for the reward to be applied before
terminate_bonus. Empirically, 50 seems to be a good value.
"""
self._filter_distrib = filter_distrib
self._goal_position = np.asarray(goal_position)
self._terminate_bonus = terminate_bonus
self._terminate_distance = terminate_distance
self._sparse_reward = sparse_reward
self._weights_dimensions = np.asarray(weights_dimensions)
self._raw_reward_multiplier = raw_reward_multiplier
def _single_sprite_reward(self, sprite):
goal_distance = np.sum(self._weights_dimensions *
(sprite.position - self._goal_position)**2.)**0.5
raw_reward = self._terminate_distance - goal_distance
return self._raw_reward_multiplier * raw_reward
def _filtered_sprites_rewards(self, sprites):
"""Returns list of rewards for the filtered sprites."""
rewards = [
self._single_sprite_reward(s) for s in sprites if
self._filter_distrib is None or self._filter_distrib.contains(s.factors)
]
return rewards
def reward(self, sprites):
"""Calculate total reward summed over filtered sprites."""
reward = 0.
rewards = self._filtered_sprites_rewards(sprites)
if not rewards: # No sprites get through the filter, so make reward NaN
return np.nan
dense_reward = np.sum(rewards)
if all(np.array(rewards) >= 0): # task succeeded
reward += self._terminate_bonus
reward += dense_reward
elif not self._sparse_reward:
reward += dense_reward
return reward
def success(self, sprites):
return all(np.array(self._filtered_sprites_rewards(sprites)) >= 0)
class Clustering(AbstractTask):
"""Task for cluster by color/shape conditions."""
def __init__(self,
cluster_distribs,
termination_threshold=2.5,
terminate_bonus=0.0,
sparse_reward=False,
reward_range=10):
"""Reward depends on clustering sprites based on color/shape.
We indicate what feature matters for the clustering with the list of
cluster distribs. We can then compute intra-extra pairwise distances and use
the Davies-Bouldin clustering metric.
See https://en.wikipedia.org/wiki/Cluster_analysis#Internal_evaluation for
some discussion about different metrics.
Args:
cluster_distribs: list of factor distributions defining the clusters.
termination_threshold: Threshold that the metric should pass to terminate
an episode. Default of 2.5 seems to work well for 2 or 3 clusters.
terminate_bonus: Extra bonus upon task success.
      sparse_reward: Boolean (default False), whether to provide dense shaping
rewards or just the sparse ones at the end of an episode.
reward_range: Scalar, specifies range [-reward_range, 0] we remap the
rewards to whenever possible.
"""
self._cluster_distribs = cluster_distribs
self._num_clusters = len(cluster_distribs)
self._termination_threshold = termination_threshold
self._terminate_bonus = terminate_bonus
self._sparse_reward = sparse_reward
self._reward_range = reward_range
def _cluster_assignments(self, sprites):
"""Return index of cluster for all sprites."""
clusters = -np.ones(len(sprites), dtype='int')
for i, sprite in enumerate(sprites):
for c_i, distrib in enumerate(self._cluster_distribs):
if distrib.contains(sprite.factors):
clusters[i] = c_i
break
return clusters
def _compute_clustering_metric(self, sprites):
"""Compute the different clustering metrics, higher should be better."""
# Get positions of sprites, and their cluster assignments
cluster_assignments = self._cluster_assignments(sprites)
positions = np.array([sprite.position for sprite in sprites])
# Ignore objects unassigned to any cluster
positions = positions[cluster_assignments >= 0]
cluster_assignments = cluster_assignments[cluster_assignments >= 0]
return 1. / metrics.davies_bouldin_score(positions, cluster_assignments)
def reward(self, sprites):
"""Calculate reward from sprites.
    Recommendation: use Davies-Bouldin with the default termination_threshold.
Args:
sprites: list of Sprites.
Returns:
Reward, high when clustering is good.
"""
reward = 0.
metric = self._compute_clustering_metric(sprites)
# Low DB index is better clustering
dense_reward = (metric -
self._termination_threshold) * self._reward_range / 2.
if metric >= self._termination_threshold: # task succeeded
reward += self._terminate_bonus
reward += dense_reward
elif not self._sparse_reward:
reward += dense_reward
return reward
def success(self, sprites):
metric = self._compute_clustering_metric(sprites)
return metric >= self._termination_threshold
class MetaAggregated(AbstractTask):
"""Combines several tasks together."""
REWARD_AGGREGATOR = {
'sum': np.nansum,
'max': np.nanmax,
'min': np.nanmin,
'mean': np.nanmean
}
TERMINATION_CRITERION = {'all': np.all, 'any': np.any}
def __init__(self,
subtasks,
reward_aggregator='sum',
termination_criterion='all',
terminate_bonus=0.0):
"""MetaTasks which combines rewards between several subtasks.
Args:
subtasks: Iterable of Tasks.
reward_aggregator: (string) how to combine rewards together. One of
('sum', 'max', 'min', 'mean').
termination_criterion: (string) how to decide when to terminate, given
subtasks' termination signals. One of ('all', 'any')
terminate_bonus: Extra bonus for solving all subtasks, combined with
termination_criterion.
"""
if reward_aggregator not in MetaAggregated.REWARD_AGGREGATOR:
raise ValueError('Unknown reward_aggregator. {} not in {}'.format(
reward_aggregator, MetaAggregated.REWARD_AGGREGATOR))
if termination_criterion not in MetaAggregated.TERMINATION_CRITERION:
raise ValueError('Unknown termination_criterion. {} not in {}'.format(
termination_criterion, MetaAggregated.TERMINATION_CRITERION))
self._subtasks = subtasks
self._reward_aggregator = MetaAggregated.REWARD_AGGREGATOR[
reward_aggregator]
self._termination_criterion = MetaAggregated.TERMINATION_CRITERION[
termination_criterion]
self._terminate_bonus = terminate_bonus
def reward(self, sprites):
rewards = self._reward_aggregator(
[task.reward(sprites) for task in self._subtasks])
rewards += self._terminate_bonus * self.success(sprites)
return rewards
def success(self, sprites):
return self._termination_criterion(
[task.success(sprites) for task in self._subtasks])
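# Usage sketch: evaluate a goal-finding task on a hand-built sprite list.
# Assumes spriteworld.sprite is importable.
if __name__ == '__main__':
  from spriteworld import sprite
  task = FindGoalPosition(goal_position=(0.5, 0.5), terminate_distance=0.1)
  sprites = [sprite.Sprite(x=0.52, y=0.48), sprite.Sprite(x=0.9, y=0.9)]
  print(task.reward(sprites))   # Negative: the second sprite is far away.
  print(task.success(sprites))  # False until both sprites are near the goal.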
|
spriteworld-master
|
spriteworld/tasks.py
|
# Copyright 2019 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
# python2 python3
"""Constants for shapes."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import enum
import numpy as np
from spriteworld import shapes
# A selection of simple shapes
SHAPES = {
'triangle': shapes.polygon(num_sides=3, theta_0=np.pi/2),
'square': shapes.polygon(num_sides=4, theta_0=np.pi/4),
'pentagon': shapes.polygon(num_sides=5, theta_0=np.pi/2),
'hexagon': shapes.polygon(num_sides=6),
'octagon': shapes.polygon(num_sides=8),
'circle': shapes.polygon(num_sides=30),
'star_4': shapes.star(num_sides=4, theta_0=np.pi/4),
'star_5': shapes.star(num_sides=5, theta_0=np.pi + np.pi/10),
'star_6': shapes.star(num_sides=6),
'spoke_4': shapes.spokes(num_sides=4, theta_0=np.pi/4),
'spoke_5': shapes.spokes(num_sides=5, theta_0=np.pi + np.pi/10),
'spoke_6': shapes.spokes(num_sides=6),
}
class ShapeType(enum.IntEnum):
"""Enumerate SHAPES, useful for a state description of the environment."""
triangle = 1
square = 2
pentagon = 3
hexagon = 4
octagon = 5
circle = 6
star_4 = 7
star_5 = 8
star_6 = 9
spoke_4 = 10
spoke_5 = 11
spoke_6 = 12
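# Usage sketch: SHAPES keys match ShapeType member names, so a shape string
# can be mapped to an integer id for state descriptions.
if __name__ == '__main__':
  print(int(ShapeType['square']))  # 2
  print(SHAPES['square'].shape)    # (4, 2) vertex array.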
|
spriteworld-master
|
spriteworld/constants.py
|
# Copyright 2019 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
# python2 python3
"""__init__.py."""
|
spriteworld-master
|
spriteworld/__init__.py
|
# Copyright 2019 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
# python2 python3
"""Spriteworld sprite."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
from matplotlib import path as mpl_path
from matplotlib import transforms as mpl_transforms
import numpy as np
from spriteworld import constants
FACTOR_NAMES = (
'x', # x-position of sprite center-of-mass (float)
'y', # y-position of sprite center-of-mass (float)
'shape', # shape (string)
'angle', # angle in degrees (scalar)
'scale', # size of sprite (float)
'c0', # first color component (scalar)
'c1', # second color component (scalar)
'c2', # third color component (scalar)
'x_vel', # x-component of velocity (float)
'y_vel', # y-component of velocity (float)
)
# Just to catch infinite while-looping. Anything >1e4 should be plenty safe.
_MAX_TRIES = int(1e6)
class Sprite(object):
"""Sprite class.
Sprites are simple shapes parameterized by a few factors (position, shape,
angle, scale, color, velocity). They are the building blocks of Spriteworld,
  so every Spriteworld environment state is simply a collection of sprites.
We assume that (x, y) are in mathematical coordinates, i.e. (0, 0) is at the
lower-left of the frame.
"""
def __init__(self,
x=0.5,
y=0.5,
shape='square',
angle=0,
scale=0.1,
c0=0,
c1=0,
c2=0,
x_vel=0.0,
y_vel=0.0):
"""Construct sprite.
    This class is agnostic to the color scheme, namely (c0, c1, c2) could be in
RGB coordinates or HSV, HSL, etc. without this class knowing. The color
scheme conversion for rendering must be done in the renderer.
Args:
x: Float in [0, 1]. x-position.
y: Float in [0, 1]. y-position.
shape: String. Shape of the sprite. Must be a key of constants.SHAPES.
angle: Int. Angle in degrees.
scale: Float in [0, 1]. Scale of the sprite, from a point to the area of
the entire frame. This scales linearly with respect to sprite width,
hence with power 1/2 with respect to sprite area.
c0: Scalar. First coordinate of color.
c1: Scalar. Second coordinate of color.
c2: Scalar. Third coordinate of color.
x_vel: Float. x-velocity.
y_vel: Float. y-velocity.
"""
self._position = np.array([x, y])
self._shape = shape
self._angle = angle
self._scale = scale
self._color = (c0, c1, c2)
self._velocity = (x_vel, y_vel)
self._reset_centered_path()
def _reset_centered_path(self):
path = mpl_path.Path(constants.SHAPES[self._shape])
scale_rotate = (
mpl_transforms.Affine2D().scale(self._scale) +
mpl_transforms.Affine2D().rotate_deg(self._angle))
self._centered_path = scale_rotate.transform_path(path)
def move(self, motion, keep_in_frame=False):
"""Move the sprite, optionally keeping its centerpoint within the frame."""
self._position += motion
if keep_in_frame:
self._position = np.clip(self._position, 0.0, 1.0)
def update_position(self, keep_in_frame=False):
"""Update position based on velocity."""
self.move(self.velocity, keep_in_frame=keep_in_frame)
def contains_point(self, point):
"""Check if the point is contained in the Sprite."""
return self._centered_path.contains_point(point - self.position)
def sample_contained_position(self):
"""Sample random position uniformly within sprite."""
low = np.min(self._centered_path.vertices, axis=0)
high = np.max(self._centered_path.vertices, axis=0)
for _ in range(_MAX_TRIES):
sample = self._position + np.random.uniform(low, high)
if self.contains_point(sample):
return sample
raise ValueError('max_tries exceeded. There is almost surely an error in '
                     'the Spriteworld library code.')
@property
def vertices(self):
"""Numpy array of vertices of the shape."""
transform = mpl_transforms.Affine2D().translate(*self._position)
path = transform.transform_path(self._centered_path)
return path.vertices
@property
def out_of_frame(self):
return not (np.all(self._position >= [0., 0.]) and
np.all(self._position <= [1., 1.]))
@property
def x(self):
return self._position[0]
@property
def y(self):
return self._position[1]
@property
def shape(self):
return self._shape
@shape.setter
def shape(self, s):
self._shape = s
self._reset_centered_path()
@property
def angle(self):
return self._angle
@angle.setter
def angle(self, a):
rotate = mpl_transforms.Affine2D().rotate_deg(a - self._angle)
self._centered_path = rotate.transform_path(self._centered_path)
self._angle = a
@property
def scale(self):
return self._scale
@scale.setter
def scale(self, s):
    # Scale composes multiplicatively, so rescale by the ratio of scales.
    rescale = mpl_transforms.Affine2D().scale(s / self._scale)
self._centered_path = rescale.transform_path(self._centered_path)
self._scale = s
@property
def c0(self):
return self._color[0]
@property
def c1(self):
return self._color[1]
@property
def c2(self):
return self._color[2]
@property
def x_vel(self):
return self._velocity[0]
@property
def y_vel(self):
return self._velocity[1]
@property
def color(self):
return self._color
@property
def position(self):
return self._position
@property
def velocity(self):
return self._velocity
@property
def factors(self):
factors = collections.OrderedDict()
for factor_name in FACTOR_NAMES:
factors[factor_name] = getattr(self, factor_name)
return factors
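# Usage sketch: construct a sprite, move it, and read back its factors.
if __name__ == '__main__':
  s = Sprite(x=0.2, y=0.8, shape='triangle', scale=0.15)
  s.move(np.array([0.1, -0.1]))
  print(s.position)                    # Approximately [0.3, 0.7].
  print(s.contains_point(s.position))  # True: the center is inside.
  print(s.factors)                     # OrderedDict over FACTOR_NAMES.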
|
spriteworld-master
|
spriteworld/sprite.py
|
# Copyright 2019 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
# python2 python3
"""Spriteworld environment."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import dm_env
import numpy as np
import six
class Environment(dm_env.Environment):
"""Environment class for Spriteworld.
This environment uses the `dm_env` interface. For details, see
https://github.com/deepmind/dm_env
"""
def __init__(self,
task,
action_space,
renderers,
init_sprites,
keep_in_frame=True,
max_episode_length=1000,
metadata=None):
"""Construct Spriteworld environment.
Args:
task: Object with methods:
- reward: sprites -> float.
- success: sprites -> bool.
action_space: Action space with methods:
- step: action, sprites, keep_in_frame -> reward.
- action_spec: Callable returning ArraySpec or list/dict of such.
renderers: Dict where values are renderers and keys are names, reflected
in the keys of the observation.
init_sprites: Callable returning iterable of sprites, called upon
environment reset.
keep_in_frame: Bool. Whether to keep sprites in frame when they move. This
prevents episodes from terminating frequently when an agent moves a
sprite out of frame.
max_episode_length: Maximum number of steps beyond which episode will be
terminated.
metadata: Optional object to be added to the global_state.
"""
self._task = task
self._action_space = action_space
self._renderers = renderers
self._init_sprites = init_sprites
self._keep_in_frame = keep_in_frame
self._max_episode_length = max_episode_length
self._sprites = self._init_sprites()
self._step_count = 0
self._reset_next_step = True
self._renderers_initialized = False
self._metadata = metadata
def reset(self):
self._sprites = self._init_sprites()
self._step_count = 0
self._reset_next_step = False
return dm_env.restart(self.observation())
def success(self):
return self._task.success(self._sprites)
def should_terminate(self):
timeout = self._step_count >= self._max_episode_length
out_of_frame = any([sprite.out_of_frame for sprite in self._sprites])
return self.success() or out_of_frame or timeout
def step(self, action):
"""Step the environment with an action."""
if self._reset_next_step:
return self.reset()
self._step_count += 1
reward = self._action_space.step(
action, self._sprites, keep_in_frame=self._keep_in_frame)
# Update sprite positions from their velocities
for sprite in self._sprites:
sprite.update_position(keep_in_frame=self._keep_in_frame)
reward += self._task.reward(self._sprites)
observation = self.observation()
if self.should_terminate():
self._reset_next_step = True
return dm_env.termination(reward=reward, observation=observation)
else:
return dm_env.transition(reward=reward, observation=observation)
def sample_contained_position(self):
"""Sample a random position contained in a sprite.
This is useful for hand-crafted random agents.
Note that this function does not uniformly sample with respect to sprite
areas. Instead, it randomly selects a sprite, then selects a random position
within that sprite. Consequently, small sprites are represented equally to
    large sprites, and in the case of occlusion foreground sprites may be
overrepresented relative to background sprites.
Returns:
Float numpy array of shape (2,) in [0, 1]. Position contained in one of
the sprites.
"""
sprite = self._sprites[np.random.randint(len(self._sprites))]
return sprite.sample_contained_position()
def state(self):
global_state = {
'success': self.success(),
}
if self._metadata:
global_state['metadata'] = self._metadata
return {'sprites': self._sprites, 'global_state': global_state}
def observation(self):
state = self.state()
observation = {
name: renderer.render(**state)
for name, renderer in six.iteritems(self._renderers)
}
return observation
def observation_spec(self):
if not self._renderers_initialized:
      # Force a rendering so that the sizes of observation_specs are correct.
self.observation()
self._renderers_initialized = True
renderer_spec = {
name: renderer.observation_spec()
for name, renderer in six.iteritems(self._renderers)
}
return renderer_spec
def action_spec(self):
return self._action_space.action_spec()
@property
def action_space(self):
return self._action_space
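# Usage sketch: a random-agent loop over one episode. The subpackage imports
# and the task/renderer choices below are illustrative.
if __name__ == '__main__':
  from spriteworld import action_spaces
  from spriteworld import factor_distributions as distribs
  from spriteworld import renderers
  from spriteworld import sprite_generators
  from spriteworld import tasks
  distrib = distribs.Product([
      distribs.Continuous('x', 0.1, 0.9),
      distribs.Continuous('y', 0.1, 0.9),
  ])
  env = Environment(
      task=tasks.FindGoalPosition(),
      action_space=action_spaces.SelectMove(),
      renderers={'success': renderers.Success()},
      init_sprites=sprite_generators.generate_sprites(distrib, num_sprites=2),
      max_episode_length=20)
  timestep = env.reset()
  while not timestep.last():
    timestep = env.step(env.action_space.sample())
  print('Success:', env.success())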
|
spriteworld-master
|
spriteworld/environment.py
|
# Copyright 2019 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
# python2 python3
"""Wrapper to make Spriteworld conform to the OpenAI Gym interface."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from dm_env import specs
from gym import spaces
import numpy as np
def _spec_to_space(spec):
"""Convert dm_env.specs to gym.Spaces."""
if isinstance(spec, list):
return spaces.Tuple([_spec_to_space(s) for s in spec])
elif isinstance(spec, specs.DiscreteArray):
return spaces.Discrete(spec.num_values)
elif isinstance(spec, specs.BoundedArray):
return spaces.Box(
np.asscalar(spec.minimum),
np.asscalar(spec.maximum),
shape=spec.shape,
dtype=spec.dtype)
else:
raise ValueError('Unknown type for specs: {}'.format(spec))
class GymWrapper(object):
"""Wraps a Spriteworld environment into a Gym interface.
Observations will be a dictionary, with the same keys as the 'renderers' dict
provided when constructing a Spriteworld environment. Rendering is always
performed, so calling render() is a no-op.
"""
metadata = {'render.modes': ['rgb_array']}
def __init__(self, env):
self._env = env
self._last_render = None
self._action_space = None
self._observation_space = None
# Reset Spriteworld to setup the observation_specs correctly
self._env.reset()
def __getattr__(self, name):
return getattr(self._env, name)
@property
def observation_space(self):
if self._observation_space is None:
components = {}
for key, value in self._env.observation_spec().items():
components[key] = spaces.Box(
-np.inf, np.inf, value.shape, dtype=value.dtype)
self._observation_space = spaces.Dict(components)
return self._observation_space
@property
def action_space(self):
if self._action_space is None:
self._action_space = _spec_to_space(self._env.action_spec())
return self._action_space
def _process_obs(self, obs):
"""Convert and processes observations."""
for k, v in obs.items():
obs[k] = np.asarray(v)
if obs[k].dtype == np.bool:
        # Convert boolean 'success' into a float32 so agents can predict it.
obs[k] = obs[k].astype(np.float32)
if k == 'image':
self._last_render = obs[k]
return obs
def step(self, action):
"""Main step function for the environment.
Args:
action: Array R^4
Returns:
obs: dict of observations. Follows from the 'renderers' configuration
provided as parameters to Spriteworld.
reward: scalar reward.
done: True if terminal state.
info: dict with extra information (e.g. discount factor).
"""
time_step = self._env.step(action)
obs = self._process_obs(time_step.observation)
reward = time_step.reward or 0
done = time_step.last()
info = {'discount': time_step.discount}
return obs, reward, done, info
def reset(self):
"""Reset environment.
Returns:
obs: dict of observations. Follows from the 'renderers' configuration
provided as parameters to Spriteworld.
"""
time_step = self._env.reset()
return self._process_obs(time_step.observation)
def render(self, mode='rgb_array'):
"""Render function, noop for compatibility.
Args:
mode: unused, always returns an RGB array.
Returns:
Last RGB observation (cached from last observation with key 'image')
"""
del mode
return self._last_render
def close(self):
"""Unused."""
pass
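# Usage sketch: wrap a dm_env-style Spriteworld environment in the Gym
# interface. The environment construction below is illustrative.
if __name__ == '__main__':
  from spriteworld import action_spaces
  from spriteworld import environment
  from spriteworld import factor_distributions as distribs
  from spriteworld import renderers
  from spriteworld import sprite_generators
  from spriteworld import tasks
  distrib = distribs.Product([
      distribs.Continuous('x', 0.1, 0.9),
      distribs.Continuous('y', 0.1, 0.9),
  ])
  env = environment.Environment(
      task=tasks.NoReward(),
      action_space=action_spaces.SelectMove(),
      renderers={'success': renderers.Success()},
      init_sprites=sprite_generators.generate_sprites(distrib),
      max_episode_length=10)
  gym_env = GymWrapper(env)
  obs = gym_env.reset()
  obs, reward, done, info = gym_env.step(gym_env.action_space.sample())
  print(reward, done)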
|
spriteworld-master
|
spriteworld/gym_wrapper.py
|
# Copyright 2019 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
# python2 python3
"""Interactive GUI for Spriteworld.
Be aware that this UI overrides the action space and renderer for ease of
playing, so those will differ from what is specified in the task config.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging as log
import sys
from absl import logging
from matplotlib import gridspec
import matplotlib.pylab as plt
import numpy as np
from spriteworld import action_spaces
from spriteworld import environment
from spriteworld import renderers
class MatplotlibUI(object):
"""Class for visualising the environment based on Matplotlib."""
def __init__(self):
self.rewards = 10 * [np.nan]
self.rewards_bounds = [-10, 10]
self.last_success = None
plt.ion()
self._fig = plt.figure(
figsize=(9, 12), num='Spriteworld', facecolor='white')
gs = gridspec.GridSpec(2, 1, height_ratios=[3, 1])
self._ax_image = plt.subplot(gs[0])
self._ax_image.axis('off')
self._ax_scalar = plt.subplot(gs[1])
self._ax_scalar.spines['right'].set_visible(False)
self._ax_scalar.spines['top'].set_visible(False)
self._ax_scalar.xaxis.set_ticks_position('bottom')
self._ax_scalar.yaxis.set_ticks_position('left')
self._setup_callbacks()
@property
def ax_image(self):
return self._ax_image
def _setup_callbacks(self):
"""Default callbacks for the UI."""
# Pressing escape should stop the UI
def _onkeypress(event):
if event.key == 'escape':
# Stop UI
logging.info('Pressed escape, stopping UI.')
plt.close(self._fig)
sys.exit()
self._fig.canvas.mpl_connect('key_release_event', _onkeypress)
# Disable default keyboard shortcuts
for key in ('keymap.fullscreen', 'keymap.home', 'keymap.back',
'keymap.forward', 'keymap.pan', 'keymap.zoom', 'keymap.save',
'keymap.quit', 'keymap.grid', 'keymap.yscale', 'keymap.xscale',
'keymap.all_axes'):
plt.rcParams[key] = ''
# Disable logging of some matplotlib events
log.getLogger('matplotlib').setLevel('WARNING')
def _draw_observation(self, image, action):
"""Draw the latest observation."""
self._ax_image.clear()
self._ax_image.imshow(image, interpolation='none')
self._ax_image.set_xticks([])
self._ax_image.set_yticks([])
if action is not None:
self._ax_image.annotate(
'',
xycoords='axes fraction',
xy=action[:2], # Start of arrow
xytext=action[2:], # End of arrow
arrowprops={
'arrowstyle': '<|-',
'color': 'red',
'lw': 4,
})
# Indicate success
linewidth = 1
color = 'black'
if np.isnan(self.rewards[-1]):
linewidth = 8
color = 'green' if self.last_success else 'red'
for sp in self._ax_image.spines.values():
sp.set_color(color)
sp.set_linewidth(linewidth)
def _draw_rewards(self):
"""Draw the past rewards plot."""
self._ax_scalar.clear()
self._ax_scalar.set_ylabel('Rewards')
self._ax_scalar.set_xlabel('Timestep')
xs = np.arange(-len(self.rewards), 0)
self._ax_scalar.set_xticks(xs)
self._ax_scalar.axhline(y=0.0, color='lightgrey', linestyle='--')
self._ax_scalar.stem(xs, self.rewards, basefmt=' ')
self._ax_scalar.set_xlim((xs[0] - 1.0, xs[-1] + 1.0))
self._ax_scalar.set_ylim(
(self.rewards_bounds[0] - 1.0, self.rewards_bounds[1] + 1.0))
def register_callback(self, event_name, callback):
"""Register a callback for the given event."""
self._fig.canvas.mpl_connect(event_name, callback)
def update(self, timestep, action):
"""Update the visualisation with the latest timestep and action."""
reward = timestep.reward
if reward is None:
reward = np.nan
self.rewards = self.rewards[1:] + [reward]
self.rewards_bounds[0] = np.nanmin(
[np.nanmin(self.rewards), self.rewards_bounds[0]])
self.rewards_bounds[1] = np.nanmax(
[np.nanmax(self.rewards), self.rewards_bounds[1]])
self._draw_observation(timestep.observation['image'], action)
self._draw_rewards()
plt.show(block=False)
self.last_success = timestep.observation['success']
class HumanDragAndDropAgent(object):
"""Demo agent for mouse-clicking interface with DragAndDrop action space."""
def __init__(self, action_space, timeout=600):
self._action_space = action_space
self._click = None
self._timeout = timeout
def help(self):
logging.info('Click to select an object, then click again to select where '
'to move it.')
def register_callbacks(self, ui):
"""Register the matplotlib callbacks required by the agent."""
def _onclick(event):
if event.inaxes and event.inaxes == ui.ax_image:
# Map the click into axis-fraction positions (origin at bottom-left).
self._click = event.inaxes.transAxes.inverted().transform(
(event.x, event.y))
else:
self._click = None
return
ui.register_callback('button_press_event', _onclick)
def begin_episode(self):
logging.info('Starting episode')
def step(self, timestep):
"""Take a step."""
del timestep # Unused
def _get_click():
"""Get mouse click."""
click = None
while click is None:
x = plt.waitforbuttonpress(timeout=self._timeout)
if x is None:
logging.info('Timed out. You took longer than %d seconds to click.',
self._timeout)
elif x:
logging.info('You pressed a key, but were supposed to click with the '
'mouse.')
self.help()
else:
click = self._click
return click
def _get_action():
"""Get action from user."""
logging.info('Select sprite')
click_from = _get_click()
logging.info('Select target')
click_to = _get_click()
try:
action = np.concatenate((click_from, click_to)).astype(np.float32)
if any(np.isnan(action)):
raise ValueError
self._action_space.action_spec().validate(action)
return action
except (ValueError, TypeError):
logging.info('Select a valid action')
return _get_action()
action = _get_action()
return action
class HumanEmbodiedAgent(object):
"""Demo agent for keyboard interface with Embodied action space."""
MOTION_KEY_TO_ACTION = {
'up': 0,
'left': 1,
'down': 2,
'right': 3,
'w': 0,
'a': 1,
's': 2,
'd': 3
}
def __init__(self, action_space, timeout=600):
self._action_space = action_space
self._key_press = None
self._carry = False
self._movement = None
self._timeout = timeout
def help(self):
logging.info('Use WASD/arrow keys to move, hold Space to carry.')
def register_callbacks(self, ui):
"""Register the matplotlib callbacks required by the agent."""
def _onkeypress(event):
if event.key in HumanEmbodiedAgent.MOTION_KEY_TO_ACTION:
self._movement = HumanEmbodiedAgent.MOTION_KEY_TO_ACTION[event.key]
elif event.key == ' ':
self._carry = True
else:
self.help()
ui.register_callback('key_press_event', _onkeypress)
def _onkeyrelease(event):
if event.key == ' ':
self._carry = False
elif event.key in HumanEmbodiedAgent.MOTION_KEY_TO_ACTION:
self._movement = None
ui.register_callback('key_release_event', _onkeyrelease)
def begin_episode(self):
logging.info('Starting episode')
def step(self, timestep):
"""Take a step."""
del timestep # Unused
def _wait_for_movement_key_press():
"""Get key press."""
ready = False
while not ready:
x = plt.waitforbuttonpress(timeout=self._timeout)
if x is None:
logging.info('Timed out. You took longer than %d seconds to click.',
self._timeout)
elif not x:
logging.info('You clicked, but you are supposed to use the Keyboard.')
self.help()
elif self._movement is not None:
ready = True
def _get_action():
"""Get action from user."""
_wait_for_movement_key_press()
action = (int(self._carry), self._movement)
for spec, a in zip(self._action_space.action_spec(), action):
spec.validate(a)
return action
return _get_action()
def setup_run_ui(env_config, render_size, task_hsv_colors, anti_aliasing):
"""Start a Demo UI given an env_config."""
if isinstance(env_config['action_space'], action_spaces.SelectMove):
# DragAndDrop is a bit easier to demo than the SelectMove action space
env_config['action_space'] = action_spaces.DragAndDrop(scale=0.5)
agent = HumanDragAndDropAgent(env_config['action_space'])
elif isinstance(env_config['action_space'], action_spaces.Embodied):
agent = HumanEmbodiedAgent(env_config['action_space'])
else:
raise ValueError(
'Demo is not configured to run with action space {}.'.format(
env_config['action_space']))
env_config['renderers'] = {
'image':
renderers.PILRenderer(
image_size=(render_size, render_size),
color_to_rgb=renderers.color_maps.hsv_to_rgb
if task_hsv_colors else None,
anti_aliasing=anti_aliasing),
'success':
renderers.Success()
}
env = environment.Environment(**env_config)
ui = MatplotlibUI()
agent.register_callbacks(ui)
# Start RL loop
timestep = env.reset()
ui.update(timestep, action=None)
while True:
action = agent.step(timestep)
timestep = env.step(action)
if isinstance(env_config['action_space'], action_spaces.DragAndDrop):
ui.update(timestep, action)
else:
ui.update(timestep, None)
|
spriteworld-master
|
spriteworld/demo_ui.py
|
# Copyright 2019 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
# python2 python3
"""Action spaces for Spriteworld.
This file contains action space classes compatible with Spriteworld.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from dm_env import specs
import numpy as np
class SelectMove(object):
"""Select-Move action space.
This action space takes in a continuous vector of length 4 with each component
in [0, 1]. This can be intuited as representing two consecutive clicks:
[first_x, first_y, second_x, second_y].
These two clicks are then processed to generate a position and a motion:
* Position = [first_x, first_y]
* Motion = scale * [second_x - 0.5, second_y - 0.5]
If the Position, viewed as a point in the arena, lies inside of a sprite, that
sprite will be moved by Motion, which is a scaled version of the second click
relative to the center of the arena. If the Position does not lie inside of a
  sprite, then no sprite will move. So to move a sprite you have to click on it
  and then click in the direction you want it to move, as on a touch screen.
There is an optional control cost proportional to the norm of the motion.
"""
def __init__(self, scale=1.0, motion_cost=0.0, noise_scale=None):
"""Constructor.
Args:
scale: Multiplier by which the motion is scaled down. Should be in [0.0,
1.0].
motion_cost: Factor by which motion incurs cost.
noise_scale: Optional stddev of the noise. If scalar, applied to all
action space components. If vector, must have same shape as action.
"""
self._scale = scale
self._motion_cost = motion_cost
self._noise_scale = noise_scale
self._action_spec = specs.BoundedArray(
shape=(4,), dtype=np.float32, minimum=0.0, maximum=1.0)
def get_motion(self, action):
delta_pos = (action[2:] - 0.5) * self._scale
return delta_pos
def apply_noise_to_action(self, action):
if self._noise_scale:
noise = np.random.normal(
loc=0.0, scale=self._noise_scale, size=action.shape)
return action + noise
else:
return action
def get_sprite_from_position(self, position, sprites):
for sprite in sprites[::-1]:
if sprite.contains_point(position):
return sprite
return None
def step(self, action, sprites, keep_in_frame):
"""Take an action and move the sprites.
Args:
action: Numpy array of shape (4,) in [0, 1]. First two components are the
position selection, second two are the motion selection.
sprites: Iterable of sprite.Sprite() instances. If a sprite is moved by
the action, its position is updated.
keep_in_frame: Bool. Whether to force sprites to stay in the frame by
clipping their centers of mass to be in [0, 1].
Returns:
Scalar cost of taking this action.
"""
noised_action = self.apply_noise_to_action(action)
position = noised_action[:2]
motion = self.get_motion(noised_action)
clicked_sprite = self.get_sprite_from_position(position, sprites)
if clicked_sprite is not None:
clicked_sprite.move(motion, keep_in_frame=keep_in_frame)
return -self._motion_cost * np.linalg.norm(motion)
def sample(self):
"""Sample an action uniformly randomly."""
return np.random.uniform(0., 1., size=(4,))
def action_spec(self):
return self._action_spec
class DragAndDrop(SelectMove):
"""Drag-And-Drop action space.
This action space takes in a continuous vector of length 4 with each component
in [0, 1]. This can be intuited as representing two consecutive clicks:
[first_x, first_y, second_x, second_y].
These two clicks are then processed to generate a position and a motion:
* Position = [first_x, first_y]
* Motion = scale * [second_x - first_x, second_y - first_y]
* Target = [second_x, second_y]
  Similarly to SelectMove, a sprite will only move if the Position lies inside
  it. The only difference is that here the Motion is relative to the Position
  rather than to the center of the arena, so the second click effectively
  specifies a target location towards which the sprite moves.
"""
def get_motion(self, action):
pos = action[:2]
target = action[2:]
delta_pos = (target - pos) * self._scale
return delta_pos
class Embodied(object):
"""Embodied-Grid action space.
This action space treats sprites[-1] (the foreground sprite) as the agent's
body.
The action space has two components. The first is a binary `Carry/Don't Carry`
component which allows the agent to carry the sprite immediately beneath it as
it moves, if there is such a sprite. The second controls the agent's motion
  and consists of `Up/Left/Down/Right` options, in that index order.
"""
def __init__(self, step_size=0.05, motion_cost=0.):
"""Constructor.
Args:
step_size: Fraction of the arena width the sprite moves for each step.
motion_cost: Each step incurs cost motion_cost * step_size.
"""
self._step_size = step_size
self._motion_cost = motion_cost
self._action_spec = [
specs.DiscreteArray(num_values=2, dtype=np.int64),
specs.DiscreteArray(num_values=4, dtype=np.int64),
]
self.action_to_motion = {
0: np.array([0, self._step_size]), # Up
1: np.array([-self._step_size, 0]), # Left
2: np.array([0, -self._step_size]), # Down
3: np.array([self._step_size, 0]), # Right
}
def get_body_sprite(self, sprites):
"""Return the sprite representing the agent's body."""
return sprites[-1]
def get_non_body_sprites(self, sprites):
"""Return all sprites except that representing the agent's body."""
return sprites[:-1]
def get_carried_sprite(self, sprites):
body_position = self.get_body_sprite(sprites).position
for sprite in self.get_non_body_sprites(sprites)[::-1]:
if sprite.contains_point(body_position):
return sprite
return None
def step(self, action, sprites, keep_in_frame):
"""Take an action and move the sprites.
Args:
      action: Iterable of length 2. First component must be in {0, 1} and the
        second component must be in {0, 1, 2, 3}.
sprites: Iterable of sprite.Sprite() instances. sprites[-1] is the agent's
body.
keep_in_frame: Bool. Whether to force sprites to stay in the frame by
clipping their centers of mass to be in [0, 1].
Returns:
Scalar cost of taking this action.
"""
carry = action[0]
motion = self.action_to_motion[action[1]]
# Move carried sprite if necessary
if carry:
carried_sprite = self.get_carried_sprite(sprites)
if carried_sprite is not None:
carried_sprite.move(motion, keep_in_frame=keep_in_frame)
# Move agent body
self.get_body_sprite(sprites).move(motion, keep_in_frame=keep_in_frame)
return -self._motion_cost * self._step_size
def sample(self):
"""Sample an action uniformly randomly."""
return [np.random.randint(0, 2), np.random.randint(0, 4)]
def action_spec(self):
return self._action_spec
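# ----------------------------------------------------------------------------
# Usage sketch (not part of the original module): illustrates the two-click
# semantics of SelectMove and the discrete actions of Embodied. `_StubSprite`
# is a hypothetical stand-in exposing only the two methods that step() relies
# on; real code should use spriteworld.sprite.Sprite instead.
if __name__ == '__main__':

  class _StubSprite(object):
    """Hypothetical minimal sprite with `contains_point` and `move`."""

    def __init__(self, position, radius=0.1):
      self.position = np.array(position)
      self._radius = radius

    def contains_point(self, point):
      return np.linalg.norm(np.asarray(point) - self.position) < self._radius

    def move(self, motion, keep_in_frame=False):
      self.position = self.position + motion
      if keep_in_frame:
        self.position = np.clip(self.position, 0., 1.)

  # First click (0.5, 0.5) selects the sprite; second click (1.0, 0.5) lies
  # right of the arena center, so the sprite moves right by 0.5 * 0.5 = 0.25.
  sprites = [_StubSprite([0.5, 0.5])]
  SelectMove(scale=0.5).step(
      np.array([0.5, 0.5, 1.0, 0.5]), sprites, keep_in_frame=True)
  print(sprites[0].position)  # ~[0.75, 0.5]

  # Embodied action [0, 3] means "don't carry, move Right by one step_size".
  body = _StubSprite([0.2, 0.2])
  Embodied(step_size=0.05).step([0, 3], [body], keep_in_frame=True)
  print(body.position)  # ~[0.25, 0.2]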
|
spriteworld-master
|
spriteworld/action_spaces.py
|
# Copyright 2019 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
# python2 python3
"""Functions to transform between color spaces for rendering."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import colorsys
import numpy as np
def hsv_to_rgb(c):
"""Convert HSV tuple to RGB tuple."""
return tuple((255 * np.array(colorsys.hsv_to_rgb(*c))).astype(np.uint8))
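# Example (illustrative, not part of the original module): pure red in HSV is
# (0., 1., 1.), which maps to the uint8 triple (255, 0, 0).
#   >>> hsv_to_rgb((0., 1., 1.))
#   (255, 0, 0)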
|
spriteworld-master
|
spriteworld/renderers/color_maps.py
|
# Copyright 2019 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
# python2 python3
"""Abstract base class for renderers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import six
@six.add_metaclass(abc.ABCMeta)
class AbstractRenderer(object):
"""Abstract base class for renderers."""
@abc.abstractmethod
def render(self, sprites=(), global_state=None):
"""Renderer the sprites and global_state.
Args:
sprites: Iterable of sprites to be rendered.
global_state: May contain extra information for rendering (e.g.
background, symbolic/linguistic data, etc.).
"""
@abc.abstractmethod
def observation_spec(self):
"""Get observation spec for the output.
Returns:
      ArraySpec or nested structure of such. Must agree with the output of
      self.render().
"""
|
spriteworld-master
|
spriteworld/renderers/abstract_renderer.py
|
# Copyright 2019 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
# python2 python3
"""Expose renderer classes."""
from spriteworld.renderers import color_maps
from spriteworld.renderers.abstract_renderer import AbstractRenderer
from spriteworld.renderers.handcrafted import SpriteFactors
from spriteworld.renderers.handcrafted import SpritePassthrough
from spriteworld.renderers.handcrafted import Success
from spriteworld.renderers.pil_renderer import PILRenderer
|
spriteworld-master
|
spriteworld/renderers/__init__.py
|
# Copyright 2019 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
# python2 python3
"""Handcrafted renderers for Spriteworld."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from dm_env import specs
import numpy as np
from spriteworld import constants
from spriteworld import sprite as sprite_lib
from spriteworld.renderers import abstract_renderer
class SpriteFactors(abstract_renderer.AbstractRenderer):
"""Aggregates factors of the sprites into an array."""
def __init__(self, factors=sprite_lib.FACTOR_NAMES):
"""Constructor.
    Outputs a list of dicts: [{object 1 factors}, {object 2 factors}, ...]
Args:
factors: Iterable of strings. Factors to record. Must be a subset of
sprite.FACTOR_NAMES.
"""
if not set(factors).issubset(set(sprite_lib.FACTOR_NAMES)):
raise ValueError('Factors have to belong to {}.'.format(
sprite_lib.FACTOR_NAMES))
self._num_sprites = None
self._factors = factors
self._per_object_spec = {
factor: specs.Array(shape=(), dtype=np.float32) for factor in factors
}
def render(self, sprites=(), global_state=None):
"""Renders a list of sprites into a list of sprite factors.
Args:
      sprites: Iterable of sprite.Sprite instances whose factors are recorded.
global_state: Unused global state.
Returns:
      A numpy array of factor -> value dictionaries, one per sprite.
"""
del global_state
# Set number of sprites so that observation_spec is callable
self._num_sprites = len(sprites)
def _process_factor(name, value):
if name == 'shape':
value = constants.ShapeType[value].value
return float(value)
def _sprite_to_factors(sprite):
return {
factor: _process_factor(factor, getattr(sprite, factor))
for factor in self._factors
}
return np.array([_sprite_to_factors(sprite) for sprite in sprites])
def observation_spec(self):
return [self._per_object_spec for _ in range(self._num_sprites)]
class SpritePassthrough(abstract_renderer.AbstractRenderer):
"""Passes the list of Sprites directly as observation."""
def __init__(self):
"""Constructor."""
self._num_sprites = None
def render(self, sprites=(), global_state=None):
"""Sends the sprites (e.g. list of Sprites) directly through.
Args:
      sprites: Iterable of sprite.Sprite instances.
global_state: Unused global state.
Returns:
      The sprites themselves, passed through unchanged.
"""
del global_state
self._num_sprites = len(sprites)
return sprites
def observation_spec(self):
    # NumPy removed the np.object alias; use the builtin `object` dtype.
    return specs.Array(shape=(self._num_sprites,), dtype=object)
class Success(abstract_renderer.AbstractRenderer):
"""Renders whether a task has been successfully solved."""
def render(self, sprites=(), global_state=None):
"""Returns task success.
Args:
sprites: Unused iterable of sprites.
global_state: Must be a dictionary with key 'success'.
Returns:
Boolean indicating success.
"""
return global_state['success']
def observation_spec(self):
    # NumPy removed the np.bool alias; use the builtin `bool` dtype.
    return specs.Array(shape=(), dtype=bool)
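# ----------------------------------------------------------------------------
# Example (illustrative, not part of the original module): the Success
# renderer simply reads the 'success' flag out of the global state.
#   >>> Success().render(global_state={'success': True})
#   True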
|
spriteworld-master
|
spriteworld/renderers/handcrafted.py
|
# Copyright 2019 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
# python2 python3
"""Python Image Library (PIL/Pillow) renderer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from dm_env import specs
import numpy as np
from PIL import Image
from PIL import ImageDraw
from spriteworld.renderers import abstract_renderer
class PILRenderer(abstract_renderer.AbstractRenderer):
"""Render using Python Image Library (PIL/Pillow)."""
def __init__(self,
image_size=(64, 64),
anti_aliasing=1,
bg_color=None,
color_to_rgb=None):
"""Construct PIL renderer.
Args:
image_size: Int tuple (height, width). Size of output of .render().
anti_aliasing: Int. Anti-aliasing factor. Linearly scales the size of the
internal canvas.
bg_color: None or 3-tuple of ints in [0, 255]. Background color. If None,
background is (0, 0, 0).
      color_to_rgb: Callable converting a color tuple (c0, c1, c2) to a uint8
        tuple (r, g, b) in [0, 255]. If None, colors are used as-is.
"""
self._image_size = image_size
self._anti_aliasing = anti_aliasing
self._canvas_size = (anti_aliasing * image_size[0],
anti_aliasing * image_size[1])
if color_to_rgb is None:
color_to_rgb = lambda x: x
self._color_to_rgb = color_to_rgb
if bg_color is None:
bg_color = (0, 0, 0)
self._canvas_bg = Image.new('RGB', self._canvas_size, bg_color)
self._observation_spec = specs.Array(
shape=self._image_size + (3,), dtype=np.uint8)
self._canvas = Image.new('RGB', self._canvas_size)
self._draw = ImageDraw.Draw(self._canvas)
def render(self, sprites=(), global_state=None):
"""Render sprites.
Sprites are ordered from background to foreground.
Args:
sprites: Iterable of sprite.Sprite instances.
global_state: Unused global state.
Returns:
Numpy uint8 RGB array of size self._image_size + (3,).
"""
self._canvas.paste(self._canvas_bg)
for obj in sprites:
vertices = self._canvas_size * obj.vertices
color = self._color_to_rgb(obj.color)
self._draw.polygon([tuple(v) for v in vertices], fill=color)
    # Image.ANTIALIAS was removed in Pillow 10; LANCZOS is its equivalent.
    image = self._canvas.resize(self._image_size, resample=Image.LANCZOS)
# PIL uses a coordinate system with the origin (0, 0) at the upper-left, but
# our environment uses an origin at the bottom-left (i.e. mathematical
# convention). Hence we need to flip the render vertically to correct for
# that.
image = np.flipud(np.array(image))
return image
def observation_spec(self):
return self._observation_spec
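# ----------------------------------------------------------------------------
# Example (illustrative, not part of the original module): rendering an empty
# scene returns a background-colored image of the requested size.
#   >>> renderer = PILRenderer(image_size=(64, 64), anti_aliasing=2)
#   >>> image = renderer.render([])
#   >>> image.shape, image.dtype
#   ((64, 64, 3), dtype('uint8'))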
|
spriteworld-master
|
spriteworld/renderers/pil_renderer.py
|
# Copyright 2019 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
# python2 python3
"""__init__.py."""
|
spriteworld-master
|
spriteworld/configs/__init__.py
|
# Copyright 2019 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
# python2 python3
"""Goal-Finding task for embodied agent.
In this task there are target sprites of orange-green-ish color. All target
sprites must be brought to the goal location, which is the center of the arena.
There are also distractor sprites, which are blue-purple-ish in color and do
not contribute to the reward.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import numpy as np
from spriteworld import action_spaces
from spriteworld import factor_distributions as distribs
from spriteworld import renderers as spriteworld_renderers
from spriteworld import sprite_generators
from spriteworld import tasks
TERMINATE_DISTANCE = 0.075
NUM_TARGETS = lambda: np.random.randint(1, 4)
NUM_DISTRACTORS = lambda: np.random.randint(1, 4)
def get_config(mode=None):
"""Generate environment config.
Args:
mode: Unused task mode.
Returns:
config: Dictionary defining task/environment configuration. Can be fed as
kwargs to environment.Environment.
"""
del mode
shared_factors = distribs.Product([
distribs.Continuous('x', 0.1, 0.9),
distribs.Continuous('y', 0.1, 0.9),
distribs.Discrete('shape', ['square', 'triangle', 'circle']),
distribs.Discrete('scale', [0.13]),
distribs.Continuous('c1', 0.3, 1.),
distribs.Continuous('c2', 0.9, 1.),
])
target_hue = distribs.Continuous('c0', 0., 0.4)
distractor_hue = distribs.Continuous('c0', 0.5, 0.9)
target_factors = distribs.Product([
target_hue,
shared_factors,
])
distractor_factors = distribs.Product([
distractor_hue,
shared_factors,
])
target_sprite_gen = sprite_generators.generate_sprites(
target_factors, num_sprites=NUM_TARGETS)
distractor_sprite_gen = sprite_generators.generate_sprites(
distractor_factors, num_sprites=NUM_DISTRACTORS)
sprite_gen = sprite_generators.chain_generators(target_sprite_gen,
distractor_sprite_gen)
# Randomize sprite ordering to eliminate any task information from occlusions
sprite_gen = sprite_generators.shuffle(sprite_gen)
# Create the agent body
agent_body_factors = distribs.Product([
distribs.Continuous('x', 0.1, 0.9),
distribs.Continuous('y', 0.1, 0.9),
distribs.Discrete('shape', ['circle']),
distribs.Discrete('scale', [0.07]),
distribs.Discrete('c0', [1.]),
distribs.Discrete('c1', [0.]),
distribs.Discrete('c2', [1.]),
])
agent_body_gen = sprite_generators.generate_sprites(
agent_body_factors, num_sprites=1)
sprite_gen = sprite_generators.chain_generators(sprite_gen, agent_body_gen)
task = tasks.FindGoalPosition(
filter_distrib=target_hue, terminate_distance=TERMINATE_DISTANCE)
renderers = {
'image':
spriteworld_renderers.PILRenderer(
image_size=(64, 64),
anti_aliasing=5,
color_to_rgb=spriteworld_renderers.color_maps.hsv_to_rgb)
}
config = {
'task': task,
'action_space': action_spaces.Embodied(step_size=0.05),
'renderers': renderers,
'init_sprites': sprite_gen,
'max_episode_length': 50,
'metadata': {
'name': os.path.basename(__file__),
}
}
return config
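# ----------------------------------------------------------------------------
# Usage sketch (not part of the original module), assuming the sibling
# spriteworld.environment module: the config dict above unpacks directly into
# the environment constructor.
#
# from spriteworld import environment
# env = environment.Environment(**get_config())
# timestep = env.reset()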
|
spriteworld-master
|
spriteworld/configs/examples/goal_finding_embodied.py
|
# Copyright 2019 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
# python2 python3
"""__init__.py."""
from spriteworld.configs.examples import goal_finding_clustering
from spriteworld.configs.examples import goal_finding_embodied
|
spriteworld-master
|
spriteworld/configs/examples/__init__.py
|
# Copyright 2019 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
# python2 python3
r"""Goal-Finding and clustering combined task.
To demo this task, navigate to the main directory and run the following:
'''
$ python demo.py --config=spriteworld.configs.examples.goal_finding_clustering \
--task_hsv_colors=False
'''
This is a complicated task designed only to exemplify the features of the task
specification procedures.
In this task there are three kinds of sprites:
1) Those to be clustered. These are triangles, squares, and pentagons. They must
be clustered according to their color.
2) Those to be brought to goal regions. These are 4-spokes and 4-stars. They
must be brought to different sides of the arena according to their color.
Namely, the reddish ones must be brought to the right side of the arena and the
greenish ones must be brought to the left side of the arena (the y-position is
irrelevant).
3) Distractors. These are circles.
There is a train/test split: In test mode, the colors of the objects to be
clustered and the scales of those to be brought to goals are different.
Note that the colors in this task are defined in RGB space, so be sure when
running the demo on it to set --task_hsv_colors=False.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import numpy as np
from spriteworld import action_spaces
from spriteworld import factor_distributions as distribs
from spriteworld import renderers as spriteworld_renderers
from spriteworld import sprite_generators
from spriteworld import tasks
def get_config(mode='train'):
"""Generate environment config.
Args:
    mode: 'train' or 'test'.
Returns:
config: Dictionary defining task/environment configuration. Can be fed as
kwargs to environment.Environment.
"""
# Factor distributions common to all objects.
common_factors = distribs.Product([
distribs.Continuous('x', 0.1, 0.9),
distribs.Continuous('y', 0.1, 0.9),
distribs.Continuous('angle', 0, 360, dtype='int32'),
])
# train/test split for goal-finding object scales and clustering object colors
goal_finding_scale_test = distribs.Continuous('scale', 0.08, 0.12)
green_blue_colors = distribs.Product([
distribs.Continuous('c1', 64, 256, dtype='int32'),
distribs.Continuous('c2', 64, 256, dtype='int32'),
])
if mode == 'train':
goal_finding_scale = distribs.SetMinus(
distribs.Continuous('scale', 0.05, 0.15),
goal_finding_scale_test,
)
cluster_colors = distribs.Product(
[distribs.Continuous('c0', 128, 256, dtype='int32'), green_blue_colors])
elif mode == 'test':
goal_finding_scale = goal_finding_scale_test
cluster_colors = distribs.Product(
[distribs.Continuous('c0', 0, 128, dtype='int32'), green_blue_colors])
else:
raise ValueError(
'Invalid mode {}. Mode must be "train" or "test".'.format(mode))
# Create clustering sprite generators
sprite_gen_list = []
cluster_shapes = [
distribs.Discrete('shape', [s])
for s in ['triangle', 'square', 'pentagon']
]
for shape in cluster_shapes:
factors = distribs.Product([
common_factors,
cluster_colors,
shape,
distribs.Continuous('scale', 0.08, 0.12),
])
sprite_gen_list.append(
sprite_generators.generate_sprites(factors, num_sprites=2))
# Create goal-finding sprite generators
goal_finding_colors = [
distribs.Product([
distribs.Continuous('c0', 192, 256, dtype='int32'),
distribs.Continuous('c1', 0, 128, dtype='int32'),
distribs.Continuous('c2', 64, 128, dtype='int32'),
]),
distribs.Product([
distribs.Continuous('c0', 0, 128, dtype='int32'),
distribs.Continuous('c1', 192, 256, dtype='int32'),
distribs.Continuous('c2', 64, 128, dtype='int32'),
])
]
# Goal positions corresponding to the colors in goal_finding_colors
goal_finding_positions = [(0., 0.5), (1., 0.5)]
goal_finding_shapes = distribs.Discrete('shape', ['spoke_4', 'star_4'])
for colors in goal_finding_colors:
factors = distribs.Product([
common_factors,
goal_finding_scale,
goal_finding_shapes,
colors,
])
sprite_gen_list.append(
sprite_generators.generate_sprites(
factors, num_sprites=lambda: np.random.randint(1, 3)))
# Create distractor sprite generator
distractor_factors = distribs.Product([
common_factors,
distribs.Discrete('shape', ['circle']),
distribs.Continuous('c0', 64, 256, dtype='uint8'),
distribs.Continuous('c1', 64, 256, dtype='uint8'),
distribs.Continuous('c2', 64, 256, dtype='uint8'),
distribs.Continuous('scale', 0.08, 0.12),
])
sprite_gen_list.append(sprite_generators.generate_sprites(
distractor_factors, num_sprites=lambda: np.random.randint(0, 3)))
# Concat clusters into single scene to generate
sprite_gen = sprite_generators.chain_generators(*sprite_gen_list)
# Randomize sprite ordering to eliminate any task information from occlusions
sprite_gen = sprite_generators.shuffle(sprite_gen)
# Create the combined task of goal-finding and clustering
task_list = []
task_list.append(
tasks.Clustering(cluster_shapes, terminate_bonus=0., reward_range=10.))
for colors, goal_pos in zip(goal_finding_colors, goal_finding_positions):
goal_finding_task = tasks.FindGoalPosition(
distribs.Product([colors, goal_finding_shapes]),
goal_position=goal_pos,
weights_dimensions=(1, 0),
terminate_distance=0.15,
raw_reward_multiplier=30)
task_list.append(goal_finding_task)
task = tasks.MetaAggregated(
task_list, reward_aggregator='sum', termination_criterion='all')
renderers = {
'image':
spriteworld_renderers.PILRenderer(
image_size=(64, 64), anti_aliasing=5)
}
config = {
'task': task,
'action_space': action_spaces.SelectMove(scale=0.5),
'renderers': renderers,
'init_sprites': sprite_gen,
'max_episode_length': 50,
'metadata': {
'name': os.path.basename(__file__),
'mode': mode
}
}
return config
|
spriteworld-master
|
spriteworld/configs/examples/goal_finding_clustering.py
|
# Copyright 2019 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
# python2 python3
"""Goal-Finding tasks testing for generalization to new shapes.
In this task there are target sprites of orange-green-ish color. In train mode
there is one target, while in test mode there are two. All target sprites must
be brought to the goal location, which is the center of the arena. There are
always two distractor sprites, which are blue-purple-ish in color and do not
contribute to the reward.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from spriteworld import factor_distributions as distribs
from spriteworld import sprite_generators
from spriteworld import tasks
from spriteworld.configs.cobra import common
TERMINATE_DISTANCE = 0.075
NUM_DISTRACTORS = 2
MODES_NUM_TARGETS = {
'train': 1,
'test': 2,
}
def get_config(mode='train'):
"""Generate environment config.
Args:
mode: 'train' or 'test'.
Returns:
config: Dictionary defining task/environment configuration. Can be fed as
kwargs to environment.Environment.
"""
shared_factors = distribs.Product([
distribs.Continuous('x', 0.1, 0.9),
distribs.Continuous('y', 0.1, 0.9),
distribs.Discrete('shape', ['square', 'triangle', 'circle']),
distribs.Discrete('scale', [0.13]),
distribs.Continuous('c1', 0.3, 1.),
distribs.Continuous('c2', 0.9, 1.),
])
target_hue = distribs.Continuous('c0', 0., 0.4)
distractor_hue = distribs.Continuous('c0', 0.5, 0.9)
target_factors = distribs.Product([
target_hue,
shared_factors,
])
distractor_factors = distribs.Product([
distractor_hue,
shared_factors,
])
target_sprite_gen = sprite_generators.generate_sprites(
target_factors, num_sprites=MODES_NUM_TARGETS[mode])
distractor_sprite_gen = sprite_generators.generate_sprites(
distractor_factors, num_sprites=NUM_DISTRACTORS)
sprite_gen = sprite_generators.chain_generators(target_sprite_gen,
distractor_sprite_gen)
# Randomize sprite ordering to eliminate any task information from occlusions
sprite_gen = sprite_generators.shuffle(sprite_gen)
task = tasks.FindGoalPosition(
filter_distrib=target_hue, terminate_distance=TERMINATE_DISTANCE)
config = {
'task': task,
'action_space': common.action_space(),
'renderers': common.renderers(),
'init_sprites': sprite_gen,
'max_episode_length': 20,
'metadata': {
'name': os.path.basename(__file__),
'mode': mode
}
}
return config
|
spriteworld-master
|
spriteworld/configs/cobra/goal_finding_more_targets.py
|
# Copyright 2019 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
# python2 python3
"""Goal-Finding tasks testing for generalization to new shapes.
In this task there is one sprite per episode. That sprite must be brought to the
goal location, which is always the center of the arena. At training time the
sprite is a square. At test time it is either a circle or a triangle.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from spriteworld import factor_distributions as distribs
from spriteworld import sprite_generators
from spriteworld import tasks
from spriteworld.configs.cobra import common
TERMINATE_DISTANCE = 0.075
NUM_TARGETS = 1
MODES_SHAPES = {
'train': distribs.Discrete('shape', ['square']),
'test': distribs.Discrete('shape', ['triangle', 'circle']),
}
def get_config(mode='train'):
"""Generate environment config.
Args:
mode: 'train' or 'test'.
Returns:
config: Dictionary defining task/environment configuration. Can be fed as
kwargs to environment.Environment.
"""
factors = distribs.Product([
MODES_SHAPES[mode],
distribs.Continuous('x', 0.1, 0.9),
distribs.Continuous('y', 0.1, 0.9),
distribs.Discrete('scale', [0.13]),
distribs.Continuous('c0', 0., 0.4),
distribs.Continuous('c1', 0.3, 1.),
distribs.Continuous('c2', 0.9, 1.),
])
sprite_gen = sprite_generators.generate_sprites(
factors, num_sprites=NUM_TARGETS)
# Randomize sprite ordering to eliminate any task information from occlusions
sprite_gen = sprite_generators.shuffle(sprite_gen)
task = tasks.FindGoalPosition(terminate_distance=TERMINATE_DISTANCE)
config = {
'task': task,
'action_space': common.action_space(),
'renderers': common.renderers(),
'init_sprites': sprite_gen,
'max_episode_length': 20,
'metadata': {
'name': os.path.basename(__file__),
'mode': mode
}
}
return config
|
spriteworld-master
|
spriteworld/configs/cobra/goal_finding_new_shape.py
|
# Copyright 2019 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
# python2 python3
"""Goal-Finding tasks testing for generalization to new initial positions.
In this task there is one target sprite of orange-green-ish color and one
distractor sprite of blue-purple-ish color. The target must be brought to the
goal location, which is the center of the arena, while the distractor does not
contribute to the reward.
In train mode the target is initialized in any position except the lower-right
quadrant, while in test mode it is initialized only in the lower-right quadrant.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from spriteworld import factor_distributions as distribs
from spriteworld import sprite_generators
from spriteworld import tasks
from spriteworld.configs.cobra import common
TERMINATE_DISTANCE = 0.075
NUM_TARGETS = 1
NUM_DISTRACTORS = 1
MODES_TARGET_POSITIONS = {
'train':
distribs.SetMinus(
distribs.Product((
distribs.Continuous('x', 0.1, 0.9),
distribs.Continuous('y', 0.1, 0.9),
)),
distribs.Product((
distribs.Continuous('x', 0.5, 0.9),
distribs.Continuous('y', 0.5, 0.9),
)),
),
'test':
distribs.Product((
distribs.Continuous('x', 0.5, 0.9),
distribs.Continuous('y', 0.5, 0.9),
)),
}
def get_config(mode='train'):
"""Generate environment config.
Args:
mode: 'train' or 'test'.
Returns:
config: Dictionary defining task/environment configuration. Can be fed as
kwargs to environment.Environment.
"""
shared_factors = distribs.Product([
distribs.Discrete('shape', ['square', 'triangle', 'circle']),
distribs.Discrete('scale', [0.13]),
distribs.Continuous('c1', 0.3, 1.),
distribs.Continuous('c2', 0.9, 1.),
])
target_hue = distribs.Continuous('c0', 0., 0.4)
distractor_hue = distribs.Continuous('c0', 0.5, 0.9)
target_factors = distribs.Product([
MODES_TARGET_POSITIONS[mode],
target_hue,
shared_factors,
])
distractor_factors = distribs.Product([
distribs.Continuous('x', 0.1, 0.9),
distribs.Continuous('y', 0.1, 0.9),
distractor_hue,
shared_factors,
])
target_sprite_gen = sprite_generators.generate_sprites(
target_factors, num_sprites=NUM_TARGETS)
distractor_sprite_gen = sprite_generators.generate_sprites(
distractor_factors, num_sprites=NUM_DISTRACTORS)
sprite_gen = sprite_generators.chain_generators(target_sprite_gen,
distractor_sprite_gen)
# Randomize sprite ordering to eliminate any task information from occlusions
sprite_gen = sprite_generators.shuffle(sprite_gen)
task = tasks.FindGoalPosition(
filter_distrib=target_hue, terminate_distance=TERMINATE_DISTANCE)
config = {
'task': task,
'action_space': common.action_space(),
'renderers': common.renderers(),
'init_sprites': sprite_gen,
'max_episode_length': 20,
'metadata': {
'name': os.path.basename(__file__),
'mode': mode
}
}
return config
|
spriteworld-master
|
spriteworld/configs/cobra/goal_finding_new_position.py
|
# Copyright 2019 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
# python2 python3
"""Clustering task used in COBRA.
Cluster sprites by color.
We use 4 types of sprites, based on their hue.
We then compute a Davies-Bouldin clustering metric to assess clustering quality
(and generate a reward). The Clustering task uses a threshold to terminate an
episode when the clustering metric is good enough.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from spriteworld import factor_distributions as distribs
from spriteworld import sprite_generators
from spriteworld import tasks
from spriteworld.configs.cobra import common
# Task Parameters
NUM_SPRITES_PER_CLUSTER = 2
MAX_EPISODE_LENGTH = 50
# Define possible clusters (here using Hue as selection attribute)
CLUSTERS_DISTS = {
'red': distribs.Continuous('c0', 0.9, 1.),
'blue': distribs.Continuous('c0', 0.55, 0.65),
'green': distribs.Continuous('c0', 0.27, 0.37),
'yellow': distribs.Continuous('c0', 0.1, 0.2),
}
# Define train/test generalization splits
MODES = {
'train': ('blue', 'green'),
'test': ('red', 'yellow'),
}
def get_config(mode='train'):
"""Generate environment config.
Args:
mode: 'train' or 'test'.
Returns:
config: Dictionary defining task/environment configuration. Can be fed as
kwargs to environment.Environment.
"""
# Select clusters to use, and their c0 factor distribution.
c0_clusters = [CLUSTERS_DISTS[cluster] for cluster in MODES[mode]]
print('Clustering task: {}, #sprites: {}'.format(MODES[mode],
NUM_SPRITES_PER_CLUSTER))
other_factors = distribs.Product([
distribs.Continuous('x', 0.1, 0.9),
distribs.Continuous('y', 0.1, 0.9),
distribs.Discrete('shape', ['square', 'triangle', 'circle']),
distribs.Discrete('scale', [0.13]),
distribs.Continuous('c1', 0.3, 1.),
distribs.Continuous('c2', 0.9, 1.),
])
# Generate the sprites to be used in this task, by combining Hue with the
# other factors.
sprite_factors = [
distribs.Product((other_factors, c0)) for c0 in c0_clusters
]
# Convert to sprites, generating the appropriate number per cluster.
sprite_gen_per_cluster = [
sprite_generators.generate_sprites(
factors, num_sprites=NUM_SPRITES_PER_CLUSTER)
for factors in sprite_factors
]
# Concat clusters into single scene to generate.
sprite_gen = sprite_generators.chain_generators(*sprite_gen_per_cluster)
# Randomize sprite ordering to eliminate any task information from occlusions
sprite_gen = sprite_generators.shuffle(sprite_gen)
# Clustering task will define rewards
task = tasks.Clustering(c0_clusters, terminate_bonus=0., reward_range=10.)
config = {
'task': task,
'action_space': common.action_space(),
'renderers': common.renderers(),
'init_sprites': sprite_gen,
'max_episode_length': MAX_EPISODE_LENGTH,
'metadata': {
'name': os.path.basename(__file__),
'mode': mode
}
}
return config
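# ----------------------------------------------------------------------------
# Example (illustrative, not part of the original module): each entry of
# CLUSTERS_DISTS is a factor distribution over the hue channel 'c0', so
# sampling one yields a single-key dict, e.g.
#   >>> CLUSTERS_DISTS['blue'].sample()
#   {'c0': 0.58...}  # some value in [0.55, 0.65)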
|
spriteworld-master
|
spriteworld/configs/cobra/clustering.py
|
# Copyright 2019 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
# python2 python3
"""Exploration task used in COBRA.
There is no reward in this task, as it is intended for task-free,
curiosity-driven exploration.
Episodes last 10 steps, and each is initialized with 1-6 sprites of random
shape, color, and position.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import numpy as np
from spriteworld import factor_distributions as distribs
from spriteworld import sprite_generators
from spriteworld import tasks
from spriteworld.configs.cobra import common
def get_config(mode=None):
"""Generate environment config.
Args:
mode: Unused.
Returns:
config: Dictionary defining task/environment configuration. Can be fed as
kwargs to environment.Environment.
"""
del mode # No train/test split for pure exploration
factors = distribs.Product([
distribs.Continuous('x', 0.1, 0.9),
distribs.Continuous('y', 0.1, 0.9),
distribs.Discrete('shape', ['square', 'triangle', 'circle']),
distribs.Discrete('scale', [0.13]),
distribs.Continuous('c0', 0., 1.),
distribs.Continuous('c1', 0.3, 1.),
distribs.Continuous('c2', 0.9, 1.),
])
num_sprites = lambda: np.random.randint(1, 7)
sprite_gen = sprite_generators.generate_sprites(
factors, num_sprites=num_sprites)
task = tasks.NoReward()
config = {
'task': task,
'action_space': common.action_space(),
'renderers': common.renderers(),
'init_sprites': sprite_gen,
'max_episode_length': 10,
'metadata': {
'name': os.path.basename(__file__)
}
}
return config
|
spriteworld-master
|
spriteworld/configs/cobra/exploration.py
|
# Copyright 2019 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
# python2 python3
"""Sorting task used in COBRA.
Sort sprites into target locations based on color.
We use 5 narrow hue ranges (red, blue, green, purple, yellow), each associated
with a goal location (the corners and center of the arena). Each episode we
sample two sprites with different colors in random locations and reward the
agent for bringing each to its respective goal location. In the training mode
we hold out one color pair; in the test mode we sample only that pair.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
import os
import numpy as np
from spriteworld import factor_distributions as distribs
from spriteworld import sprite_generators
from spriteworld import tasks
from spriteworld.configs.cobra import common
# Task Parameters
MAX_EPISODE_LENGTH = 50
TERMINATE_DISTANCE = 0.075
RAW_REWARD_MULTIPLIER = 20.
NUM_TARGETS = 2
# Sub-tasks for each color/goal
SUBTASKS = (
{
'distrib': distribs.Continuous('c0', 0.9, 1.), # red
'goal_position': np.array([0.75, 0.75])
},
{
'distrib': distribs.Continuous('c0', 0.55, 0.65), # blue
'goal_position': np.array([0.75, 0.25])
},
{
'distrib': distribs.Continuous('c0', 0.27, 0.37), # green
'goal_position': np.array([0.25, 0.75])
},
{
'distrib': distribs.Continuous('c0', 0.73, 0.83), # purple
'goal_position': np.array([0.25, 0.25])
},
{
'distrib': distribs.Continuous('c0', 0.1, 0.2), # yellow
'goal_position': np.array([0.5, 0.5])
},
)
def get_config(mode='train'):
"""Generate environment config.
Args:
mode: 'train' or 'test'.
Returns:
config: Dictionary defining task/environment configuration. Can be fed as
kwargs to environment.Environment.
"""
# Create the subtasks and their corresponding sprite generators
subtasks = []
sprite_gen_per_subtask = []
for subtask in SUBTASKS:
subtasks.append(tasks.FindGoalPosition(
filter_distrib=subtask['distrib'],
goal_position=subtask['goal_position'],
terminate_distance=TERMINATE_DISTANCE,
raw_reward_multiplier=RAW_REWARD_MULTIPLIER))
factors = distribs.Product((
subtask['distrib'],
distribs.Continuous('x', 0.1, 0.9),
distribs.Continuous('y', 0.1, 0.9),
distribs.Discrete('shape', ['square', 'triangle', 'circle']),
distribs.Discrete('scale', [0.13]),
distribs.Continuous('c1', 0.3, 1.),
distribs.Continuous('c2', 0.9, 1.),
))
sprite_gen_per_subtask.append(
sprite_generators.generate_sprites(factors, num_sprites=1))
# Consider all combinations of subtasks
subtask_combos = list(
itertools.combinations(np.arange(len(SUBTASKS)), NUM_TARGETS))
if mode == 'train':
# Randomly sample a combination of subtasks, holding one combination out
sprite_gen = sprite_generators.sample_generator([
sprite_generators.chain_generators(
*[sprite_gen_per_subtask[i] for i in c]) for c in subtask_combos[1:]
])
elif mode == 'test':
# Use the held-out subtask combination for testing
sprite_gen = sprite_generators.chain_generators(
*[sprite_gen_per_subtask[i] for i in subtask_combos[0]])
else:
    raise ValueError('Invalid mode {}.'.format(mode))
# Randomize sprite ordering to eliminate any task information from occlusions
sprite_gen = sprite_generators.shuffle(sprite_gen)
task = tasks.MetaAggregated(
subtasks, reward_aggregator='sum', termination_criterion='all')
config = {
'task': task,
'action_space': common.action_space(),
'renderers': common.renderers(),
'init_sprites': sprite_gen,
'max_episode_length': MAX_EPISODE_LENGTH,
'metadata': {
'name': os.path.basename(__file__),
'mode': mode
}
}
return config
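# ----------------------------------------------------------------------------
# Note (illustrative, not part of the original module): with 5 subtasks and
# NUM_TARGETS = 2 there are C(5, 2) = 10 color pairs. itertools.combinations
# yields them in sorted order, so subtask_combos[0] == (0, 1): the red/blue
# pair is the held-out test combination.
#   >>> import itertools
#   >>> list(itertools.combinations(range(5), 2))[0]
#   (0, 1)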
|
spriteworld-master
|
spriteworld/configs/cobra/sorting.py
|
# Copyright 2019 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
# python2 python3
"""__init__.py."""
from spriteworld.configs.cobra import clustering
from spriteworld.configs.cobra import exploration
from spriteworld.configs.cobra import goal_finding_more_distractors
from spriteworld.configs.cobra import goal_finding_more_targets
from spriteworld.configs.cobra import goal_finding_new_position
from spriteworld.configs.cobra import goal_finding_new_shape
from spriteworld.configs.cobra import sorting
|
spriteworld-master
|
spriteworld/configs/cobra/__init__.py
|
# Copyright 2019 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
# python2 python3
"""Shared definitions and methods across all COBRA tasks."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from spriteworld import action_spaces
from spriteworld import renderers as spriteworld_renderers
def action_space():
return action_spaces.SelectMove(scale=0.25)
def renderers():
return {
'image':
spriteworld_renderers.PILRenderer(
image_size=(64, 64),
anti_aliasing=5,
color_to_rgb=spriteworld_renderers.color_maps.hsv_to_rgb,
)
}
|
spriteworld-master
|
spriteworld/configs/cobra/common.py
|
# Copyright 2019 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
# python2 python3
"""Goal-Finding tasks testing for generalization to more distractors.
In this task there are two target sprites per episode of orange-green-ish color.
Those sprites must be brought to the goal location, which is the center of the
arena. There are also distractor sprites, which are blue-purple-ish in color
and do not contribute to the reward. In train mode there is one distractor,
while in test mode there are two.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from spriteworld import factor_distributions as distribs
from spriteworld import sprite_generators
from spriteworld import tasks
from spriteworld.configs.cobra import common
TERMINATE_DISTANCE = 0.075
NUM_TARGETS = 2
MODES_NUM_DISTRACTORS = {
'train': 1,
'test': 2,
}
def get_config(mode='train'):
"""Generate environment config.
Args:
mode: 'train' or 'test'.
Returns:
config: Dictionary defining task/environment configuration. Can be fed as
kwargs to environment.Environment.
"""
shared_factors = distribs.Product([
distribs.Continuous('x', 0.1, 0.9),
distribs.Continuous('y', 0.1, 0.9),
distribs.Discrete('shape', ['square', 'triangle', 'circle']),
distribs.Discrete('scale', [0.13]),
distribs.Continuous('c1', 0.3, 1.),
distribs.Continuous('c2', 0.9, 1.),
])
target_hue = distribs.Continuous('c0', 0., 0.4)
distractor_hue = distribs.Continuous('c0', 0.5, 0.9)
target_factors = distribs.Product([
target_hue,
shared_factors,
])
distractor_factors = distribs.Product([
distractor_hue,
shared_factors,
])
target_sprite_gen = sprite_generators.generate_sprites(
target_factors, num_sprites=NUM_TARGETS)
distractor_sprite_gen = sprite_generators.generate_sprites(
distractor_factors, num_sprites=MODES_NUM_DISTRACTORS[mode])
sprite_gen = sprite_generators.chain_generators(target_sprite_gen,
distractor_sprite_gen)
# Randomize sprite ordering to eliminate any task information from occlusions
sprite_gen = sprite_generators.shuffle(sprite_gen)
task = tasks.FindGoalPosition(
filter_distrib=target_hue, terminate_distance=TERMINATE_DISTANCE)
config = {
'task': task,
'action_space': common.action_space(),
'renderers': common.renderers(),
'init_sprites': sprite_gen,
'max_episode_length': 20,
'metadata': {
'name': os.path.basename(__file__),
'mode': mode
}
}
return config
|
spriteworld-master
|
spriteworld/configs/cobra/goal_finding_more_distractors.py
|
# Copyright 2019 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
# python2 python3
"""Tests for shapes."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import absltest
from absl.testing import parameterized
import numpy as np
from PIL import Image
from PIL import ImageDraw
from spriteworld import shapes
class ShapesTest(parameterized.TestCase):
def _test_area(self, path):
im_size = 1000
path = im_size * path / 2 + im_size / 2
im = Image.new('RGB', (im_size, im_size))
draw = ImageDraw.Draw(im)
draw.polygon([tuple(p) for p in path], fill=(255, 255, 255))
    # Shapes are normalized to unit area in path coordinates, which maps to
    # 0.25 * im_size**2 pixels on the canvas; each filled pixel contributes 3
    # to the channel-wise sum below, hence the trailing factor of 3.
    desired_area = 0.25 * im_size * im_size * 3
true_area = np.sum(np.array(im) > 0)
self.assertAlmostEqual(desired_area / true_area, 1, delta=1e-2)
@parameterized.parameters(3, 4, 5, 6, 7, 8, 10)
def testPolygon(self, num_sides):
path = shapes.polygon(num_sides)
self._test_area(path)
@parameterized.parameters((3, 0.5), (3, 1.5), (5, 0.6), (5, 2.0), (8, 0.2),
(8, 3.0), (11, 1.2))
def testStar(self, num_sides, point_height):
path = shapes.star(num_sides, point_height)
self._test_area(path)
@parameterized.parameters((3, 0.5), (3, 1.5), (5, 0.6), (5, 2.0), (8, 0.2),
(8, 3.0), (11, 1.2))
def testSpokes(self, num_sides, spoke_height):
    path = shapes.spokes(num_sides, spoke_height)
self._test_area(path)
if __name__ == '__main__':
absltest.main()
|
spriteworld-master
|
tests/shapes_test.py
|
# Copyright 2019 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
# python2 python3
"""Tests for sprite."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import absltest
from absl.testing import parameterized
import numpy as np
from six.moves import range
from spriteworld import sprite
class SpriteTest(parameterized.TestCase):
def testBasicInitialization(self):
sprite.Sprite(
x=0.2,
y=0.8,
shape='triangle',
angle=45,
scale=0.3,
c0=200,
c1=150,
c2=100,
x_vel=-0.2,
y_vel=0.1)
@parameterized.parameters(
(0.5, 0.5, (-0.3, 0.2), (0.2, 0.7), False),
(0.1, 0.1, (-0.3, 0.2), (-0.2, 0.3), False),
(0.1, 0.1, (-0.3, 0.2), (0.0, 0.3), True))
def testMove(self, x, y, motion, final_position, keep_in_frame):
s = sprite.Sprite(x=x, y=y)
s.move(motion, keep_in_frame=keep_in_frame)
self.assertSequenceAlmostEqual(s.position, final_position, delta=1e-6)
@parameterized.parameters(
dict(
x=0.5,
y=0.5,
shape='square',
angle=0,
scale=0.5,
containment=[
[False, False, False, False],
[False, True, True, False],
[False, True, True, False],
[False, False, False, False],
]),
dict(
x=0.5,
y=0.5,
shape='square',
angle=45,
scale=1,
containment=[
[False, True, True, False],
[True, True, True, True],
[True, True, True, True],
[False, True, True, False],
]),
dict(
x=0.75,
y=0.75,
shape='square',
angle=0,
scale=0.5,
containment=[
[False, False, True, True],
[False, False, True, True],
[False, False, False, False],
[False, False, False, False],
]),
dict(
x=0.65,
y=0.55,
shape='triangle',
angle=0,
scale=0.5,
containment=[
[False, False, True, False],
[False, False, True, False],
[False, True, True, True],
[False, False, False, False],
]),
dict(
x=0.37,
y=0.55,
shape='star_5',
angle=0,
scale=0.6,
containment=[
[False, True, False, False],
[True, True, True, False],
[False, True, False, False],
[False, False, False, False],
]),
)
def testContainsPoint(self, x, y, shape, angle, scale, containment):
# As we use plots to prepare these tests, it's easier to write the matrix
# "in the wrong orientation" (i.e. with origin='lower') and flip it.
containment = np.flipud(containment)
linspace = np.linspace(0.1, 0.9, 4)
grid = np.stack(np.meshgrid(linspace, linspace), axis=-1)
s = sprite.Sprite(x=x, y=y, shape=shape, angle=angle, scale=scale)
eval_containment = np.array(
[[s.contains_point(p) for p in row] for row in grid])
self.assertTrue(np.allclose(eval_containment, containment))
@parameterized.parameters(
(0.5, 0.5, 'square', 0, 0.25),
(0.1, 0.8, 'square', 0, 0.25),
(0.5, 0.5, 'triangle', 0, 0.5),
(0.5, 0.5, 'triangle', 30, 0.5))
def testSampleContainedPosition(self, x, y, shape, angle, scale):
s = sprite.Sprite(x=x, y=y, shape=shape, angle=angle, scale=scale)
for _ in range(5):
p = s.sample_contained_position()
self.assertTrue(s.contains_point(p))
def testResetShape(self):
s = sprite.Sprite(scale=0.25, shape='square')
square_vertices = [[0.625, 0.625], [0.375, 0.625], [0.375, 0.375],
[0.625, 0.375]]
self.assertSequenceAlmostEqual(
np.ravel(s.vertices), np.ravel(square_vertices), delta=1e-3)
s.shape = 'triangle'
triangle_vertices = [[0.5, 0.72], [0.31, 0.39], [0.69, 0.39]]
self.assertSequenceAlmostEqual(
np.ravel(s.vertices), np.ravel(triangle_vertices), delta=1e-2)
def testResetAngle(self):
init_vertices = [[0.625, 0.625], [0.375, 0.625], [0.375, 0.375],
[0.625, 0.375]]
s = sprite.Sprite(angle=0, scale=0.25, shape='square')
self.assertSequenceAlmostEqual(
np.ravel(s.vertices), np.ravel(init_vertices), delta=1e-3)
s.angle = -45
rotated_vertices = [[0.677, 0.5], [0.5, 0.677], [0.323, 0.5], [0.5, 0.323]]
self.assertSequenceAlmostEqual(
np.ravel(s.vertices), np.ravel(rotated_vertices), delta=1e-3)
def testResetScale(self):
s = sprite.Sprite(scale=0.25, shape='square')
init_vertices = [[0.625, 0.625], [0.375, 0.625], [0.375, 0.375],
[0.625, 0.375]]
self.assertSequenceAlmostEqual(
np.ravel(s.vertices), np.ravel(init_vertices), delta=1e-3)
s.scale = 0.5
scaled_vertices = [[0.531, 0.531], [0.469, 0.531], [0.469, 0.469],
[0.531, 0.469]]
self.assertSequenceAlmostEqual(
np.ravel(s.vertices), np.ravel(scaled_vertices), delta=1e-3)
if __name__ == '__main__':
absltest.main()
|
spriteworld-master
|
tests/sprite_test.py
|
# Copyright 2019 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
# python2 python3
"""Tests for factor_distributions.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import absltest
from absl.testing import parameterized
from six.moves import range
from spriteworld import factor_distributions as distribs
def test_sampling_and_containment(test_object, d, contained, not_contained):
for _ in range(5):
test_object.assertTrue(d.contains(d.sample()))
  for contained_spec in contained:
    try:
      is_contained = d.contains(contained_spec)
    except KeyError:
      # Having the wrong keys also indicates the spec is not contained.
      is_contained = False
    test_object.assertTrue(is_contained)
  for not_contained_spec in not_contained:
    try:
      is_contained = d.contains(not_contained_spec)
    except KeyError:
      is_contained = False
    test_object.assertFalse(is_contained)
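# Illustrative usage (not part of the original tests): a "spec" is a dict
# mapping factor names to values, e.g.
#   d = distribs.Continuous('x', 0, 1)
#   test_sampling_and_containment(self, d, [{'x': 0.5}], [{'x': 2.}, {'y': 0.5}])
# where {'y': 0.5} raises KeyError inside contains() and so counts as not
# contained.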
class ContinuousTest(parameterized.TestCase):
"""Runs tests for Continuous distribution."""
@parameterized.parameters(
(0.0, 1.0, (-0.5, 2.0)),
(-1.0, -0.1, (-1.4, 0.5)),
)
def testSamplingContainmentContinuous(self, minval, maxval, not_contained):
d = distribs.Continuous('x', minval, maxval)
for _ in range(5):
self.assertTrue(d.contains(d.sample()))
for not_contained_value in not_contained:
self.assertFalse(d.contains({'x': not_contained_value}))
def testDType(self):
d_int = distribs.Continuous('x', 0, 1, dtype='int32')
d_float = distribs.Continuous('x', 0, 1, dtype='float32')
self.assertTrue(d_int.contains({'x': 0}))
self.assertTrue(d_float.contains({'x': 0}))
self.assertFalse(d_int.contains({'x': 1}))
self.assertFalse(d_float.contains({'x': 1}))
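    # Presumably the interval is half-open ([minval, maxval)) and samples are
    # cast to dtype, so 1 is not contained and integer samples drawn from
    # [0, 1) always truncate to 0: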
for _ in range(5):
self.assertEqual(d_int.sample(), {'x': 0})
class DiscreteTest(parameterized.TestCase):
"""Runs tests for Discrete distribution."""
@parameterized.parameters(
([1, 2, 3], [1, 2, 3], [0, 4]),
(['a', 'b', 'c'], ['a', 'b', 'c'], ['d', 0]),
)
def testSamplingContainmentDiscrete(self, candidates, contained,
not_contained):
d = distribs.Discrete('x', candidates)
cont = [{'x': value} for value in contained]
not_cont = [{'x': value} for value in not_contained]
test_sampling_and_containment(self, d, cont, not_cont)
class MixtureTest(parameterized.TestCase):
"""Runs tests for Mixture of distributions."""
@parameterized.named_parameters(
('DisjointContinuous', distribs.Continuous(
'x', 0, 1), distribs.Continuous('x', 2, 3), [0.5, 2.5], [1.5, 3.5]),
('OverlappingContinuous', distribs.Continuous(
'x', 0, 2), distribs.Continuous('x', 1, 3), [0.5, 2.5], [-0.5, 3.5]),
('DisjointDiscrete', distribs.Discrete(
'x', [0, 1]), distribs.Discrete('x', [2, 3]), [1, 2], [-1, 4]),
('OverlappingDiscrete', distribs.Discrete(
'x', [0, 1]), distribs.Discrete('x', [1, 2]), [0, 1, 2], [-1, 3]),
('ContinuousDiscrete', distribs.Continuous(
'x', 0, 2), distribs.Discrete('x', [1, 3]), [0.5, 3], [2.5]),
)
def testSamplingContainmentMixtureTwo(self, c_0, c_1, contained,
not_contained):
d = distribs.Mixture((c_0, c_1))
contained = [{'x': value} for value in contained]
not_contained = [{'x': value} for value in not_contained]
test_sampling_and_containment(self, d, contained, not_contained)
def testSamplingContainmentMixtureMultiple(self):
dists = [
distribs.Continuous('x', 0, 2),
distribs.Continuous('x', 1, 5),
distribs.Continuous('x', 9, 12),
distribs.Discrete('x', [7, 10]),
distribs.Discrete('x', [14]),
]
contained = [0.5, 4, 11, 7, 14]
not_contained = [5.5, 6, 8, 13]
d = distribs.Mixture(dists)
contained = [{'x': value} for value in contained]
not_contained = [{'x': value} for value in not_contained]
test_sampling_and_containment(self, d, contained, not_contained)
def testRaisesError(self):
c_0 = distribs.Discrete('x', [0])
c_1 = distribs.Discrete('y', [1])
with self.assertRaises(ValueError):
distribs.Mixture((c_0, c_1))
def testProbs(self):
c_0 = distribs.Discrete('x', [0])
c_1 = distribs.Discrete('x', [1])
d_0 = distribs.Mixture([c_0, c_1], probs=(0.3, 0.7))
d_1 = distribs.Mixture([c_0, c_1], probs=(0.0, 1.0))
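    # With probs=(0.0, 1.0) the mixture should always sample from its second
    # component, so every sample equals {'x': 1}.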
for _ in range(5):
self.assertTrue(d_0.contains(d_0.sample()))
for _ in range(5):
self.assertEqual(d_1.sample(), {'x': 1})
class IntersectionTest(parameterized.TestCase):
"""Runs tests for Intersection of distributions."""
@parameterized.named_parameters(
('ContinuousContinuous', distribs.Continuous(
'x', 0, 2), distribs.Continuous('x', 1, 3), [1.5], [0.5, 2.5]),
('DiscreteDiscrete', distribs.Discrete(
'x', [0, 1]), distribs.Discrete('x', [1, 2]), [1], [0, 2]),
('DiscreteContinuous', distribs.Discrete(
'x', [1, 3]), distribs.Continuous('x', 0, 2), [1], [0.5, 1.5, 3]),
)
def testSamplingContainmentIntersectionTwo(self, d_0, d_1, contained,
not_contained):
d = distribs.Intersection((d_0, d_1))
contained = [{'x': value} for value in contained]
not_contained = [{'x': value} for value in not_contained]
test_sampling_and_containment(self, d, contained, not_contained)
def testSamplingContainmentIntersectionMultiple(self):
dists = [
distribs.Discrete('x', [1, 2.5, 3, 4, 6]),
distribs.Discrete('x', [1, 2.5, 3, 12]),
distribs.Continuous('x', 0, 5),
distribs.Continuous('x', 2, 10),
]
contained = [2.5, 3]
not_contained = [1, 4, 8]
d = distribs.Intersection(dists)
contained = [{'x': value} for value in contained]
not_contained = [{'x': value} for value in not_contained]
test_sampling_and_containment(self, d, contained, not_contained)
def testRaisesError(self):
d_0 = distribs.Continuous('x', 0, 1)
d_1 = distribs.Continuous('x', 2, 3)
d = distribs.Intersection((d_0, d_1))
with self.assertRaises(ValueError):
d.sample()
def testIndexForSampling(self):
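    # Intersection.sample presumably draws from the component selected by
    # index_for_sampling and rejects draws not contained in the others. Index
    # 1 (Discrete over {1, 3}) can hit the intersection {1}, but index 0
    # (Continuous over [0, 2]) almost surely never samples exactly 1, so
    # sampling from it eventually raises ValueError.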
d_0 = distribs.Continuous('x', 0, 2)
d_1 = distribs.Discrete('x', [1, 3])
d = distribs.Intersection((d_0, d_1), index_for_sampling=1)
d.sample()
with self.assertRaises(ValueError):
d = distribs.Intersection((d_0, d_1), index_for_sampling=0)
d.sample()
def testKeys(self):
d_0 = distribs.Product(
(distribs.Continuous('x', 0, 2), distribs.Continuous('y', 0, 1)))
d_1 = distribs.Product(
(distribs.Continuous('x', 0, 1), distribs.Continuous('y', 0, 0.5)))
d_2 = distribs.Continuous('x', 0.4, 0.6)
distribs.Intersection((d_0, d_1))
with self.assertRaises(ValueError):
distribs.Intersection((d_0, d_2))
class SelectionTest(parameterized.TestCase):
"""Runs tests for Selection of distributions."""
@parameterized.named_parameters(
(
'Continuous',
distribs.Continuous('x', 0, 2),
distribs.Continuous('x', 1, 3),
[{
'x': 1.5
}],
[{
'x': 0.5
}, {
'x': 2.5
}],
),
(
'Discrete',
distribs.Discrete('x', [0, 1]),
distribs.Discrete('x', [1, 2]),
[{
'x': 1
}],
[{
'x': 0
}, {
'x': 2
}],
),
(
'MultiDimensional',
distribs.Product(
(distribs.Discrete('x', [1, 2]), distribs.Discrete('y', [3, 4]))),
distribs.Discrete('x', [2]),
[{
'x': 2,
'y': 3
}],
[{
'x': 1,
'y': 3
}, {
'x': 2
}, {
'x': 2,
'y': 5
}],
),
)
def testSamplingContainmentSelection(self, d_base, d_filter, contained,
not_contained):
d = distribs.Selection(d_base, d_filter)
test_sampling_and_containment(self, d, contained, not_contained)
def testRaisesErrorFailedSampling(self):
d_base = distribs.Continuous('x', 0, 1)
d_filter = distribs.Continuous('x', 2, 3)
d = distribs.Selection(d_base, d_filter)
with self.assertRaises(ValueError):
d.sample()
def testKeys(self):
d_base = distribs.Product(
(distribs.Continuous('x', 0, 2), distribs.Continuous('y', 0, 1)))
d_filter_1 = distribs.Continuous('x', 0, 1)
d_filter_2 = distribs.Continuous('z', 0.4, 0.6)
distribs.Selection(d_base, d_filter_1)
with self.assertRaises(ValueError):
distribs.Selection(d_base, d_filter_2)
class ProductTest(parameterized.TestCase):
"""Runs tests for Product of distributions."""
@parameterized.named_parameters(
('ContinuousContinuous', distribs.Continuous(
'x', 0, 2), distribs.Continuous('y', 1, 3), [{
'x': 0.5,
'y': 2.5
}, {
'x': 1.5,
'y': 1.5
}], [{
'x': 0.5,
'y': 0.5
}, {
'x': 2.5,
'y': 1.5
}]),
('DiscreteDiscrete', distribs.Discrete(
'x', [0, 1]), distribs.Discrete('y', [1, 2]), [{
'x': 0,
'y': 2
}, {
'x': 1,
'y': 1
}], [{
'x': 1,
'y': 0
}, {
'x': 2,
'y': 2
}]),
('DiscreteContinuous', distribs.Discrete(
'x', [1, 3]), distribs.Continuous('y', 0, 2), [{
'x': 1,
'y': 1
}, {
'x': 3,
'y': 0.5
}], [{
'x': 2,
'y': 1
}, {
'x': 3,
'y': 3
}]),
)
def testSamplingContainmentProductTwo(self, d_0, d_1, contained,
not_contained):
d = distribs.Product((d_0, d_1))
test_sampling_and_containment(self, d, contained, not_contained)
def testSamplingContainmentProductMultiple(self):
dists = [
distribs.Discrete('x', [1, 2.5, 3, 4, 6]),
distribs.Discrete('y', [1, 2.5, 3, 12]),
distribs.Continuous('z', 0, 5),
distribs.Continuous('w', 2, 10),
]
contained = [{'x': 2.5, 'y': 12, 'z': 3.5, 'w': 9}]
not_contained = [
{'x': 2.5, 'y': 12, 'z': 3.5, 'w': 1},
{'x': 3.5, 'y': 12, 'z': 3.5, 'w': 9},
]
d = distribs.Product(dists)
test_sampling_and_containment(self, d, contained, not_contained)
def testRaisesError(self):
d_0 = distribs.Continuous('x', 0, 1)
d_1 = distribs.Continuous('x', 2, 3)
with self.assertRaises(ValueError):
distribs.Product((d_0, d_1))
  def testKeys(self):
dists = [
distribs.Discrete('x', [1, 2.5, 3, 12]),
distribs.Continuous('y', 0, 5),
distribs.Continuous('z', 2, 10),
]
d = distribs.Product(dists)
self.assertEqual(d.keys, set(('x', 'y', 'z')))
class SetMinusTest(parameterized.TestCase):
"""Runs tests for SetMinus of distributions."""
@parameterized.named_parameters(
('ContinuousContinuous', distribs.Continuous(
'x', 0, 2), distribs.Continuous('x', 1, 3), [0.5], [1.5]),
('DiscreteDiscrete', distribs.Discrete(
'x', [0, 1]), distribs.Discrete('x', [1, 2]), [0], [1]),
('DiscreteContinuous', distribs.Discrete(
'x', [1, 3]), distribs.Continuous('x', 0, 2), [3], [1]),
('ContinuousDiscrete', distribs.Continuous(
'x', 0, 2), distribs.Discrete('x', [1, 3]), [0.5, 1.5], [1]),
)
def testSamplingContainmentSetMinusTwo(self, d_0, d_1, contained,
not_contained):
d = distribs.SetMinus(d_0, d_1)
contained = [{'x': value} for value in contained]
not_contained = [{'x': value} for value in not_contained]
test_sampling_and_containment(self, d, contained, not_contained)
def testSamplingContainmentSetMinusMultiple(self):
base = distribs.Continuous('x', 2, 10)
hold_out = distribs.Mixture([
distribs.Discrete('x', [1, 4, 6]),
distribs.Discrete('x', [3, 8, 9, 12]),
distribs.Continuous('x', 3, 5),
])
contained = [{'x': value} for value in [2.5, 5.5, 7, 9.5]]
not_contained = [{'x': value} for value in [4, 6, 9, 11]]
d = distribs.SetMinus(base, hold_out)
test_sampling_and_containment(self, d, contained, not_contained)
def testRaisesError(self):
d_0 = distribs.Continuous('x', 0, 2)
d_1 = distribs.Continuous('y', 1, 3)
with self.assertRaises(ValueError):
distribs.SetMinus(d_0, d_1)
class CompositionTest(parameterized.TestCase):
"""Runs tests for compositions of distribution operations."""
def testCornerUnion(self):
square_0 = distribs.Product([
distribs.Continuous('x', 0, 3),
distribs.Continuous('y', 0, 3),
])
hold_out_0 = distribs.Product([
distribs.Continuous('x', 1, 3),
distribs.Continuous('y', 0, 2),
])
square_1 = distribs.Product([
distribs.Continuous('x', 2, 5),
distribs.Continuous('y', 0, 3),
])
hold_out_1 = distribs.Product([
distribs.Continuous('x', 2, 4),
distribs.Continuous('y', 1, 3),
])
corner_0 = distribs.SetMinus(square_0, hold_out_0)
corner_1 = distribs.SetMinus(square_1, hold_out_1)
corner_union = distribs.Mixture([corner_0, corner_1])
contained = [
{'x': 0.5, 'y': 0.5},
{'x': 0.5, 'y': 2.5},
{'x': 2.5, 'y': 2.5},
{'x': 2.5, 'y': 0.5},
{'x': 4.5, 'y': 0.5},
{'x': 4.5, 'y': 2.5},
]
not_contained = [
{'x': 1.5, 'y': 0.5},
{'x': 1.5, 'y': 1.5},
{'x': 2.5, 'y': 1.5},
{'x': 3.5, 'y': 1.5},
{'x': 3.5, 'y': 2.5},
]
test_sampling_and_containment(self, corner_union, contained, not_contained)
def testCubeWithTunnel(self):
cube = distribs.Product([
distribs.Continuous('x', 0, 1),
distribs.Continuous('y', 0, 1),
distribs.Continuous('z', 0, 1),
])
tunnel = distribs.Product([
distribs.Continuous('x', 0.25, 0.75),
distribs.Continuous('y', 0.25, 0.75),
])
cube_with_tunnel = distribs.SetMinus(cube, tunnel)
contained = [
{'x': 0.2, 'y': 0.2, 'z': 0.2},
{'x': 0.2, 'y': 0.2, 'z': 0.5},
{'x': 0.2, 'y': 0.5, 'z': 0.5}
]
not_contained = [
{'x': 0.5, 'y': 0.5, 'z': 0.5},
{'x': 0.5, 'y': 0.5, 'z': 0.2},
]
test_sampling_and_containment(self, cube_with_tunnel, contained,
not_contained)
if __name__ == '__main__':
absltest.main()
|
spriteworld-master
|
tests/factor_distributions_test.py
|
# Copyright 2019 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
# python2 python3
"""__init__.py."""
|
spriteworld-master
|
tests/__init__.py
|
# Copyright 2019 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
# python2 python3
"""Tests for gym wrapper."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import absltest
from gym import spaces
import numpy as np
from six.moves import range
from spriteworld import action_spaces
from spriteworld import environment
from spriteworld import gym_wrapper
from spriteworld import renderers as spriteworld_renderers
from spriteworld import sprite
from spriteworld import tasks
class GymWrapperTest(absltest.TestCase):
def testContinuousActions(self):
renderers = {
'image': spriteworld_renderers.PILRenderer(image_size=(64, 64),)
}
init_sprites = lambda: [sprite.Sprite(c0=255)]
max_episode_length = 5
spriteworld_env = environment.Environment(
tasks.NoReward(),
action_spaces.SelectMove(),
renderers,
init_sprites,
max_episode_length=max_episode_length)
env = gym_wrapper.GymWrapper(spriteworld_env)
self.assertEqual(
env.observation_space,
spaces.Dict({
'image':
spaces.Box(-np.inf, np.inf, shape=(64, 64, 3), dtype=np.uint8)
}))
self.assertEqual(env.action_space,
spaces.Box(0., 1., shape=(4,), dtype=np.float32))
for _ in range(3):
env.reset()
for _ in range(max_episode_length - 1):
action = env.action_space.sample()
obs, reward, done, _ = env.step(action)
self.assertEqual(obs['image'].dtype, np.uint8)
self.assertFalse(done)
self.assertEqual(reward, 0.)
action = env.action_space.sample()
_, _, done, _ = env.step(action)
self.assertTrue(done)
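      # Stepping again after the episode ends appears to start a new episode,
      # so done is False on the next step.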
_, _, done, _ = env.step(action)
self.assertFalse(done)
def testEmbodiedActions(self):
renderers = {
'image': spriteworld_renderers.PILRenderer(image_size=(64, 64),)
}
init_sprites = lambda: [sprite.Sprite(c0=255)]
max_episode_length = 5
spriteworld_env = environment.Environment(
tasks.NoReward(),
action_spaces.Embodied(),
renderers,
init_sprites,
max_episode_length=max_episode_length)
env = gym_wrapper.GymWrapper(spriteworld_env)
self.assertEqual(
env.observation_space,
spaces.Dict({
'image':
spaces.Box(-np.inf, np.inf, shape=(64, 64, 3), dtype=np.uint8)
}))
self.assertEqual(env.action_space,
spaces.Tuple([spaces.Discrete(2),
spaces.Discrete(4)]))
for _ in range(3):
env.reset()
for _ in range(max_episode_length - 1):
action = env.action_space.sample()
obs, reward, done, _ = env.step(action)
self.assertEqual(obs['image'].dtype, np.uint8)
self.assertFalse(done)
self.assertEqual(reward, 0.)
action = env.action_space.sample()
_, _, done, _ = env.step(action)
self.assertTrue(done)
_, _, done, _ = env.step(action)
self.assertFalse(done)
if __name__ == '__main__':
absltest.main()
|
spriteworld-master
|
tests/gym_wrapper_test.py
|
# Copyright 2019 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
# python2 python3
"""Tests for tasks."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import absltest
from absl.testing import parameterized
import mock
import numpy as np
from spriteworld import factor_distributions as distribs
from spriteworld import sprite
from spriteworld import tasks
class GoalPositionTest(parameterized.TestCase):
def _mock_sprites(self, sprite_positions):
sprites = []
for sprite_pos in sprite_positions:
mocksprite = mock.Mock(spec=sprite.Sprite)
mocksprite.position = sprite_pos
sprites.append(mocksprite)
return sprites
@parameterized.parameters(
([np.array([0., 0.])], (0.5, 0.5), -30.4, False),
([np.array([0.4, 0.6])], (0.5, 0.5), -2.1, False),
([np.array([0.43, 0.56])], (0.5, 0.5), 0.4, True),
([np.array([0.48, 0.52]), np.array([0.4, 0.6])], (0.5, 0.5), 1.5, False),
([np.array([0.48, 0.52]), np.array([0.5, 0.5])], (0.5, 0.5), 8.6, True))
def testBasicReward(self, sprite_positions, goal_position, reward, success):
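    # Our inference from the expected values (not stated in the original):
    # reward ~= sum_i 50 * (terminate_distance - dist_i) over goal sprites,
    # where 50 is the default raw_reward_multiplier, and success requires
    # every sprite to be within terminate_distance of the goal.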
sprites = self._mock_sprites(sprite_positions)
task = tasks.FindGoalPosition(
goal_position=goal_position, terminate_distance=0.1)
self.assertAlmostEqual(task.reward(sprites), reward, delta=0.1)
self.assertEqual(task.success(sprites), success)
@parameterized.parameters(
([np.array([0.4, 0.6])], 0.15, 0.4, True),
([np.array([0.36, 0.5])], 0.15, 0.5, True),
([np.array([0.34, 0.5])], 0.15, -0.5, False),
([np.array([0.34, 0.5])], 0.2, 2., True),
([np.array([0.34, 0.39])], 0.2, 0.2, True),
([np.array([0.34, 0.37])], 0.2, -0.3, False))
def testTerminateDistance(self, sprite_positions, terminate_distance, reward,
success):
sprites = self._mock_sprites(sprite_positions)
task = tasks.FindGoalPosition(
goal_position=(0.5, 0.5), terminate_distance=terminate_distance)
self.assertAlmostEqual(task.reward(sprites), reward, delta=0.1)
self.assertEqual(task.success(sprites), success)
@parameterized.parameters(
([np.array([0.4, 0.52])], 3., -0.1),
([np.array([0.43, 0.52])], 3., 4.4),
([np.array([0.43, 0.52])], 1., 2.4),
([np.array([0.43, 0.52]), np.array([0.4, 0.52])], 3., 1.3),
([np.array([0.43, 0.52]), np.array([0.43, 0.52])], 3., 5.7))
def testTerminateBonus(self, sprite_positions, terminate_bonus, reward):
sprites = self._mock_sprites(sprite_positions)
task = tasks.FindGoalPosition(
goal_position=(0.5, 0.5),
terminate_distance=0.1,
terminate_bonus=terminate_bonus)
self.assertAlmostEqual(task.reward(sprites), reward, delta=0.1)
@parameterized.parameters(
([np.array([0.43, 0.52])], (1, 1), 1.4, True),
([np.array([0.43, 0.52])], (3, 1), -1.1, False),
([np.array([0.3, 0.52])], (7, 2), -21.5, False),
([np.array([0.3, 0.52])], (0.1, 0.2), 1.8, True),
)
def testWeightsDimensions(self, sprite_positions, weights_dimensions, reward,
success):
sprites = self._mock_sprites(sprite_positions)
task = tasks.FindGoalPosition(
goal_position=(0.5, 0.5),
terminate_distance=0.1,
weights_dimensions=weights_dimensions)
self.assertAlmostEqual(task.reward(sprites), reward, delta=0.1)
self.assertEqual(task.success(sprites), success)
@parameterized.parameters(
([np.array([0.35, 0.52])], 0., 50.0, -2.6),
([np.array([0.35, 0.52])], 0., 10.0, -0.5),
([np.array([0.43, 0.52])], 1., 10.0, 1.3),
([np.array([0.43, 0.52]), np.array([0.4, 0.52])], 0., 50.0, 1.3),
([np.array([0.43, 0.52]), np.array([0.43, 0.52])], 0., 10.0, 0.5))
def testRewardMultiplier(self, sprite_positions, terminate_bonus,
reward_multiplier, reward):
sprites = self._mock_sprites(sprite_positions)
task = tasks.FindGoalPosition(
goal_position=(0.5, 0.5),
terminate_distance=0.1,
terminate_bonus=terminate_bonus,
raw_reward_multiplier=reward_multiplier)
self.assertAlmostEqual(task.reward(sprites), reward, delta=0.1)
@parameterized.parameters(
([np.array([0.35, 0.52])], 1., 0.),
([np.array([0.43, 0.52])], 1., 2.4),
([np.array([0.43, 0.52])], 3., 4.4),
([np.array([0.43, 0.52]), np.array([0.4, 0.55])], 1., 0.),
([np.array([0.43, 0.52]), np.array([0.43, 0.52])], 1., 3.7),
([np.array([0.43, 0.52]), np.array([0.43, 0.52])], 3., 5.7))
def testSparseReward(self, sprite_positions, terminate_bonus, reward):
sprites = self._mock_sprites(sprite_positions)
task = tasks.FindGoalPosition(
goal_position=(0.5, 0.5),
terminate_distance=0.1,
sparse_reward=True,
terminate_bonus=terminate_bonus)
self.assertAlmostEqual(task.reward(sprites), reward, delta=0.1)
def testFilterDistrib(self):
sprites = [
sprite.Sprite(x=0.45, y=0.45, c0=64),
sprite.Sprite(x=0.45, y=0.55, c0=128),
sprite.Sprite(x=0.55, y=0.45, c0=192),
sprite.Sprite(x=0.4, y=0.4, c0=255),
]
filter_distribs = [
distribs.Continuous('c0', 0, 65), # selects sprites[:1]
distribs.Continuous('c0', 0, 129), # selects sprites[:2]
distribs.Continuous('c0', 0, 193), # selects sprites[:3]
distribs.Continuous('c0', 0, 256), # selects sprites[:4]
distribs.Continuous('c0', 65, 256), # selects sprites[1:4]
]
task_list = [
tasks.FindGoalPosition(
filter_distrib=x, goal_position=(0.5, 0.5), terminate_distance=0.1)
for x in filter_distribs
]
rewards = [1.5, 2.9, 4.4, 2.3, 0.9]
successes = [True, True, True, False, False]
for t, r, s in zip(task_list, rewards, successes):
self.assertAlmostEqual(t.reward(sprites), r, delta=0.1)
self.assertEqual(t.success(sprites), s)
def testNoFilteredSprites(self):
sprites = [sprite.Sprite(x=0.45, y=0.45, c0=255)]
filter_distrib = distribs.Continuous('c0', 0, 254)
r = tasks.FindGoalPosition(
filter_distrib=filter_distrib,
goal_position=(0.5, 0.5),
terminate_distance=0.1).reward(sprites)
self.assertTrue(np.isnan(r))
class ClusteringTest(parameterized.TestCase):
def setUp(self):
super(ClusteringTest, self).setUp()
self.sprites = [
sprite.Sprite(x=0.2, y=0.2, c0=64),
sprite.Sprite(x=0.3, y=0.3, c0=128),
sprite.Sprite(x=0.8, y=0.9, c0=192),
sprite.Sprite(x=0.9, y=0.8, c0=255),
]
self.cluster_distribs = [
distribs.Continuous('c0', 0, 129),
distribs.Continuous('c0', 190, 256),
]
@parameterized.parameters(
      ([[0.2, 0.2], [0.21, 0.21], [0.8, 0.8], [0.81, 0.81]], 287.5, True),
([[0.2, 0.2], [0.25, 0.25], [0.8, 0.8], [0.81, 0.81]], 84.2, True),
([[0.2, 0.2], [0.53, 0.53], [0.8, 0.8], [0.81, 0.81]], 0.4, True),
([[0.2, 0.53], [0.53, 0.2], [0.8, 0.8], [0.81, 0.81]], 0.4, True),
([[0.2, 0.2], [0.53, 0.53], [0.8, 0.8], [0.9, 0.9]], -1.2, False),
([[0.2, 0.2], [0.53, 0.53], [0.8, 0.9], [0.9, 0.8]], -1.2, False),
)
def test4Sprites(self, positions, reward, success):
sprites = [
sprite.Sprite(x=positions[0][0], y=positions[0][1], c0=64),
sprite.Sprite(x=positions[1][0], y=positions[1][1], c0=128),
sprite.Sprite(x=positions[2][0], y=positions[2][1], c0=192),
sprite.Sprite(x=positions[3][0], y=positions[3][1], c0=255),
]
cluster_distribs = [
distribs.Continuous('c0', 0, 129),
distribs.Continuous('c0', 190, 256),
]
task = tasks.Clustering(cluster_distribs=cluster_distribs)
self.assertAlmostEqual(task.reward(sprites), reward, delta=0.1)
self.assertEqual(task.success(sprites), success)
@parameterized.parameters(
([[0.2, 0.2], [0.3, 0.3]], [[0.8, 0.8], [0.8, 0.9], [0.9, 0.9]], 18.7),
([[0.2, 0.2], [0.3, 0.3]], [[0.8, 0.8], [0.8, 0.9], [0.9, 0.2]], -2.9),
([[0.2, 0.2], [0.3, 0.3], [0.25, 0.3]], [[0.8, 0.8], [0.8, 0.9],
[0.9, 0.9]], 21.2),
([[0.2, 0.2], [0.3, 0.3], [0.4, 0.8]], [[0.8, 0.8], [0.8, 0.9],
[0.9, 0.9]], -1.8),
)
def testMoreSprites(self, positions_0, positions_1, reward):
sprites = [sprite.Sprite(x=p[0], y=p[1], c0=75) for p in positions_0]
sprites.extend([sprite.Sprite(x=p[0], y=p[1], c0=225) for p in positions_1])
cluster_distribs = [
distribs.Continuous('c0', 50, 100),
distribs.Continuous('c0', 200, 250),
]
task = tasks.Clustering(cluster_distribs=cluster_distribs)
self.assertAlmostEqual(task.reward(sprites), reward, delta=0.1)
def test3Clusters(self):
sprites = [
sprite.Sprite(x=0.2, y=0.2, c0=64),
sprite.Sprite(x=0.3, y=0.3, c0=64),
sprite.Sprite(x=0.8, y=0.9, c0=128),
sprite.Sprite(x=0.9, y=0.8, c0=128),
sprite.Sprite(x=0.8, y=0.9, c0=255),
sprite.Sprite(x=0.9, y=0.8, c0=255),
]
cluster_distribs = [
distribs.Continuous('c0', 0, 100),
distribs.Continuous('c0', 100, 150),
distribs.Continuous('c0', 200, 256),
]
task = tasks.Clustering(cluster_distribs=cluster_distribs)
self.assertAlmostEqual(task.reward(sprites), 17.5, delta=0.1)
@parameterized.parameters(
(2.5, 17.5),
(5., 5.),
(10., -20.),
)
def testTerminationThreshold(self, termination_threshold, reward):
task = tasks.Clustering(
cluster_distribs=self.cluster_distribs,
termination_threshold=termination_threshold)
self.assertAlmostEqual(task.reward(self.sprites), reward, delta=0.1)
@parameterized.parameters(
(2.5, 0., 17.5),
(2.5, 5., 22.5),
(5., 3., 8.),
(10., 7., -20.),
)
def testTerminateBonus(self, termination_threshold, terminate_bonus, reward):
task = tasks.Clustering(
cluster_distribs=self.cluster_distribs,
terminate_bonus=terminate_bonus,
termination_threshold=termination_threshold)
self.assertAlmostEqual(task.reward(self.sprites), reward, delta=0.1)
@parameterized.parameters(
(2.5, 10., 17.5),
(2.5, 5., 8.8),
(5., 3., 1.5),
(10., 7., -14.),
)
def testRewardRange(self, termination_threshold, reward_range, reward):
task = tasks.Clustering(
cluster_distribs=self.cluster_distribs,
reward_range=reward_range,
termination_threshold=termination_threshold)
self.assertAlmostEqual(task.reward(self.sprites), reward, delta=0.1)
@parameterized.parameters(
(2.5, 0., 17.5),
(7., 0., 0.),
(10., 0., 0.),
(5., 5., 10.),
)
def testSparseReward(self, termination_threshold, terminate_bonus, reward):
task = tasks.Clustering(
cluster_distribs=self.cluster_distribs,
sparse_reward=True,
terminate_bonus=terminate_bonus,
termination_threshold=termination_threshold)
self.assertAlmostEqual(task.reward(self.sprites), reward, delta=0.1)
class MetaAggregatedTest(parameterized.TestCase):
def setUp(self):
super(MetaAggregatedTest, self).setUp()
self.subtasks = [
tasks.FindGoalPosition(
filter_distrib=distribs.Continuous('c0', 0, 100),
goal_position=np.array([0.2, 0.2]),
terminate_distance=0.1),
tasks.FindGoalPosition(
filter_distrib=distribs.Continuous('c0', 100, 200),
goal_position=np.array([0.5, 0.5]),
terminate_distance=0.1,
terminate_bonus=5.0),
tasks.FindGoalPosition(
filter_distrib=distribs.Continuous('c0', 200, 256),
goal_position=np.array([0.8, 0.8]),
terminate_distance=0.1,
terminate_bonus=10.0),
]
self.success_sprites = [
sprite.Sprite(x=0.2, y=0.2, c0=50),
sprite.Sprite(x=0.5, y=0.45, c0=150),
sprite.Sprite(x=0.85, y=0.75, c0=250),
]
self.success_rewards = [5., 7.5, 11.5]
self.failure_sprites = [
sprite.Sprite(x=0.2, y=0.8, c0=50),
sprite.Sprite(x=0.3, y=0.45, c0=150),
sprite.Sprite(x=0.9, y=0.75, c0=250),
]
self.failure_rewards = [-25., -5.3, -0.6]
def _get_sprites_and_reward_list(self, successes):
success_inds = np.nonzero(successes)[0]
failure_inds = np.nonzero(np.logical_not(successes))[0]
sprites = [self.success_sprites[i] for i in success_inds]
sprites.extend([self.failure_sprites[i] for i in failure_inds])
reward_list = [self.success_rewards[i] for i in success_inds]
reward_list.extend([self.failure_rewards[i] for i in failure_inds])
return sprites, reward_list
@parameterized.parameters(
('all', (True, True, True), True),
('all', (True, True, False), False),
('all', (True, False, False), False),
('all', (False, False, False), False),
('any', (True, True, True), True),
('any', (True, True, False), True),
('any', (True, False, False), True),
('any', (False, False, False), False),
)
def testSum(self, termination_criterion, successes, success):
task = tasks.MetaAggregated(
self.subtasks,
reward_aggregator='sum',
termination_criterion=termination_criterion)
sprites, reward_list = self._get_sprites_and_reward_list(successes)
self.assertAlmostEqual(task.reward(sprites), sum(reward_list), delta=0.1)
self.assertEqual(task.success(sprites), success)
@parameterized.parameters(
((True, True, True),),
((True, True, False),),
((True, False, False),),
((False, False, False),),
)
def testMax(self, successes):
task = tasks.MetaAggregated(
self.subtasks,
reward_aggregator='max')
sprites, reward_list = self._get_sprites_and_reward_list(successes)
self.assertAlmostEqual(task.reward(sprites), max(reward_list), delta=0.1)
@parameterized.parameters(
((True, True, True),),
((True, True, False),),
((True, False, False),),
((False, False, False),),
)
def testMin(self, successes):
task = tasks.MetaAggregated(self.subtasks, reward_aggregator='min')
sprites, reward_list = self._get_sprites_and_reward_list(successes)
self.assertAlmostEqual(task.reward(sprites), min(reward_list), delta=0.1)
@parameterized.parameters(
((True, True, True),),
((True, True, False),),
((True, False, False),),
((False, False, False),),
)
def testMean(self, successes):
task = tasks.MetaAggregated(self.subtasks, reward_aggregator='mean')
sprites, reward_list = self._get_sprites_and_reward_list(successes)
self.assertAlmostEqual(
task.reward(sprites), np.mean(reward_list), delta=0.1)
@parameterized.parameters(
((True, True, True), 'sum', 0., 24.),
((True, True, True), 'sum', 5., 29.),
((True, True, False), 'sum', 5., 11.9),
((True, True, True), 'min', 0., 5.),
((True, True, True), 'min', 5., 10.),
((True, True, False), 'min', 5., -0.6),
)
def testTerminateBonus(self, successes, reward_aggregator, terminate_bonus,
reward):
task = tasks.MetaAggregated(
self.subtasks,
reward_aggregator=reward_aggregator,
terminate_bonus=terminate_bonus)
sprites, _ = self._get_sprites_and_reward_list(successes)
self.assertAlmostEqual(task.reward(sprites), reward, delta=0.1)
if __name__ == '__main__':
absltest.main()
|
spriteworld-master
|
tests/tasks_test.py
|
# Copyright 2019 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
# python2 python3
"""Tests for sprite_generators."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import absltest
from absl.testing import parameterized
import numpy as np
from spriteworld import factor_distributions as distribs
from spriteworld import sprite
from spriteworld import sprite_generators
_distrib_0 = distribs.Product([
distribs.Discrete('x', [0.5]),
distribs.Discrete('y', [0.5]),
distribs.Discrete('shape', ['square', 'triangle']),
distribs.Discrete('c0', [255]),
distribs.Discrete('c1', [255]),
distribs.Discrete('c2', [255]),
])
_distrib_1 = distribs.Product([
distribs.Discrete('x', [0.5]),
distribs.Discrete('y', [0.5]),
distribs.Discrete('shape', ['hexagon', 'circle', 'star_5']),
distribs.Discrete('c0', [255]),
distribs.Discrete('c1', [255]),
distribs.Discrete('c2', [255]),
])
class SpriteGeneratorTest(parameterized.TestCase):
@parameterized.parameters(1, 2, 5)
def testGenerateSpritesLengthType(self, num_sprites):
g = sprite_generators.generate_sprites(_distrib_0, num_sprites=num_sprites)
sprite_list = g()
self.assertIsInstance(sprite_list, list)
self.assertLen(sprite_list, num_sprites)
self.assertIsInstance(sprite_list[0], sprite.Sprite)
def testGenerateSpritesCallableNum(self):
minval = 3
maxval = 6
    num_sprites = lambda: np.random.randint(minval, maxval)
g = sprite_generators.generate_sprites(_distrib_0, num_sprites=num_sprites)
sprite_list = g()
self.assertGreaterEqual(len(sprite_list), minval)
self.assertLess(len(sprite_list), maxval)
class ChainGeneratorsTest(absltest.TestCase):
def testOutput(self):
g_0 = sprite_generators.generate_sprites(_distrib_0, num_sprites=1)
g_1 = sprite_generators.generate_sprites(_distrib_1, num_sprites=2)
g_chain = sprite_generators.chain_generators(g_0, g_1)
sprite_list = g_chain()
self.assertIsInstance(sprite_list, list)
self.assertLen(sprite_list, 3)
self.assertTrue(_distrib_0.contains(sprite_list[0].factors))
self.assertTrue(_distrib_1.contains(sprite_list[1].factors))
self.assertTrue(_distrib_1.contains(sprite_list[2].factors))
class SampleGeneratorTest(absltest.TestCase):
def testOutput(self):
g_0 = sprite_generators.generate_sprites(_distrib_0, num_sprites=1)
g_1 = sprite_generators.generate_sprites(_distrib_1, num_sprites=1)
g_chain = sprite_generators.sample_generator((g_0, g_1))
sprite_list = g_chain()
self.assertIsInstance(sprite_list, list)
self.assertLen(sprite_list, 1)
self.assertNotEqual(
_distrib_0.contains(sprite_list[0].factors),
_distrib_1.contains(sprite_list[0].factors))
class ShuffleTest(absltest.TestCase):
def testOutput(self):
g = sprite_generators.generate_sprites(_distrib_0, num_sprites=5)
g_shuffle = sprite_generators.shuffle(g)
sprite_list = g_shuffle()
self.assertIsInstance(sprite_list, list)
self.assertLen(sprite_list, 5)
if __name__ == '__main__':
absltest.main()
|
spriteworld-master
|
tests/sprite_generators_test.py
|
# Copyright 2019 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
# python2 python3
"""Tests for environment."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import absltest
from dm_env import test_utils
import numpy as np
from six.moves import range
from spriteworld import action_spaces
from spriteworld import environment
from spriteworld import renderers
from spriteworld import sprite
from spriteworld import tasks
class EnvironmentTest(test_utils.EnvironmentTestMixin, absltest.TestCase):
def assertValidObservation(self, observation):
# Override this method from test_utils.EnvironmentTestMixin to make it
# support a dict of specs as observation.
observation_spec = self.environment.observation_spec()
for k, v in observation.items():
self.assertConformsToSpec(v, observation_spec[k])
def make_object_under_test(self):
"""Environment creator used by test_utils.EnvironmentTestMixin."""
env = environment.Environment(
task=tasks.NoReward(),
action_space=action_spaces.SelectMove(),
renderers={},
init_sprites=lambda: [sprite.Sprite(c0=255)],
max_episode_length=7)
return env
def testMaxEpisodeLength(self):
env = self.make_object_under_test()
action = np.array([0.5, 0.5, 0.5, 0.5])
env.step(action)
for _ in range(3):
for _ in range(6):
timestep = env.step(action)
self.assertTrue(timestep.mid())
timestep = env.step(action)
self.assertTrue(timestep.last())
timestep = env.step(action)
self.assertTrue(timestep.first())
def testTaskTermination(self):
task = tasks.FindGoalPosition(goal_position=(0.5, 0.5))
action_space = action_spaces.SelectMove()
env_renderers = {}
init_sprites = lambda: [sprite.Sprite(x=0.25, y=0.25, c0=255)]
env = environment.Environment(task, action_space, env_renderers,
init_sprites)
donothing_action = np.array([0.25, 0.25, 0.5, 0.5])
success_action = np.array([0.25, 0.25, 0.75, 0.75])
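    # Assuming SelectMove's default scale of 1, the success action selects the
    # sprite at (0.25, 0.25) and moves it by (0.75 - 0.5, 0.75 - 0.5) =
    # (0.25, 0.25) onto the goal at (0.5, 0.5).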
timestep = env.step(donothing_action)
self.assertTrue(timestep.first())
timestep = env.step(donothing_action)
self.assertTrue(timestep.mid())
timestep = env.step(success_action)
self.assertTrue(timestep.last())
timestep = env.step(success_action)
self.assertTrue(timestep.first())
class EnvironmentRenderersTest(absltest.TestCase):
def make_object_under_test(self, renderer):
self.environment = environment.Environment(
task=tasks.NoReward(),
action_space=action_spaces.SelectMove(),
renderers={'obs': renderer},
init_sprites=lambda: [sprite.Sprite(c0=255)],
max_episode_length=7)
def testSpriteFactors(self):
self.make_object_under_test(renderer=renderers.SpriteFactors())
self.environment.observation_spec()
action = np.array([0.5, 0.5, 0.5, 0.5])
self.environment.step(action)
if __name__ == '__main__':
absltest.main()
|
spriteworld-master
|
tests/environment_test.py
|
# Copyright 2019 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
# python2 python3
"""Tests for action_spaces."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import absltest
from absl.testing import parameterized
import numpy as np
from spriteworld import action_spaces
from spriteworld import sprite
class SelectMoveTest(parameterized.TestCase):
@parameterized.named_parameters(
('Motion', 1, np.array([0.5, 0.5, 0.2, 0.75]), (-0.3, 0.25)),
('SameMotion', 1, np.array([0.2, 0.5, 0.2, 0.75]), (-0.3, 0.25)),
('SmallerScale', 0.5, np.array([0.2, 0.5, 0.2, 0.75]), (-0.15, 0.125)),
)
def testGetMotion(self, scale, action, true_motion):
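    # The expected values are consistent with
    # motion = scale * (action[2:4] - 0.5), e.g. for scale=1 the action tail
    # (0.2, 0.75) gives motion (-0.3, 0.25).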
action_space = action_spaces.SelectMove(scale=scale)
motion = action_space.get_motion(action)
self.assertTrue(np.allclose(motion, true_motion, atol=1e-4))
@parameterized.named_parameters(
('NoCost', 1, np.array([0.5, 0.5, 0.2, 0.75]), 0., 0.),
('Cost', 1, np.array([0.5, 0.5, 0.2, 0.75]), 1., -0.39),
('SameCost', 1, np.array([0.2, 0.3, 0.2, 0.75]), 1., -0.39),
('LowerCost', 0.5, np.array([0.5, 0.5, 0.2, 0.75]), 1., -0.195),
)
def testMotionCost(self, scale, action, motion_cost, true_cost):
action_space = action_spaces.SelectMove(
scale=scale, motion_cost=motion_cost)
cost = action_space.step(action, sprites=[], keep_in_frame=False)
self.assertAlmostEqual(cost, true_cost, delta=0.01)
def testMoveSprites(self):
"""Take a series of actions and repeatedly check sprite motions."""
action_space = action_spaces.SelectMove(scale=0.5)
sprites = [sprite.Sprite(x=0.55, y=0.5), sprite.Sprite(x=0.5, y=0.5)]
# Move second (top) sprite
action_space.step(
np.array([0.52, 0.52, 0.5, 0.48]), sprites, keep_in_frame=False)
self.assertTrue(np.allclose(sprites[0].position, [0.55, 0.5], atol=1e-5))
self.assertTrue(np.allclose(sprites[1].position, [0.5, 0.49], atol=1e-5))
# Move first (bottom) sprite
action_space.step(
np.array([0.58, 0.5, 0.9, 0.9]), sprites, keep_in_frame=False)
self.assertTrue(np.allclose(sprites[0].position, [0.75, 0.7], atol=1e-5))
self.assertTrue(np.allclose(sprites[1].position, [0.5, 0.49], atol=1e-5))
# Move neither sprite
action_space.step(
np.array([0.58, 0.5, 0.9, 0.9]), sprites, keep_in_frame=False)
self.assertTrue(np.allclose(sprites[0].position, [0.75, 0.7], atol=1e-5))
self.assertTrue(np.allclose(sprites[1].position, [0.5, 0.49], atol=1e-5))
# Move second (top) sprite
action_space.step(
np.array([0.5, 0.5, 0.2, 0.5]), sprites, keep_in_frame=False)
self.assertTrue(np.allclose(sprites[0].position, [0.75, 0.7], atol=1e-5))
self.assertTrue(np.allclose(sprites[1].position, [0.35, 0.49], atol=1e-5))
# Move first (bottom) sprite
action_space.step(
np.array([0.78, 0.74, 0.9, 0.9]), sprites, keep_in_frame=False)
self.assertTrue(np.allclose(sprites[0].position, [0.95, 0.9], atol=1e-5))
self.assertTrue(np.allclose(sprites[1].position, [0.35, 0.49], atol=1e-5))
# Move first (bottom) sprite
action_space.step(
np.array([0.92, 0.9, 0.9, 0.5]), sprites, keep_in_frame=True)
self.assertTrue(np.allclose(sprites[0].position, [1., 0.9], atol=1e-5))
self.assertTrue(np.allclose(sprites[1].position, [0.35, 0.49], atol=1e-5))
# Move first (bottom) sprite
action_space.step(
np.array([0.98, 0.9, 0.7, 0.9]), sprites, keep_in_frame=False)
self.assertTrue(np.allclose(sprites[0].position, [1.1, 1.1], atol=1e-5))
self.assertTrue(np.allclose(sprites[1].position, [0.35, 0.49], atol=1e-5))
@parameterized.named_parameters(
('NoNoise', np.array([0.5, 0.5, 0.2, 0.75]), 0.),
('Noise', np.array([0.5, 0.5, 0.2, 0.75]), 0.2),
('NoiseOutOfBounds', np.array([0.2, 0.3, 0.9, 0.05]), 0.2),
('HighNoise', np.array([0.5, 0.5, 0.2, 0.75]), 0.5))
def testNoiseScale(self, action, noise_scale):
action_space = action_spaces.SelectMove(scale=0.1, noise_scale=noise_scale)
action_space.apply_noise_to_action(action)
class DragAndDropTest(parameterized.TestCase):
@parameterized.named_parameters(
('MoveUpRight', 1, np.array([0.5, 0.5, 0.75, 0.75]), (0.25, 0.25)),
('MoveDownLeft', 1, np.array([0.2, 0.5, -0.2, -0.75]), (-0.4, -1.25)),
('ScaledMove', 0.5, np.array([0.5, 0.5, 0.8, 0.8]), (0.15, 0.15)),
('MoveEdge', 0.5, np.array([0.0, 0.0, -0.2, -0.4]), (-0.1, -0.2)),
)
def testGetMotion(self, scale, action, true_motion):
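    # The expected values are consistent with
    # motion = scale * (action[2:4] - action[0:2]): the first two entries are
    # the grab point and the last two the drop point.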
action_space = action_spaces.DragAndDrop(scale=scale)
motion = action_space.get_motion(action)
self.assertTrue(np.allclose(motion, true_motion, atol=1e-4))
def testMoveSprites(self):
"""Take a series of actions and repeatedly check sprite motions."""
action_space = action_spaces.DragAndDrop(scale=0.5)
sprites = [sprite.Sprite(x=0.55, y=0.5), sprite.Sprite(x=0.5, y=0.5)]
# Move second (top) sprite
action_space.step(
np.array([0.52, 0.52, 0.52, 0.5]), sprites, keep_in_frame=False)
self.assertTrue(np.allclose(sprites[0].position, [0.55, 0.5], atol=1e-5))
self.assertTrue(np.allclose(sprites[1].position, [0.5, 0.49], atol=1e-5))
# Move first (bottom) sprite
action_space.step(
np.array([0.58, 0.5, 0.98, 0.9]), sprites, keep_in_frame=False)
self.assertTrue(np.allclose(sprites[0].position, [0.75, 0.7], atol=1e-5))
self.assertTrue(np.allclose(sprites[1].position, [0.5, 0.49], atol=1e-5))
# Move neither sprite
action_space.step(
np.array([0.58, 0.5, 0.9, 0.9]), sprites, keep_in_frame=False)
self.assertTrue(np.allclose(sprites[0].position, [0.75, 0.7], atol=1e-5))
self.assertTrue(np.allclose(sprites[1].position, [0.5, 0.49], atol=1e-5))
# Move second (top) sprite
action_space.step(
np.array([0.5, 0.5, 0.2, 0.5]), sprites, keep_in_frame=False)
self.assertTrue(np.allclose(sprites[0].position, [0.75, 0.7], atol=1e-5))
self.assertTrue(np.allclose(sprites[1].position, [0.35, 0.49], atol=1e-5))
# Move first (bottom) sprite
action_space.step(
np.array([0.78, 0.74, 0.98, 0.94]), sprites, keep_in_frame=False)
self.assertTrue(np.allclose(sprites[0].position, [0.85, 0.8], atol=1e-5))
self.assertTrue(np.allclose(sprites[1].position, [0.35, 0.49], atol=1e-5))
# Move first (bottom) sprite
action_space.step(
np.array([0.82, 0.8, 1.3, 1.0]), sprites, keep_in_frame=True)
self.assertTrue(np.allclose(sprites[0].position, [1., 0.9], atol=1e-5))
self.assertTrue(np.allclose(sprites[1].position, [0.35, 0.49], atol=1e-5))
# Move first (bottom) sprite
action_space.step(
np.array([0.99, 0.9, 1.19, 1.3]), sprites, keep_in_frame=False)
self.assertTrue(np.allclose(sprites[0].position, [1.1, 1.1], atol=1e-5))
self.assertTrue(np.allclose(sprites[1].position, [0.35, 0.49], atol=1e-5))
class EmbodiedTest(parameterized.TestCase):
@parameterized.named_parameters(
('Up', 0, 0.1, (0., 0.1)),
('Down', 2, 0.1, (0., -0.1)),
('Left', 1, 0.1, (-0.1, 0.)),
('Right', 3, 0.1, (0.1, 0.)),
('MoreRight', 3, 0.5, (0.5, 0.)),
)
def testGetMotion(self, motion_action, step_size, true_motion):
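    # The mapping appears to be 0/1/2/3 -> up/left/down/right, scaled by
    # step_size.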
action_space = action_spaces.Embodied(step_size=step_size)
motion = action_space.action_to_motion[motion_action]
self.assertTrue(np.allclose(motion, true_motion, atol=1e-5))
@parameterized.named_parameters(
dict(
testcase_name='Up',
init_positions=[[0.5, 0.5], [0.2, 0.8]],
action=(0, 0),
final_positions=[[0.5, 0.5], [0.2, 0.9]]),
dict(
testcase_name='UpCarry',
init_positions=[[0.5, 0.5], [0.2, 0.8]],
action=(1, 0),
final_positions=[[0.5, 0.5], [0.2, 0.9]]),
dict(
testcase_name='RightCarry',
init_positions=[[0.5, 0.5], [0.45, 0.55]],
action=(1, 3),
final_positions=[[0.6, 0.5], [0.55, 0.55]]),
dict(
testcase_name='LeftCarry',
init_positions=[[0.5, 0.5], [0.45, 0.55]],
action=(1, 1),
final_positions=[[0.4, 0.5], [0.35, 0.55]]),
dict(
testcase_name='DownCarry',
init_positions=[[0.5, 0.5], [0.45, 0.55]],
action=(1, 2),
final_positions=[[0.5, 0.4], [0.45, 0.45]]),
dict(
testcase_name='StayInBounds',
init_positions=[[0.95, 0.02], [0.95, 0.05]],
action=(1, 3),
final_positions=[[1., 0.02], [1., 0.05]]),
dict(
testcase_name='GoOutOfBound',
init_positions=[[0.95, 0.02], [0.95, 0.05]],
action=(1, 3),
final_positions=[[1.05, 0.02], [1.05, 0.05]],
keep_in_frame=False),
dict(
testcase_name='MoveCorrectEmbodied',
init_positions=[[0.45, 0.55], [0.5, 0.5], [0.45, 0.55]],
action=(1, 3),
final_positions=[[0.45, 0.55], [0.6, 0.5], [0.55, 0.55]]),
)
def testMoveSprites(self,
init_positions,
action,
final_positions,
keep_in_frame=True):
"""Take a series of actions and repeatedly check sprite motions."""
action_space = action_spaces.Embodied(step_size=0.1)
sprites = [
sprite.Sprite(x=pos[0], y=pos[1], shape='square', scale=0.15)
for pos in init_positions
]
action_space.step(action, sprites, keep_in_frame=keep_in_frame)
for s, p in zip(sprites, final_positions):
self.assertTrue(np.allclose(s.position, p, atol=1e-5))
if __name__ == '__main__':
absltest.main()
|
spriteworld-master
|
tests/action_spaces_test.py
|
# Copyright 2019 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
# python2 python3
"""__init__.py."""
|
spriteworld-master
|
tests/renderers/__init__.py
|
# Copyright 2019 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
# python2 python3
"""Tests for pil_renderer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import colorsys
from absl.testing import absltest
import numpy as np
from spriteworld import sprite
from spriteworld.renderers import pil_renderer
class PilRendererTest(absltest.TestCase):
def _get_sprites(self):
"""Get list of sprites."""
sprites = [
sprite.Sprite(
x=0.75, y=0.95, shape='spoke_6', scale=0.2, c0=20, c1=50, c2=80),
sprite.Sprite(
x=0.2, y=0.3, shape='triangle', scale=0.1, c0=150, c1=255, c2=100),
sprite.Sprite(
x=0.7, y=0.5, shape='square', scale=0.3, c0=0, c1=255, c2=0),
sprite.Sprite(
x=0.5, y=0.5, shape='square', scale=0.3, c0=255, c1=0, c2=0),
]
return sprites
def testBasicFunctionality(self):
renderer = pil_renderer.PILRenderer(image_size=(64, 64))
renderer.render(self._get_sprites())
def testBackground(self):
bg_color = (5, 6, 7)
renderer = pil_renderer.PILRenderer(image_size=(64, 64), bg_color=bg_color)
image = renderer.render(self._get_sprites())
self.assertSequenceEqual(list(image[5, 5]), bg_color)
def testOcclusion(self):
renderer = pil_renderer.PILRenderer(image_size=(64, 64))
image = renderer.render(self._get_sprites())
self.assertSequenceEqual(list(image[32, 32]), [255, 0, 0])
self.assertSequenceEqual(list(image[32, 50]), [0, 255, 0])
def testAntiAliasing(self):
renderer = pil_renderer.PILRenderer(image_size=(16, 16), anti_aliasing=5)
image = renderer.render(self._get_sprites())
self.assertSequenceEqual(list(image[4, 6]), [0, 0, 0])
self.assertSequenceEqual(list(image[6, 6]), [255, 0, 0])
# Python2 and Python3 give slightly different anti-aliasing, so we specify
# bounds for border values:
self.assertTrue(all(image[5, 6] >= [50, 0, 0]))
self.assertTrue(all(image[5, 6] <= [120, 30, 0]))
self.assertTrue(all(image[7, 6] >= [200, 0, 0]))
self.assertTrue(all(image[7, 6] <= [255, 50, 0]))
renderer = pil_renderer.PILRenderer(image_size=(16, 16), anti_aliasing=1)
image = renderer.render(self._get_sprites())
self.assertSequenceEqual(list(image[4, 6]), [0, 0, 0])
self.assertSequenceEqual(list(image[6, 6]), [255, 0, 0])
self.assertSequenceEqual(list(image[7, 6]), [255, 0, 0])
def testColorToRGB(self):
s = sprite.Sprite(x=0.5, y=0.5, shape='square', c0=0.2, c1=0.5, c2=0.5)
def _color_to_rgb(c):
return tuple((255 * np.array(colorsys.hsv_to_rgb(*c))).astype(np.uint8))
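    # Sanity check: for (h, s, v) = (0.2, 0.5, 0.5) colorsys gives roughly
    # (0.45, 0.5, 0.25), i.e. (114, 127, 63) after scaling to uint8, matching
    # the assertion below.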
renderer = pil_renderer.PILRenderer(
image_size=(64, 64), color_to_rgb=_color_to_rgb)
image = renderer.render([s])
self.assertSequenceEqual(list(image[32, 32]), [114, 127, 63])
if __name__ == '__main__':
absltest.main()
|
spriteworld-master
|
tests/renderers/pil_renderer_test.py
|
# Copyright 2019 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
# python2 python3
"""Tests for handcrafted."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import absltest
from absl.testing import parameterized
import numpy as np
from six.moves import range
from spriteworld import constants as const
from spriteworld import sprite as sprite_lib
from spriteworld.renderers import handcrafted
class SpriteFactorsTest(parameterized.TestCase):
def testWrongFactors(self):
handcrafted.SpriteFactors(factors=('x', 'y', 'scale'))
with self.assertRaises(ValueError):
handcrafted.SpriteFactors(factors=('position', 'scale'))
with self.assertRaises(ValueError):
handcrafted.SpriteFactors(factors=('x', 'y', 'size'))
def testSingleton(self):
sprite = sprite_lib.Sprite(
x=0.1, y=0.3, shape='square', scale=0.5, c0=0, c1=0, c2=255)
renderer = handcrafted.SpriteFactors()
renderer.render(sprites=[sprite])
def testSequence(self):
sprites = [
sprite_lib.Sprite(x=np.random.rand(), y=np.random.rand())
for _ in range(5)
]
renderer = handcrafted.SpriteFactors()
renderer.render(sprites=sprites)
@parameterized.parameters(1, 2, 5)
def testOutputLength(self, num_sprites):
sprites = [sprite_lib.Sprite() for _ in range(num_sprites)]
renderer = handcrafted.SpriteFactors()
outputs = renderer.render(sprites=sprites)
self.assertLen(outputs, num_sprites)
@parameterized.parameters((1, ('x', 'y', 'scale', 'c0', 'c1', 'c2', 'shape',
'angle', 'x_vel', 'y_vel')),
(1, ('x', 'y', 'scale', 'c0', 'c1', 'c2', 'shape')),
(2, ('x', 'y', 'scale', 'c0', 'c1', 'c2', 'shape')),
(5, ('x', 'y', 'angle', 'x_vel', 'y_vel')))
def testFactorSubset(self, num_sprites, factors):
sprites = [sprite_lib.Sprite() for _ in range(num_sprites)]
renderer = handcrafted.SpriteFactors(factors=factors)
outputs = renderer.render(sprites=sprites)
output_keys = [set(x) for x in outputs]
self.assertSequenceEqual(output_keys, num_sprites * [set(factors)])
@parameterized.parameters((1, ('x', 'y', 'scale', 'c0', 'c1', 'c2', 'shape',
'angle', 'x_vel', 'y_vel')),
(1, ('x', 'y', 'scale', 'c0', 'c1', 'c2', 'shape')),
(2, ('x', 'y', 'scale', 'c0', 'c1', 'c2', 'shape')),
(5, ('x', 'y', 'angle', 'x_vel', 'y_vel')))
def testObservationSpec(self, num_sprites, factors):
sprites = [sprite_lib.Sprite() for _ in range(num_sprites)]
renderer = handcrafted.SpriteFactors(factors=factors)
renderer.render(sprites=sprites)
obs_spec = renderer.observation_spec()
for v in obs_spec[0].values():
self.assertEqual(v.shape, ())
obs_spec_keys = [set(x) for x in obs_spec]
self.assertSequenceEqual(obs_spec_keys, num_sprites * [set(factors)])
@parameterized.parameters(
(0.5, 0.5, 'square', 0, 0, 255, 0.5, 0),
(0.5, 0.5, 'square', 255, 0, 0, 0.5, 0),
(0.5, 0.8, 'octagon', 0.4, 0.8, 0.5, 0.6, 90),
(0.5, 0.3, 'star_5', 180, 180, 0, 0.2, 240),
)
def testAttributesSingleton(self, x, y, shape, c0, c1, c2, scale, angle):
sprite = sprite_lib.Sprite(
x=x, y=y, shape=shape, c0=c0, c1=c1, c2=c2, scale=scale, angle=angle)
renderer = handcrafted.SpriteFactors()
outputs = renderer.render(sprites=[sprite])[0]
self.assertEqual(outputs['shape'], const.ShapeType[shape].value)
for (name, value) in (('x', x), ('y', y), ('c0', c0), ('c1', c1),
('c2', c2), ('scale', scale), ('angle', angle)):
self.assertAlmostEqual(outputs[name], value, delta=1e-4)
def testAttributesTwoSprites(self):
x = [0.5, 0.3]
y = [0.4, 0.8]
shape = ['square', 'spoke_4']
c0 = [0, 200]
c1 = [255, 100]
c2 = [0, 200]
scale = [0.2, 0.3]
angle = [0, 120]
x_vel = [0.0, 0.1]
y_vel = [-0.2, 0.05]
sprites = []
for i in range(2):
sprites.append(
sprite_lib.Sprite(
x=x[i],
y=y[i],
shape=shape[i],
c0=c0[i],
c1=c1[i],
c2=c2[i],
scale=scale[i],
angle=angle[i],
x_vel=x_vel[i],
y_vel=y_vel[i]))
renderer = handcrafted.SpriteFactors()
outputs = renderer.render(sprites=sprites)
for i in range(2):
self.assertEqual(outputs[i]['shape'], const.ShapeType[shape[i]].value)
for (name, value) in (('x', x), ('y', y), ('c0', c0), ('c1', c1),
('c2', c2), ('scale', scale), ('angle', angle),
('x_vel', x_vel), ('y_vel', y_vel)):
self.assertAlmostEqual(outputs[i][name], value[i], delta=1e-4)
class SpritePassthroughTest(parameterized.TestCase):
def testRenderOne(self):
sprite = sprite_lib.Sprite(
x=0.1, y=0.3, shape='square', scale=0.5, c0=0, c1=0, c2=255)
renderer = handcrafted.SpritePassthrough()
observation = renderer.render(sprites=[sprite])
self.assertEqual(observation, [sprite])
@parameterized.parameters((3,), (5,), (10,))
def testKSprites(self, num_sprites):
sprites = [
sprite_lib.Sprite(x=np.random.rand(), y=np.random.rand())
for _ in range(num_sprites)
]
renderer = handcrafted.SpritePassthrough()
observation = renderer.render(sprites=sprites)
self.assertSequenceEqual(observation, sprites)
obs_spec = renderer.observation_spec()
    self.assertEqual(obs_spec.shape, (num_sprites,))
class SuccessTest(absltest.TestCase):
def testRender(self):
renderer = handcrafted.Success()
self.assertTrue(renderer.render(global_state={'success': True}))
self.assertFalse(renderer.render(global_state={'success': False}))
with self.assertRaises(KeyError):
renderer.render(global_state={})
if __name__ == '__main__':
absltest.main()
|
spriteworld-master
|
tests/renderers/handcrafted_test.py
|
# Copyright 2019 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
# python2 python3
"""__init__.py."""
|
spriteworld-master
|
tests/configs/__init__.py
|
# Copyright 2019 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
# python2 python3
"""Tests for task configs."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import absltest
from absl.testing import parameterized
from six.moves import range
from spriteworld import environment
from spriteworld.configs import cobra
from spriteworld.configs import examples
class ConfigsTest(parameterized.TestCase):
@parameterized.parameters(
(cobra.exploration,),
(cobra.goal_finding_more_distractors,),
(cobra.goal_finding_more_targets,),
(cobra.goal_finding_new_position,),
(cobra.goal_finding_new_shape,),
(cobra.clustering,),
(cobra.sorting,),
(examples.goal_finding_embodied,),
)
def testConfig(self, task_module, modes=('train', 'test'), replicas=3):
for mode in modes:
print(mode)
for _ in range(replicas):
config = task_module.get_config(mode=mode)
config['renderers'] = {}
env = environment.Environment(**config)
env.observation_spec()
action = env.action_space.sample()
num_episodes = 0
step = env.reset()
while num_episodes < 5:
if step.first():
num_episodes += 1
step = env.step(action)
if __name__ == '__main__':
absltest.main()
|
spriteworld-master
|
tests/configs/configs_test.py
|
#!/usr/bin/python
#
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import heapq
import numpy as np
import simulated_environment
C = 12. # constant for l_i
def structured_procrastination(env, n, epsilon, delta, zeta, k0, k_bar,
theta_multiplier):
"""Implementation of Structured Procrastination."""
# The names of the variables used here agree with the pseudocode in the paper,
# except q is used instead of the paper's upper-case Q, and qq is used instead
# of the paper's lower-case q. The pseudocode overloads l: here, ll is used to
# represent the scalar (appears as l in paper), and l is used to represent the
# array (appears as l_i in the paper). For efficiency, we implement the argmin
# in line 10 of the paper with a heap.
k, l, q, qq, r, r_sum, heap = [], [], [], [], [], [], []
beta = np.log2(k_bar / k0)
  for i in range(n):  # Line 2 in paper.
k.append(0)
l.append(
int(np.ceil(C / (epsilon * epsilon) * np.log(3 * beta * n / zeta))))
q.append([])
qq.append(0)
r.append([])
r_sum.append(0)
heapq.heappush(heap, (0, i))
    for ll in range(l[i]):  # Line 6 in paper.
r[i].append(0)
q[i].append((ll, k0))
# Main loop.
current_delta = 1
iter_count = 0
while current_delta > delta: # Line 9, but stop when target delta reached.
iter_count += 1
_, i = heapq.heappop(heap)
ll, theta = q[i].pop(0)
if r[i][ll] == 0: # Line 12 in paper.
k[i] += 1
qq[i] = int(
np.ceil(
C /
(epsilon * epsilon) * np.log(3 * beta * n * k[i] * k[i] / zeta)))
did_timeout, elapsed = env.run(config_id=i, timeout=theta, instance_id=ll)
if not did_timeout: # Line 15 in paper.
r_sum[i] += elapsed - r[i][ll]
r[i][ll] = elapsed
else:
r_sum[i] += theta - r[i][ll]
r[i][ll] = theta
q[i].append((ll, theta_multiplier * theta))
while len(q[i]) < qq[i]: # Line 20 in paper.
l[i] += 1
r[i].append(0)
q[i].insert(0, (l[i] - 1, theta))
    heapq.heappush(heap, (r_sum[i] / k[i], i))  # Bookkeeping for the heap.
if iter_count % 10000 == 0:
      # Recalculate delta, as in line 25 of the paper. We recalculate it
      # within this while loop because we want an algorithm that stops once
      # the target delta is reached, rather than an anytime algorithm.
i_star = np.argmax(r_sum)
current_delta = np.sqrt(1 + epsilon) * qq[i_star] / k[i_star]
print('iter_count={}, delta={}, theta={}, total runtime so far={}'.format(
iter_count, current_delta, theta, env.get_total_runtime()))
return i_star, current_delta
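# A minimal usage sketch (hypothetical values, mirroring the defaults of
# main() below):
#   env = simulated_environment.Environment('measurements.dump', 900.)
#   best, delta = structured_procrastination(
#       env, env.get_num_configs(), epsilon=0.2, delta=0.2, zeta=0.1,
#       k0=1., k_bar=1000000., theta_multiplier=2.0)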
def main():
parser = argparse.ArgumentParser(
description='Executes Structured Procrastination '
'with a simulated environment.')
parser.add_argument('--epsilon', help='Epsilon from the paper',
type=float, default=0.2)
parser.add_argument('--delta', help='Delta from the paper',
type=float, default=0.2)
parser.add_argument('--zeta', help='Zeta from the paper',
type=float, default=0.1)
parser.add_argument('--k0', help='Kappa_0 from the paper',
type=float, default=1.)
parser.add_argument('--k-bar', help='bar{Kappa} from the paper',
type=float, default=1000000.)
parser.add_argument('--theta-multiplier',
help='Theta multiplier from the paper',
type=float, default=2.0)
parser.add_argument('--measurements-filename',
help='Filename to load measurement results from',
type=str, default='measurements.dump')
parser.add_argument('--measurements-timeout',
help='Timeout (seconds) used for the measurements',
type=float, default=900.)
args = vars(parser.parse_args())
epsilon = args['epsilon']
delta = args['delta']
zeta = args['zeta']
k0 = args['k0']
k_bar = args['k_bar']
theta_multiplier = args['theta_multiplier']
results_file = args['measurements_filename']
timeout = args['measurements_timeout']
env = simulated_environment.Environment(results_file, timeout)
num_configs = env.get_num_configs()
best_config_index, delta = structured_procrastination(
env, num_configs, epsilon, delta, zeta, k0, k_bar, theta_multiplier)
print('best_config_index={}, delta={}'.format(best_config_index, delta))
env.print_config_stats(best_config_index)
def format_runtime(runtime):
return '{}s = {}m = {}h = {}d'.format(
runtime, runtime / 60, runtime / 3600, runtime / (3600 * 24))
print('total runtime: ' + format_runtime(env.get_total_runtime()))
print('total resumed runtime: ' +
format_runtime(env.get_total_resumed_runtime()))
if __name__ == '__main__':
main()
|
leaps-and-bounds-master
|
structured_procrastination.py
|
#!/usr/bin/python
#
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import math
import numpy as np
import simulated_environment
R = 44 # Constant used for calculating b.
R2 = 32 # Constant used for the stopping condition (Appendix D, line 25).
def leaps_and_bounds(env, n, epsilon, delta, zeta, k0, theta_multiplier):
"""Implementation of LeapsAndBounds."""
# This implementation makes some adjustments to the constants that control
# how the failure probability budget zeta is allocated between the different
# high-probability events that guarantee correctness.
theta = k0 * 16. / 7
k = 0
while True:
k += 1
b = int(
math.ceil(R * math.log(40 * 3 * n * k * (k + 1) / zeta) /
(delta * epsilon * epsilon)))
print('b={}, theta={}, total runtime so far={}'.format(
b, theta, env.get_total_runtime()))
q_hat = []
    for i in range(n):
q_hat_i = ebgstop_slave_alg(env, i, b, delta, theta, k, epsilon, zeta, n)
q_hat.append(q_hat_i)
if np.min(q_hat) < theta:
best_config_index = np.argmin(q_hat)
return (best_config_index, q_hat[best_config_index],
4 * theta / (3 * delta))
theta *= theta_multiplier
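# A minimal usage sketch (hypothetical values, mirroring the defaults of
# main() below); leaps_and_bounds returns the index of the chosen
# configuration, its estimated capped average runtime, and the cap tau:
#   env = simulated_environment.Environment('measurements.dump', 900.)
#   best, capped_avg, tau = leaps_and_bounds(
#       env, env.get_num_configs(), epsilon=0.2, delta=0.2, zeta=0.1,
#       k0=1., theta_multiplier=1.25)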
def ebgstop_slave_alg(env, i, b, delta, theta, k, epsilon, zeta, n):
"""Implementation of RuntimeEst with EBGStop, as in Appendix D in paper."""
beta = 1.10
t = b * theta # Corresponds to T in the paper.
tau = 4 * theta / (3 * delta)
q = []
sumq, sum_q_squared = 0, 0
kk = 0 # Corresponds to l in the paper.
  for j in range(b):
elapsed = 0
if t > 0:
_, elapsed = env.run(config_id=i, instance_id=j,
timeout=min(t, tau))
t -= elapsed
q.append(elapsed)
sumq += elapsed
sum_q_squared += elapsed * elapsed
if t == 0:
return theta
q_mean = sumq / (j + 1)
q_var = max((sum_q_squared - q_mean * sumq) / (j + 1), 0)
if j + 1 > np.floor(np.power(beta, kk)):
kk += 1
alpha = np.floor(np.power(beta, kk)) / np.floor(np.power(beta, kk - 1))
dk = (2.1 * k ** 1.5 * 2.61238 * (kk ** 1.1) * 10.5844 * n) / zeta
x = alpha * np.log(3 * dk)
if j > 0:
confidence = np.sqrt(q_var * 2 * x / (j + 1)) + 3 * tau * x / (j + 1)
lower_bound = q_mean - confidence
if (1 + 3 * epsilon / 7) * lower_bound > theta and q_mean > theta:
return theta
d_prime = zeta / (40 * 3 * n * k * (k + 1) * j * (j + 1))
r2 = math.ceil(-R2 * np.log(d_prime) / delta)
if j + 1 >= r2 and confidence <= epsilon / 3 * (q_mean + lower_bound):
return q_mean
return np.mean(q)
def main():
parser = argparse.ArgumentParser(
description='Executes LeapsAndBounds with a simulated environment.')
parser.add_argument('--epsilon', help='Epsilon from the paper',
type=float, default=0.2)
parser.add_argument('--delta', help='Delta from the paper',
type=float, default=0.2)
parser.add_argument('--zeta', help='Zeta from the paper',
type=float, default=0.1)
parser.add_argument('--k0', help='Kappa_0 from the paper',
type=float, default=1.)
parser.add_argument('--theta-multiplier',
help='Theta multiplier from the paper',
type=float, default=1.25)
parser.add_argument('--measurements-filename',
help='Filename to load measurement results from',
type=str, default='measurements.dump')
parser.add_argument('--measurements-timeout',
help='Timeout (seconds) used for the measurements',
type=float, default=900.)
args = vars(parser.parse_args())
epsilon = args['epsilon']
delta = args['delta']
zeta = args['zeta']
k0 = args['k0']
theta_multiplier = args['theta_multiplier']
results_file = args['measurements_filename']
timeout = args['measurements_timeout']
env = simulated_environment.Environment(results_file, timeout)
num_configs = env.get_num_configs()
best_config_index, capped_avg, tau = leaps_and_bounds(
env, num_configs, epsilon, delta, zeta, k0, theta_multiplier)
print('best_config_index={}, capped_avg={}, tau={}'.format(
best_config_index, capped_avg, tau))
env.print_config_stats(best_config_index, tau=tau)
def format_runtime(runtime):
return '{}s = {}m = {}h = {}d'.format(
runtime, runtime / 60, runtime / 3600, runtime / (3600 * 24))
print('total runtime: ' + format_runtime(env.get_total_runtime()))
print('total resumed runtime: ' +
format_runtime(env.get_total_resumed_runtime()))
if __name__ == '__main__':
main()
|
leaps-and-bounds-master
|
leapsandbounds.py
|
#!/usr/bin/python
#
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import pickle
import numpy as np
class Environment(object):
"""This class is used for simulating runs and collecting statistics."""
def __init__(self, results_file, timeout):
"""Prepares an instance that can simulate runs based on a measurements file.
Args:
results_file: the location of the pickle dump containing the results
of the runtime measurements.
timeout: the timeout used for the runtime measurements.
"""
self._timeout = timeout
# Load measurements.
    with open(results_file, 'rb') as f:
results = pickle.load(f)
self._results = [results[k] for k in sorted(results.keys())]
self._instance_count = len(self._results[0])
self.reset()
def reset(self):
"""Reset the state of the environment."""
    # Total runtime, without resuming, of any configuration run on any instance.
# Without resuming means that if the same configuration-instance pair is
# run, `total_runtime` will track the time taken as if the execution had to
# be restarted, rather than resumed from when it was last interrupted
# due to a timeout.
self._total_runtime = 0
    # Total runtime, with resuming, of any configuration run on any instance.
self._total_resumed_runtime = 0
# Dict mapping a configuration to how long it was run, with resuming, on all
# instances combined.
self._runtime_per_config = collections.defaultdict(float)
# Dict mapping a configuration and an instance to how long it ran so far
# in total, with resuming. Summing the runtimes for all instances for a
# configuration will be equal to the relevant value in `runtime_per_config`.
self._ran_so_far = collections.defaultdict(
lambda: collections.defaultdict(float))
def get_num_configs(self):
return len(self._results)
def get_num_instances(self):
return self._instance_count
def get_total_runtime(self):
return self._total_runtime
def get_total_resumed_runtime(self):
return self._total_resumed_runtime
def run(self, config_id, timeout, instance_id=None):
"""Simulates a run of a configuration on an instance with a timeout.
Args:
config_id: specifies which configuration to run. Integer from 0 to
get_num_configs() - 1.
timeout: the timeout to simulate the run with.
instance_id: the instance to run. If not specified, a random instance
will be run.
Raises:
ValueError: if the supplied timeout is larger than self.timeout, the
requested simulation cannot be completed and this error will be raised.
Returns:
A tuple of whether the simulated run timed out, and how long it ran.
"""
if timeout > self._timeout:
raise ValueError('timeout provided is too high to be simulated.')
if instance_id is None:
instance_id = np.random.randint(self._instance_count)
runtime = min(timeout,
self._results[config_id][instance_id % self._instance_count])
self._total_runtime += runtime
resumed_runtime = runtime - self._ran_so_far[config_id][instance_id]
self._runtime_per_config[config_id] += resumed_runtime
self._ran_so_far[config_id][instance_id] = runtime
self._total_resumed_runtime += resumed_runtime
return (timeout <=
self._results[config_id][instance_id % self._instance_count],
runtime)
def print_config_stats(self, config_id, tau=None):
"""Prints statistics about a particular configuration."""
# Compute average runtime capped at TIMEOUT.
average = np.mean([min(self._timeout, r) for r in self._results[config_id]])
print('avg runtime capped at the dataset\'s timeout: {}'.format(average))
timeout_count = 0
for t in self._results[config_id]:
if t > self._timeout:
timeout_count += 1
print('fraction of instances timing out at the timeout of the dataset: {}'.
format(float(timeout_count) / len(self._results[config_id])))
if tau is not None:
timeout_count = 0
for t in self._results[config_id]:
if t > tau:
timeout_count += 1
print('fraction of instances timing out at tau: {}'.format(
float(timeout_count) / len(self._results[config_id])))
with open('runtime_per_config.dump', 'wb') as outf:
pickle.dump(self._runtime_per_config, outf)
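# Worked example of the resuming semantics (hypothetical numbers): if a
# configuration's true runtime on an instance is 10s, then
#   env.run(config_id=0, timeout=4., instance_id=0)  # times out after 4s
#   env.run(config_id=0, timeout=8., instance_id=0)  # times out after 8s
# leaves get_total_runtime() == 12 (restart semantics: 4 + 8), while
# get_total_resumed_runtime() == 8 (resume semantics: only 4 new seconds).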
|
leaps-and-bounds-master
|
simulated_environment.py
|
# Copyright 2023 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Setup for mp_noisy_or package."""
import setuptools
if __name__ == '__main__':
setuptools.setup(
name='mp_noisy_or',
version='0.0.1',
packages=setuptools.find_packages(),
license='Apache 2.0',
author='DeepMind',
description=(
'Code for the ICML 2023 paper "Learning noisy-OR Bayesian Networks'
' with Max-Product Belief Propagation"'
),
long_description=open('README.md').read(),
long_description_content_type='text/markdown',
author_email='adedieu@google.com',
      python_requires='>=3.10',
)
|
max_product_noisy_or-main
|
setup.py
|
# Copyright 2023 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Train the noisy OR models."""
from ml_collections import config_flags
from mp_noisy_or import noisy_or_bp
from mp_noisy_or import noisy_or_hybrid
from mp_noisy_or import noisy_or_vi
from absl import app
_CONFIGS = config_flags.DEFINE_config_file(
name="config",
default="config.py",
help_string="Training configuration",
)
# pylint: disable=invalid-name
def train(_):
"""Train the noisy OR network on a dataset."""
config = _CONFIGS.value
dataset = config.dataset
method = config.method
# First extract the config for the dataset
if dataset == "20news":
config_BP = config.config_BP_20news
config_VI = config.config_VI_20news
elif dataset == "synthetic":
config_BP = config.config_BP_synthetic
config_VI = config.config_VI_synthetic
elif dataset == "BMF":
config_BP = config.config_BP_BMF
config_VI = config.config_VI_BMF
elif dataset == "2D_deconvolution":
config_BP = config.config_BP_2Ddeconv
config_VI = config.config_VI_2Ddeconv
elif dataset == "overparam":
config_BP = config.config_BP_overparam
config_VI = None
elif dataset == "yelp":
config_BP = config.config_BP_yelp
config_VI = config.config_VI_yelp
elif dataset == "imdb":
config_BP = config.config_BP_imdb
config_VI = config.config_VI_imdb
elif dataset == "abstract":
config_BP = config.config_BP_abstract
config_VI = config.config_VI_abstract
elif dataset == "agnews":
config_BP = config.config_BP_agnews
config_VI = config.config_VI_agnews
elif dataset == "patent":
config_BP = config.config_BP_patent
config_VI = config.config_VI_patent
else:
raise ValueError("Unknown dataset", dataset)
# Second train the selected method
if method == "BP":
noisy_OR = noisy_or_bp.NoisyOR_BP(config_BP)
elif method == "VI":
noisy_OR = noisy_or_vi.NoisyOR_VI(config_VI)
elif method == "hybrid":
noisy_OR = noisy_or_hybrid.NoisyOR_Hybrid(
config_BP=config_BP, config_VI=config_VI
)
else:
raise ValueError("Unknown method", method)
noisy_OR.train()
if __name__ == "__main__":
app.run(train)
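# A hypothetical invocation sketch (overrides use the standard ml_collections
# config_flags syntax; the chosen values are assumptions):
#   python train_noisy_or.py --config=config.py \
#       --config.method=BP --config.dataset=20news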
|
max_product_noisy_or-main
|
mp_noisy_or/train_noisy_or.py
|
# Copyright 2023 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Reimplements the authors' approach of http://auai.org/uai2019/proceedings/papers/317.pdf in JAX."""
import datetime
import functools
import itertools
import time
import chex
import jax
import jax.numpy as jnp
import numpy as np
import optax
import tqdm
from mp_noisy_or import data
from mp_noisy_or import utils
# pylint: disable=invalid-name
# pylint: disable=g-doc-args
# pylint: disable=g-doc-return-or-yield
# pylint: disable=comparison-with-itself
def create_indices_arrays(
edges_c2p, X_shape, log_potentials_shape, hidden_visible_vars, leak_node_idx
):
"""Create dictionnaries mapping children to parents and parents' potentials, as well as parents to children and children's potentials."""
dict_child_to_parents = {}
dict_child_to_parents_potentials = {}
dict_parent_to_children = {idx: [] for idx in hidden_visible_vars}
dict_parent_to_children_potentials = {idx: [] for idx in hidden_visible_vars}
# Also return an array mapping nodes to the index connecting them to the leak
if len(log_potentials_shape) == 1:
nodes_to_leak = np.zeros(shape=X_shape)
else:
nodes_to_leak = np.zeros(shape=X_shape + (len(log_potentials_shape),))
for idx_child, idx_parents_idx_potentials in edges_c2p.items():
idx_parents = []
idx_parents_potentials = []
for idx_parent, idx_potential in idx_parents_idx_potentials.items():
if idx_parent == leak_node_idx:
# Node to leak
nodes_to_leak[idx_child] = idx_potential
else:
# Parent to child
dict_parent_to_children[idx_parent].append(idx_child)
dict_parent_to_children_potentials[idx_parent].append(idx_potential)
idx_parents.append(idx_parent)
idx_parents_potentials.append(idx_potential)
# Node to parents
dict_child_to_parents[idx_child] = jnp.array(idx_parents, dtype=int)
dict_child_to_parents_potentials[idx_child] = jnp.array(
idx_parents_potentials, dtype=int
)
# Cast as int
for idx_parent, idx_children in dict_parent_to_children.items():
dict_parent_to_children[idx_parent] = jnp.array(idx_children, dtype=int)
for idx_parent, idx_potentials in dict_parent_to_children_potentials.items():
dict_parent_to_children_potentials[idx_parent] = jnp.array(
idx_potentials, dtype=int
)
nodes_to_leak = jnp.array(nodes_to_leak, dtype=int)
return (
dict_child_to_parents,
dict_child_to_parents_potentials,
dict_parent_to_children,
dict_parent_to_children_potentials,
nodes_to_leak,
)
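# Illustrative input shape (a hypothetical 1D model, not taken from the
# paper): with one hidden parent 0, one visible child 1 and leak node index 2,
# edges_c2p could be {1: {0: 0, 2: 1}}, i.e. child 1 is linked to parent 0 via
# potential 0 and to the leak via potential 1. nodes_to_leak then stores, for
# every node, the potential index of its edge to the leak node.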
class NoisyOR_VI:
"""Trains a NoisyOR model with VI by reimplementing the authors' approach."""
def __init__(self, config):
self.config = config
np.random.seed(self.config.seed)
# Load data
(
self.Xv_gt_train,
self.Xv_gt_test,
self.edges_c2p,
self.X_shape,
self.log_potentials_shape,
self.leak_potentials_mask,
self.dont_update_potentials_mask,
self.slice_visible,
self.slice_hidden,
self.leak_node_idx,
) = data.DATA_LOADER[self.config.data.dataset](**self.config.data.args)
# If the data is not an array of fixed shape, we use local models
self.use_local_model = not isinstance(self.Xv_gt_train, np.ndarray)
self.has_multidim_arrays = len(self.X_shape) > 1
assert not self.has_multidim_arrays or not self.use_local_model
# Train-test split
if self.Xv_gt_test is None:
self.Xv_gt_train, self.Xv_gt_test = data.train_test_shuffle_split(
self.Xv_gt_train, self.config.seed, self.config.data.ratio_train
)
else:
np.random.shuffle(self.Xv_gt_train)
np.random.shuffle(self.Xv_gt_test)
# Compute the visible variables
if isinstance(self.slice_visible, slice):
assert not self.has_multidim_arrays
visible_start = (
self.slice_visible.start if self.slice_visible.start else 0
)
self.visible_vars = list(range(visible_start, self.slice_visible.stop))
else:
visible_starts_stops = []
for this_slice in self.slice_visible:
if isinstance(this_slice, int):
visible_starts_stops.append(range(this_slice, this_slice + 1))
else:
visible_start = this_slice.start if this_slice.start else 0
visible_starts_stops.append(range(visible_start, this_slice.stop))
self.visible_vars = list(itertools.product(*visible_starts_stops))
# Compute the hidden variables
if isinstance(self.slice_hidden, slice):
assert not self.has_multidim_arrays
hidden_start = self.slice_hidden.start if self.slice_hidden.start else 0
self.hidden_vars = list(range(hidden_start, self.slice_hidden.stop))
else:
hidden_starts_stops = []
for this_slice in self.slice_hidden:
if isinstance(this_slice, int):
hidden_starts_stops.append(range(this_slice, this_slice + 1))
else:
hidden_start = this_slice.start if this_slice.start else 0
hidden_starts_stops.append(range(hidden_start, this_slice.stop))
self.hidden_vars = list(itertools.product(*hidden_starts_stops))
self.hidden_visible_vars = self.visible_vars + self.hidden_vars
# Extract structure in the form of dicts where values are arrays of indices
(
self.dict_child_to_parents,
self.dict_child_to_parents_potentials,
self.dict_parent_to_children,
self.dict_parent_to_children_potentials,
self.nodes_to_leak,
) = create_indices_arrays(
self.edges_c2p,
self.X_shape,
self.log_potentials_shape,
self.hidden_visible_vars,
self.leak_node_idx,
)
    # Fill-in values used for future padding
if not self.has_multidim_arrays:
fill_value_nodes = self.leak_node_idx + 1
fill_value_potentials = self.log_potentials_shape[0] + 1
else:
fill_value_nodes = tuple(x + 1 for x in self.leak_node_idx)
fill_value_potentials = tuple(x + 1 for x in self.log_potentials_shape)
# From here, change the structure to only use arrays of fixed shape
# This will allow jitting the JAX functions
# For sparse data, build the local models and pad the visible and hidden
if self.use_local_model:
# For local models, X_visible and X_hidden are the activations indices
self.Xh_gt_train = utils.build_local_model(
self.Xv_gt_train,
self.dict_child_to_parents,
self.config.data.args.n_layers,
)
self.Xh_gt_test = utils.build_local_model(
self.Xv_gt_test,
self.dict_child_to_parents,
self.config.data.args.n_layers,
)
# Pad the visible and hidden variables
self.Xv_gt_train = utils.list_of_arrays_to_array(
self.Xv_gt_train, dtype=int, fill_value=fill_value_nodes
)
self.Xv_gt_test = utils.list_of_arrays_to_array(
self.Xv_gt_test, dtype=int, fill_value=fill_value_nodes
)
self.Xh_gt_train = utils.list_of_arrays_to_array(
self.Xh_gt_train, dtype=int, fill_value=fill_value_nodes
)
self.Xh_gt_test = utils.list_of_arrays_to_array(
self.Xh_gt_test, dtype=int, fill_value=fill_value_nodes
)
else:
self.Xh_gt_train = None
self.Xh_gt_test = None
self.slice_hidden_visible = np.zeros(shape=self.X_shape, dtype=bool)
self.slice_hidden_visible[self.slice_hidden] = True
self.slice_hidden_visible[self.slice_visible] = True
# Convert to arrays
self.visible_vars = jnp.array(self.visible_vars)
self.hidden_vars = jnp.array(self.hidden_vars)
self.hidden_visible_vars = jnp.array(self.hidden_visible_vars)
    # Convert all the dictionaries of indices into arrays of fixed shape
self.arr_child_to_parents = utils.dict_to_array(
self.dict_child_to_parents,
self.has_multidim_arrays,
dtype=int,
fill_value=fill_value_nodes,
)
self.arr_child_to_parents_potentials = utils.dict_to_array(
self.dict_child_to_parents_potentials,
self.has_multidim_arrays,
dtype=int,
fill_value=fill_value_potentials,
)
self.arr_parent_to_children = utils.dict_to_array(
self.dict_parent_to_children,
self.has_multidim_arrays,
dtype=int,
fill_value=fill_value_nodes,
)
self.arr_parent_to_children_potentials = utils.dict_to_array(
self.dict_parent_to_children_potentials,
self.has_multidim_arrays,
dtype=int,
fill_value=fill_value_potentials,
)
print("arr_child_to_parents shape", self.arr_child_to_parents.shape)
print("arr_parent_to_children shape", self.arr_parent_to_children.shape)
# For out-of-bounds
self.nodes_to_leak = self.nodes_to_leak.at[self.leak_node_idx].set(
fill_value_potentials
)
# Create optimizer
self.opt = optax.adam(learning_rate=config.learning.learning_rate)
def __hash__(self):
# pylint: disable=line-too-long
# See https://jax.readthedocs.io/en/latest/faq.html#strategy-2-marking-self-as-static
return hash(tuple(self.edges_c2p.keys()))
@functools.partial(jax.jit, static_argnames=("self", "is_training"))
def compute_ELBOs_and_grad(
self,
log_potentials,
Xv_batch,
Xh_batch=None,
is_training=True,
):
"""Compute the lower-bound of the ELBO and its gradients wrt the log potentials."""
n_samples = len(Xv_batch)
# Compute the variational parameter
if not self.use_local_model:
all_qs, all_rs = jax.vmap(self.EStep, in_axes=(None, 0))(
log_potentials, Xv_batch
)
else:
assert Xh_batch is not None
all_qs, all_rs = jax.vmap(self.EStep, in_axes=(None, 0, 0))(
log_potentials, Xv_batch, Xh_batch
)
all_ELBO_lower_bounds = jax.vmap(
self.compute_ELBO_lower_bound, in_axes=(None, 0, 0)
)(log_potentials, all_qs, all_rs)
chex.assert_equal(all_ELBO_lower_bounds.shape, (n_samples,))
sum_ELBO_lower_bound = jnp.sum(all_ELBO_lower_bounds)
avg_ELBO_lower_bound = sum_ELBO_lower_bound / all_ELBO_lower_bounds.shape[0]
    # Compute the gradients of the ELBO wrt the log potentials
if is_training:
if not self.use_local_model:
all_grad_log_potentials = jax.vmap(
self.compute_grads, in_axes=(None, 0, 0)
)(log_potentials, all_qs, all_rs)
else:
all_grad_log_potentials = jax.vmap(
self.compute_grads, in_axes=(None, 0, 0, 0, 0)
)(log_potentials, all_qs, all_rs, Xv_batch, Xh_batch)
avg_grad_log_potentials = jnp.mean(all_grad_log_potentials, axis=0)
chex.assert_equal_shape([log_potentials, avg_grad_log_potentials])
sum_ELBO_mode = None
else:
avg_grad_log_potentials = None
all_ELBOs_mode, _ = jax.vmap(
self.compute_ELBO_from_samples_given_qs, in_axes=(None, 0)
)(log_potentials, all_qs)
chex.assert_equal(all_ELBOs_mode.shape, (n_samples,))
sum_ELBO_mode = jnp.sum(all_ELBOs_mode)
return (
avg_ELBO_lower_bound,
sum_ELBO_lower_bound,
avg_grad_log_potentials,
sum_ELBO_mode,
all_qs,
)
@functools.partial(jax.jit, static_argnames="self")
def EStep(self, log_potentials, X_visible, X_hidden=None):
"""Fast implementation of the variational expectation step."""
qs = self.init_q(X_visible, X_hidden)
rs = self.init_r(log_potentials, X_visible, X_hidden)
def outer_loop_update(qs_rs, _):
(qs, rs), _ = jax.lax.scan(
inner_loop_update, qs_rs, None, self.config.learning.n_inner_loops
)
rs = self.update_r(log_potentials, qs, rs, X_visible, X_hidden)
return (qs, rs), None
def inner_loop_update(qs_rs, _):
qs, rs = qs_rs
qs = self.update_q(log_potentials, qs, rs, X_hidden)
return (qs, rs), None
(qs, rs), _ = jax.lax.scan(
outer_loop_update, (qs, rs), None, self.config.learning.n_outer_loops
)
return qs, rs
@functools.partial(jax.jit, static_argnames="self")
def init_q(self, X_visible, X_hidden=None):
"""Initialize the qs variables."""
init_qs = jnp.zeros(shape=self.X_shape)
if not self.use_local_model:
init_qs = init_qs.at[self.slice_visible].set(X_visible)
init_qs = init_qs.at[self.slice_hidden].set(0.5)
init_qs = init_qs.at[self.leak_node_idx].set(1.0)
else:
assert X_hidden is not None
# For local models, X_visible and X_hidden are the activations indices
init_qs = init_qs.at[X_visible].set(1.0)
# Following Section 5.1, we only update the hidden variables in X_hidden
init_qs = init_qs.at[X_hidden].set(0.5)
init_qs = init_qs.at[self.leak_node_idx].set(1.0)
return init_qs
def set_values(self, args, it):
"""Useful method for setting values sequentially."""
values, all_new_values, all_idx_potentials = args
new_values = all_new_values[it]
idx_potentials = all_idx_potentials[it]
# Out-of-bounds indices are dropped
values = utils.set_value_for_indices(
values, idx_potentials, new_values, self.has_multidim_arrays
)
return (values, all_new_values, all_idx_potentials), None
def add_values(self, args, it):
"""Useful method for adding values sequentially."""
values, all_new_values, all_idx_potentials = args
new_values = all_new_values[it]
idx_potentials = all_idx_potentials[it]
# Out-of-bounds indices are dropped
values = utils.add_value_to_indices(
values, idx_potentials, new_values, self.has_multidim_arrays
)
return (values, all_new_values, all_idx_potentials), None
@functools.partial(jax.jit, static_argnames="self")
def init_r(self, log_potentials, X_visible, X_hidden=None):
"""Initialize the rs variables, Section 4.1.3.
    Note: Sparse data is represented with static shapes, which makes it
    possible to scan the initialization (as with dense data).
"""
# Add epsilon for the division in the definition of u
init_rs = jnp.zeros(shape=self.log_potentials_shape) + self.config.min_clip
def init_r_node(init_rs, idx_node, switch):
"""Initialize rs for a single node."""
idx_potentials_wo_leak = utils.get_value_by_indices(
self.arr_child_to_parents_potentials,
idx_node,
self.has_multidim_arrays,
)
assert idx_potentials_wo_leak.shape[0] > 0
potentials_wo_leak = utils.get_value_by_indices(
log_potentials, idx_potentials_wo_leak, self.has_multidim_arrays
)
norm_potentials_wo_leak = potentials_wo_leak / (
jnp.sum(potentials_wo_leak) + self.config.min_clip
)
old_rs = utils.get_value_by_indices(
init_rs, idx_potentials_wo_leak, self.has_multidim_arrays
)
chex.assert_equal_shape([norm_potentials_wo_leak, old_rs])
      # Section 4.1.3: only set rs for the active visible nodes and for the
      # hidden nodes (excluding the leak)
new_rs = switch * norm_potentials_wo_leak + (1 - switch) * old_rs
return new_rs, idx_potentials_wo_leak
def init_r_node_visible(init_rs, idx_node):
"""Initialize rs for a single visible node."""
if not self.use_local_model:
if self.has_multidim_arrays:
# Drop the first dimension of visible indices
return init_r_node(init_rs, idx_node, X_visible[tuple(idx_node)[1:]])
else:
return init_r_node(init_rs, idx_node, X_visible[idx_node])
else:
# For local models, X_visible and X_hidden are the activations indices
return init_r_node(init_rs, idx_node, 1.0)
def init_r_node_hidden(init_rs, idx_node):
"""Initialize rs for a single hidden node."""
return init_r_node(init_rs, idx_node, 1.0)
if not self.use_local_model:
visible_vars = self.visible_vars
hidden_vars = self.hidden_vars
else:
assert X_hidden is not None
visible_vars = X_visible
hidden_vars = X_hidden
# Compute the visible initializations in parallel
all_new_rs, all_idx_potentials_wo_leak = jax.vmap(
init_r_node_visible, in_axes=(None, 0)
)(init_rs, visible_vars)
# Set the visible initializations sequentially
(init_rs, _, _), _ = jax.lax.scan(
self.set_values,
(init_rs, all_new_rs, all_idx_potentials_wo_leak),
jnp.arange(visible_vars.shape[0]),
)
# Compute the hidden initializations in parallel
all_new_rs, all_idx_potentials_wo_leak = jax.vmap(
init_r_node_hidden, in_axes=(None, 0)
)(init_rs, hidden_vars)
# Set the hidden initializations sequentially
(init_rs, _, _), _ = jax.lax.scan(
self.set_values,
(init_rs, all_new_rs, all_idx_potentials_wo_leak),
jnp.arange(hidden_vars.shape[0]),
)
return init_rs
@functools.partial(jax.jit, static_argnames="self")
def update_q(self, log_potentials, qs, rs, X_hidden=None):
"""Update all the qs variables, Section 4.1.2.
    Note: Sparse data is represented with static shapes, which makes it
    possible to scan the updates (as with dense data).
"""
if not self.use_local_model:
order = self.hidden_vars
else:
assert X_hidden is not None
# Step 2, section 5.1: only update the nodes in the local model
order = X_hidden
def update_q_node(qs, idx_hidden):
"""Update the qs variable associated to a node, Equations (11)-(12)."""
# Get indices
idx_children_potentials = utils.get_value_by_indices(
self.arr_parent_to_children_potentials,
idx_hidden,
self.has_multidim_arrays,
)
idx_children = utils.get_value_by_indices(
self.arr_parent_to_children, idx_hidden, self.has_multidim_arrays
)
idx_parents_potentials_wo_leak = utils.get_value_by_indices(
self.arr_child_to_parents_potentials,
idx_hidden,
self.has_multidim_arrays,
)
idx_parents_wo_leak = utils.get_value_by_indices(
self.arr_child_to_parents, idx_hidden, self.has_multidim_arrays
)
idx_node_to_leak = utils.get_value_by_indices(
self.nodes_to_leak, idx_hidden, self.has_multidim_arrays
)
# Leak node
w_0i = utils.get_value_by_indices(
log_potentials, idx_node_to_leak, self.has_multidim_arrays
)
# Equation (12)
q_i = w_0i + utils.log1mexp(w_0i)
# Parents
w_pi = utils.get_value_by_indices(
log_potentials,
idx_parents_potentials_wo_leak,
self.has_multidim_arrays,
)
q_pi = utils.get_value_by_indices(
qs, idx_parents_wo_leak, self.has_multidim_arrays
)
r_pi = utils.get_value_by_indices(
rs, idx_parents_potentials_wo_leak, self.has_multidim_arrays
)
      # Clipping, as out-of-bounds entries have value 0
r_pi = jnp.clip(r_pi, self.config.min_clip, None)
u_pi = w_0i + w_pi / r_pi
q_i += jnp.sum(
q_pi * (w_pi + r_pi * (utils.log1mexp(u_pi) - utils.log1mexp(w_0i)))
)
# Children
nodes_to_leak_children = utils.get_value_by_indices(
self.nodes_to_leak, idx_children, self.has_multidim_arrays
)
w_0c = utils.get_value_by_indices(
log_potentials, nodes_to_leak_children, self.has_multidim_arrays
)
w_ci = utils.get_value_by_indices(
log_potentials,
idx_children_potentials,
self.has_multidim_arrays,
)
q_ci = utils.get_value_by_indices(
qs, idx_children, self.has_multidim_arrays
)
r_ci = utils.get_value_by_indices(
rs, idx_children_potentials, self.has_multidim_arrays
)
      # Clipping, as out-of-bounds entries have value 0
r_ci = jnp.clip(r_ci, self.config.min_clip, None)
u_ci = w_0c + w_ci / r_ci
# Equation (12)
q_i += jnp.sum(
q_ci * r_ci * (utils.log1mexp(u_ci) - utils.log1mexp(w_0c))
- (1 - q_ci) * w_ci
)
# Equation (11)
new_q_i = jnp.where(
q_i >= 0,
1.0 / (1.0 + jnp.exp(-q_i)),
jnp.exp(q_i) / (1.0 + jnp.exp(q_i)),
)
new_q_i = jnp.clip(new_q_i, self.config.min_clip, None)
# Out-of-bounds indices are dropped
qs = utils.set_value_for_indices(
qs, idx_hidden, new_q_i, self.has_multidim_arrays
)
return qs, None
# Scan the updates
qs, _ = jax.lax.scan(update_q_node, qs, order)
return qs
@functools.partial(jax.jit, static_argnames="self")
def update_r(self, log_potentials, qs, rs, X_visible, X_hidden=None):
"""Update the rs variables, Section 4.1.1.
    Note: Sparse data is represented with static shapes, which makes it
    possible to scan the updates (as with dense data).
"""
def update_r_node(rs, idx_node, switch):
"""Update the rs variable associated to a node."""
# Get indices
idx_potentials_wo_leak = utils.get_value_by_indices(
self.arr_child_to_parents_potentials,
idx_node,
self.has_multidim_arrays,
)
idx_parents_wo_leak = utils.get_value_by_indices(
self.arr_child_to_parents, idx_node, self.has_multidim_arrays
)
idx_node_to_leak = utils.get_value_by_indices(
self.nodes_to_leak, idx_node, self.has_multidim_arrays
)
assert idx_parents_wo_leak.shape[0] > 0
# Get parameters values
w_0i = utils.get_value_by_indices(
log_potentials, idx_node_to_leak, self.has_multidim_arrays
)
w_pi = utils.get_value_by_indices(
log_potentials, idx_potentials_wo_leak, self.has_multidim_arrays
)
q_pi = utils.get_value_by_indices(
qs, idx_parents_wo_leak, self.has_multidim_arrays
)
r_pi = utils.get_value_by_indices(
rs, idx_potentials_wo_leak, self.has_multidim_arrays
)
old_r_pi = r_pi.copy()
      # Clipping, as out-of-bounds entries have value 0
r_pi = jnp.clip(r_pi, self.config.min_clip, None)
def update_r(r_pi, _):
"""Implement Equation (10)."""
u_pi = w_0i + w_pi / r_pi
r_pi = q_pi * (
r_pi * (utils.log1mexp(u_pi) - utils.log1mexp(w_0i))
- w_pi * f_prime(u_pi)
)
r_pi /= jnp.sum(r_pi) + self.config.min_clip
r_pi = jnp.clip(r_pi, self.config.min_clip, None)
return r_pi, None
r_pi, _ = jax.lax.scan(
update_r, r_pi, None, self.config.learning.n_inner_loops
)
chex.assert_equal_shape([r_pi, old_r_pi])
new_r_pi = switch * r_pi + (1 - switch) * old_r_pi
new_r_pi = jnp.clip(new_r_pi, self.config.min_clip, None)
new_r_pi /= jnp.sum(new_r_pi)
return new_r_pi, idx_potentials_wo_leak
def update_r_node_visible(rs, idx_node):
"""Single visible node update."""
if not self.use_local_model:
if self.has_multidim_arrays:
# Drop the first dimension of visible indices
return update_r_node(rs, idx_node, X_visible[tuple(idx_node)[1:]])
else:
return update_r_node(rs, idx_node, X_visible[idx_node])
else:
# For local models, X_visible and X_hidden are the activations indices
return update_r_node(rs, idx_node, 1.0)
def update_r_node_hidden(rs, idx_node):
"""Single hidden node update."""
return update_r_node(rs, idx_node, 1.0)
if not self.use_local_model:
visible_vars = self.visible_vars
hidden_vars = self.hidden_vars
else:
assert X_hidden is not None
visible_vars = X_visible
hidden_vars = X_hidden
    # Update the visible variables in parallel, as mentioned in Section 4.1.1
all_new_rs, all_idx_potentials_wo_leak = jax.vmap(
update_r_node_visible, in_axes=(None, 0)
)(rs, visible_vars)
# Set the visible updates sequentially
(rs, _, _), _ = jax.lax.scan(
self.set_values,
(rs, all_new_rs, all_idx_potentials_wo_leak),
jnp.arange(visible_vars.shape[0]),
)
    # Update the hidden variables in parallel, as mentioned in Section 4.1.1
all_new_rs, all_idx_potentials_wo_leak = jax.vmap(
update_r_node_hidden, in_axes=(None, 0)
)(rs, hidden_vars)
# Set the hidden updates sequentially
(rs, _, _), _ = jax.lax.scan(
self.set_values,
(rs, all_new_rs, all_idx_potentials_wo_leak),
jnp.arange(hidden_vars.shape[0]),
)
return rs
@functools.partial(jax.jit, static_argnames="self")
def compute_grads(
self, log_potentials, qs, rs, X_visible=None, X_hidden=None
):
"""Compute the gradients of the Elbo wrt log potentials, Section 4.2.1 and 4.2.2.
Note: Sparse data is represented with static shapes, which allows to scan
the computation (similar to dense data).
"""
def compute_grad_node(idx_node):
"""For each node, compute the partial derivatives for (1) the parameters connecting it to the parents and (2) the parameter connecting it to the leak."""
# Get indices
idx_potentials_wo_leak = utils.get_value_by_indices(
self.arr_child_to_parents_potentials,
idx_node,
self.has_multidim_arrays,
)
idx_parents_wo_leak = utils.get_value_by_indices(
self.arr_child_to_parents, idx_node, self.has_multidim_arrays
)
idx_node_to_leak = utils.get_value_by_indices(
self.nodes_to_leak, idx_node, self.has_multidim_arrays
)
# Leak node
w_0i = utils.get_value_by_indices(
log_potentials, idx_node_to_leak, self.has_multidim_arrays
)
if idx_parents_wo_leak.shape[0] == 0:
sum_pi = 0
else:
# Parents
w_pi = utils.get_value_by_indices(
log_potentials, idx_potentials_wo_leak, self.has_multidim_arrays
)
q_pi = utils.get_value_by_indices(
qs, idx_parents_wo_leak, self.has_multidim_arrays
)
r_pi = utils.get_value_by_indices(
rs, idx_potentials_wo_leak, self.has_multidim_arrays
)
q_i = utils.get_value_by_indices(qs, idx_node, self.has_multidim_arrays)
      # Clipping, as out-of-bounds entries have value 0
r_pi = jnp.clip(r_pi, self.config.min_clip, None)
u_pi = w_0i + w_pi / r_pi
# Eqs (13)-(14): partial derivative wrt edges from parents to child
grad_pi = jnp.where(
u_pi >= 0,
1.0 / (1.0 - jnp.exp(-u_pi)),
jnp.exp(u_pi) / (jnp.exp(u_pi) - 1.0),
)
grad_pi = q_pi * (q_i * grad_pi - 1)
chex.assert_equal_shape([grad_pi, w_pi])
# Useful quantity
sum_pi = jnp.sum(q_pi * r_pi * (f_prime(u_pi) - f_prime(w_0i)))
# Equations (15)-(16): partial derivative wrt edge from leak to child
grad_leak_i = q_i * f_prime(w_0i) - (1.0 - q_i) + q_i * sum_pi
return grad_pi, idx_potentials_wo_leak, grad_leak_i, idx_node_to_leak
    # Initialize the gradient
grads_log_potentials = jnp.zeros_like(log_potentials)
if not self.use_local_model:
hidden_visible_vars = self.hidden_visible_vars
else:
assert X_visible is not None
assert X_hidden is not None
assert not self.has_multidim_arrays
hidden_visible_vars = jnp.concatenate([X_visible, X_hidden])
# This works as self.arr_child_to_parents is 1D
all_q_pi = utils.get_value_by_indices(
qs, self.arr_child_to_parents, self.has_multidim_arrays
)
# Equations (13)-(14):
# partial derivative w.r.t non-leak edge not in local models is -q_k
grads_log_potentials = utils.set_value_for_indices(
grads_log_potentials,
self.arr_child_to_parents_potentials,
-all_q_pi,
self.has_multidim_arrays,
)
# Equations (15)-(16):
# partial derivative w.r.t leak edge not in local models is -1
grads_log_potentials = utils.set_value_for_indices(
grads_log_potentials,
self.nodes_to_leak,
-1.0,
self.has_multidim_arrays,
)
# Compute the gradient in parallel
(
all_grad_potentials_wo_leak,
all_idx_potentials_wo_leak,
all_grad_potentials_leak,
all_idx_node_to_leak,
) = jax.vmap(compute_grad_node, in_axes=(0,))(hidden_visible_vars)
# Add or set the updates sequentially
if self.has_multidim_arrays:
# Note: a parameter can only be shared across edges in the multidim case
# Gradient for all the parents-children potentials
(grads_log_potentials, _, _), _ = jax.lax.scan(
self.add_values,
(
grads_log_potentials,
all_grad_potentials_wo_leak,
all_idx_potentials_wo_leak,
),
jnp.arange(hidden_visible_vars.shape[0]),
)
# Gradient for all the potentials connecting to the leak
(grads_log_potentials, _, _), _ = jax.lax.scan(
self.add_values,
(
grads_log_potentials,
all_grad_potentials_leak,
all_idx_node_to_leak,
),
jnp.arange(hidden_visible_vars.shape[0]),
)
else:
# Gradient for all the parents-children potentials
(grads_log_potentials, _, _), _ = jax.lax.scan(
self.set_values,
(
grads_log_potentials,
all_grad_potentials_wo_leak,
all_idx_potentials_wo_leak,
),
jnp.arange(hidden_visible_vars.shape[0]),
)
# Gradient for all the potentials connecting to the leak
(grads_log_potentials, _, _), _ = jax.lax.scan(
self.set_values,
(
grads_log_potentials,
all_grad_potentials_leak,
all_idx_node_to_leak,
),
jnp.arange(hidden_visible_vars.shape[0]),
)
return grads_log_potentials
def compute_entropy(self, qs):
"""Compute the entropy."""
    q_hidden = qs[self.slice_hidden]  # extract the hidden variables
q_hidden_below = jnp.clip(q_hidden, self.config.min_clip, None)
one_minus_q_hidden_above = jnp.clip(
1 - q_hidden, self.config.min_clip, None
)
arr = q_hidden * jnp.log(q_hidden_below) + (1 - q_hidden) * jnp.log(
one_minus_q_hidden_above
)
entropy = -jnp.sum(arr)
return entropy
@functools.partial(jax.jit, static_argnames="self")
def compute_ELBO_lower_bound(self, log_potentials, qs, rs):
"""Compute the lower bound of the ELBO, Equation (9), for a sample."""
def compute_ELogP_lower_bound_node(idx_node):
"""Compute the lower bound for a variable, Equation (8)."""
# Get indices
idx_potentials_wo_leak = utils.get_value_by_indices(
self.arr_child_to_parents_potentials,
idx_node,
self.has_multidim_arrays,
)
idx_parents_wo_leak = utils.get_value_by_indices(
self.arr_child_to_parents, idx_node, self.has_multidim_arrays
)
idx_node_to_leak = utils.get_value_by_indices(
self.nodes_to_leak, idx_node, self.has_multidim_arrays
)
# Get values
w_0i = utils.get_value_by_indices(
log_potentials, idx_node_to_leak, self.has_multidim_arrays
)
w_pi = utils.get_value_by_indices(
log_potentials, idx_potentials_wo_leak, self.has_multidim_arrays
)
q_pi = utils.get_value_by_indices(
qs, idx_parents_wo_leak, self.has_multidim_arrays
)
r_pi = utils.get_value_by_indices(
rs, idx_potentials_wo_leak, self.has_multidim_arrays
)
q_i = utils.get_value_by_indices(qs, idx_node, self.has_multidim_arrays)
      # Clipping, as out-of-bounds entries have value 0
r_pi = jnp.clip(r_pi, self.config.min_clip, None)
E_log_p_off = -w_0i - jnp.dot(w_pi, q_pi)
# Compute the expectation in the second line of Equation (8)
if idx_parents_wo_leak.shape[0] == 0:
E_log_p_on = utils.log1mexp(w_0i)
else:
u_pi = w_0i + w_pi / r_pi
E_log_p_on = utils.log1mexp(w_0i) + jnp.sum(
q_pi * r_pi * (utils.log1mexp(u_pi) - utils.log1mexp(w_0i))
)
res = q_i * E_log_p_on + (1 - q_i) * E_log_p_off
return res
if not self.has_multidim_arrays:
# Compute the lower bound at each node but the leak
ELBOs = jax.vmap(compute_ELogP_lower_bound_node)(self.hidden_visible_vars)
ELBO = jnp.sum(ELBOs)
else:
# When the shapes are 1D we can vectorize the computations
all_w_0i = log_potentials[self.nodes_to_leak[self.slice_hidden_visible]]
# Out-of-bounds entries are filled in with 0s
all_w_pi = utils.get_value_by_indices(
log_potentials,
self.arr_child_to_parents_potentials,
has_multidim_arrays=False,
)
all_q_pi = utils.get_value_by_indices(
qs, self.arr_child_to_parents, has_multidim_arrays=False
)
all_w_dot_q = jax.vmap(jnp.dot, in_axes=(0, 0))(all_w_pi, all_q_pi)
chex.assert_equal(all_w_0i.shape, all_w_dot_q.shape)
all_r_pi = utils.get_value_by_indices(
rs, self.arr_child_to_parents_potentials, has_multidim_arrays=False
)
      # Clipping, as out-of-bounds entries have value 0
all_r_pi = jnp.clip(all_r_pi, self.config.min_clip, None)
E_log_p_off = -all_w_0i - all_w_dot_q
all_u_pi = all_w_0i[:, None] + all_w_pi / all_r_pi
E_log_p_on = utils.log1mexp(all_w_0i) + jnp.sum(
all_q_pi
* all_r_pi
* (utils.log1mexp(all_u_pi) - utils.log1mexp(all_w_0i)[:, None]),
axis=1,
)
E_log_p_on = E_log_p_on.reshape(-1)
chex.assert_equal(E_log_p_off.shape, E_log_p_on.shape)
all_res = (
qs[self.slice_hidden_visible] * E_log_p_on
+ (1 - qs[self.slice_hidden_visible]) * E_log_p_off
)
ELBO = jnp.sum(all_res)
# Add the entropy
ELBO += self.compute_entropy(qs)
return ELBO
@functools.partial(jax.jit, static_argnames="self")
def compute_ELBO_from_samples_given_qs(self, log_potentials, all_qs):
"""Given the posterior qs, estimate the posterior mode then compute the max-product ELBO."""
# Estimate the posterior mode
X_sample = jnp.round(all_qs)
def log_joint_lik_node(X_sample, idx_node):
"""Joint likelihood of the binary hidden and visible, Equation (1)."""
# Get indices
idx_potentials_wo_leak = utils.get_value_by_indices(
self.arr_child_to_parents_potentials,
idx_node,
self.has_multidim_arrays,
)
idx_parents_wo_leak = utils.get_value_by_indices(
self.arr_child_to_parents, idx_node, self.has_multidim_arrays
)
idx_node_to_leak = utils.get_value_by_indices(
self.nodes_to_leak, idx_node, self.has_multidim_arrays
)
# Get values
w_0i = utils.get_value_by_indices(
log_potentials,
idx_node_to_leak,
self.has_multidim_arrays,
)
w_pi = utils.get_value_by_indices(
log_potentials, idx_potentials_wo_leak, self.has_multidim_arrays
)
X_pi = utils.get_value_by_indices(
X_sample, idx_parents_wo_leak, self.has_multidim_arrays
)
X_i = utils.get_value_by_indices(
X_sample, idx_node, self.has_multidim_arrays
)
sum_factor = w_0i + jnp.dot(w_pi, X_pi)
log_joint_lik_node = (1 - X_i) * (-sum_factor) + X_i * jnp.clip(
jnp.log1p(-jnp.exp(-sum_factor)), utils.CLIP_INF, None
)
return log_joint_lik_node
if not self.has_multidim_arrays:
      # Joint likelihood of the hidden and the visible, Equation (1)
log_joint_liks = jax.vmap(log_joint_lik_node, in_axes=(None, 0))(
X_sample, self.hidden_visible_vars
)
log_joint_lik = jnp.sum(log_joint_liks)
else:
# When the shapes are 1D we can vectorize the computations
all_w_0i = log_potentials[self.nodes_to_leak[self.slice_hidden_visible]]
      # Out-of-bounds entries are filled in with 0s
all_w_pi = utils.get_value_by_indices(
log_potentials,
self.arr_child_to_parents_potentials,
has_multidim_arrays=False,
)
X_sample_parents = utils.get_value_by_indices(
X_sample, self.arr_child_to_parents, has_multidim_arrays=False
)
all_w_dot_X = jax.vmap(jnp.dot, in_axes=(0, 0))(
all_w_pi, X_sample_parents
)
chex.assert_equal(all_w_0i.shape, all_w_dot_X.shape)
sum_factor_XW = all_w_0i + all_w_dot_X
# Clipping to avoid nan
# Note: if sum_factor_XW[k] is so small that we need clipping,
# then X_sample[k] = 0 and we do not care about the second term
log_p_factors = -sum_factor_XW * (
1 - X_sample[self.slice_hidden_visible]
) + X_sample[self.slice_hidden_visible] * jnp.clip(
jnp.log1p(-jnp.exp(-sum_factor_XW)), utils.CLIP_INF, None
)
log_joint_lik = log_p_factors.sum()
return log_joint_lik, X_sample
def update_log_potentials(
self, Xv_batch, log_potentials, opt_state, Xh_batch=None
):
"""Update the log potentials."""
# Get the loss and the gradients
(avg_ELBO_lower_bound, _, grad_log_potentials, _, _) = (
self.compute_ELBOs_and_grad(
log_potentials, Xv_batch, Xh_batch, is_training=True
)
)
# Update the log potentials
grad_log_potentials *= -1.0
updates, new_opt_state = self.opt.update(grad_log_potentials, opt_state)
new_log_potentials = optax.apply_updates(log_potentials, updates)
new_log_potentials = jnp.clip(
new_log_potentials,
self.config.min_clip,
None,
)
if self.dont_update_potentials_mask is not None:
new_log_potentials += self.dont_update_potentials_mask * (
log_potentials - new_log_potentials
)
return new_log_potentials, new_opt_state, avg_ELBO_lower_bound
def eval_ELBOs_dataset(self, Xv, log_potentials, Xh=None):
"""Compute two ELBOs on an entire dataset.
(1) The first one is the regular ELBO for the VI mean-field posterior
(2) The second one defines the posterior via a Dirac at its mode
"""
test_batch_size = self.config.inference.test_batch_size
n_batches = (len(Xv) + test_batch_size - 1) // test_batch_size
if Xh is not None:
assert len(Xv) == len(Xh)
sum_elbo_lower_bound = 0.0
sum_elbo_mode = 0.0
all_qs_batch = []
for batch_idx in tqdm.trange(n_batches):
Xv_batch = Xv[
batch_idx * test_batch_size : (batch_idx + 1) * test_batch_size
]
if self.use_local_model:
Xh_batch = Xh[
batch_idx * test_batch_size : (batch_idx + 1) * test_batch_size
]
else:
Xh_batch = None
(
_,
sum_elbo_lower_bound_batch,
_,
sum_elbo_mode_batch,
qs_batch,
) = self.compute_ELBOs_and_grad(
log_potentials,
Xv_batch,
Xh_batch=Xh_batch,
is_training=False,
)
sum_elbo_lower_bound += sum_elbo_lower_bound_batch
sum_elbo_mode += sum_elbo_mode_batch
all_qs_batch.append(qs_batch)
all_qs = np.concatenate(all_qs_batch, axis=0)
return (sum_elbo_lower_bound / len(Xv), sum_elbo_mode / len(Xv), all_qs)
def train(self, init_log_potentials=None):
"""Train the noisy OR model with VI."""
if init_log_potentials is not None:
log_potentials = init_log_potentials
log_potentials = jnp.clip(log_potentials, self.config.min_clip, None)
else:
log_potentials = utils.init_log_potentials(
self.log_potentials_shape,
self.config.learning.proba_init,
self.leak_potentials_mask,
self.config.learning.leak_proba_init,
self.dont_update_potentials_mask,
self.config.learning.leak_proba_init_not_updated,
self.config.learning.noise_temperature_init,
self.config.min_clip,
)
opt_state = self.opt.init(log_potentials)
current_step = 0
n_steps = self.config.learning.num_iters
train_batch_size = self.config.learning.train_batch_size
n_batches = (
len(self.Xv_gt_train) + train_batch_size - 1
) // train_batch_size
all_update_times = []
all_eval_times = []
all_train_avg_elbos_lb = []
all_test_avg_elbos_mode = []
all_test_avg_elbos_lb = []
# Training iterations
print(f"Training for {n_steps} steps")
pbar = tqdm.tqdm(range(n_steps + 1))
display = {}
for it in pbar:
# Extract batch
batch_idx = it % n_batches
Xv_batch = self.Xv_gt_train[
batch_idx * train_batch_size : (batch_idx + 1) * train_batch_size
]
if self.use_local_model:
Xh_batch = self.Xh_gt_train[
batch_idx * train_batch_size : (batch_idx + 1) * train_batch_size
]
else:
Xh_batch = None
# Gradient step
start_update = time.time()
(
log_potentials,
opt_state,
avg_elbo_lower_bound,
) = self.update_log_potentials(
Xv_batch, log_potentials, opt_state, Xh_batch=Xh_batch
)
train_avg_elbo_lower_bound = float(jax.device_get(avg_elbo_lower_bound))
display["train_elbo_lb"] = round(train_avg_elbo_lower_bound, 4)
      # Skip timing of the first iteration, which includes JIT compilation
if current_step > 0:
update_time = time.time() - start_update
all_update_times.append(update_time)
all_train_avg_elbos_lb.append(train_avg_elbo_lower_bound)
# Evaluation step
if (
current_step % self.config.learning.eval_every == 0
or current_step == n_steps
):
start_eval = time.time()
(
test_avg_elbo_lower_bound,
test_avg_elbo_mode,
test_qs,
) = self.eval_ELBOs_dataset(
self.Xv_gt_test[: self.config.inference.test_size_eval_and_store],
log_potentials,
self.Xh_gt_test,
)
test_avg_elbo_lower_bound = float(
jax.device_get(test_avg_elbo_lower_bound)
)
test_avg_elbo_mode = float(jax.device_get(test_avg_elbo_mode))
display["test_elbo_lb"] = round(test_avg_elbo_lower_bound, 4)
eval_time = time.time() - start_eval
if current_step > 0:
all_test_avg_elbos_lb.append(test_avg_elbo_lower_bound)
all_test_avg_elbos_mode.append(test_avg_elbo_mode)
all_eval_times.append(eval_time)
      # When store_inference_results is set, also evaluate on the training set at the end
if (
current_step == n_steps
and self.config.inference.store_inference_results
):
(
last_train_avg_elbo_lower_bound,
last_train_avg_elbo_mode,
last_train_qs,
) = self.eval_ELBOs_dataset(
self.Xv_gt_train[: self.config.inference.test_size_eval_and_store],
log_potentials,
self.Xh_gt_train,
)
last_train_avg_elbo_lower_bound = float(
jax.device_get(last_train_avg_elbo_lower_bound)
)
last_train_avg_elbo_mode = float(
jax.device_get(last_train_avg_elbo_mode)
)
pbar.set_postfix(display)
current_step += 1
print("Training finished")
results = {
"config": self.config,
"log_potentials": log_potentials,
"all_train_avg_elbos_lb": all_train_avg_elbos_lb,
"all_test_avg_elbos_mode": all_test_avg_elbos_mode,
"all_test_avg_elbos_lb": all_test_avg_elbos_lb,
"all_update_times": all_update_times,
"all_eval_times": all_eval_times,
}
return results
def f_prime(x):
"""Stable implementation of the derivative of f(x)=log(1 - exp(-x))."""
stable_f_prime = jnp.where(
x >= 0, jnp.exp(-x) / (1.0 - jnp.exp(-x)), 1.0 / (jnp.exp(x) - 1.0)
)
return jnp.clip(stable_f_prime, utils.CLIP_INF, None)
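# Hedged sanity check (a sketch, not part of the training pipeline): f_prime
# should agree with the autodiff derivative of f(x) = log(1 - exp(-x)) away
# from the clipped region, assuming jax and jnp are imported in this module.
# xs = jnp.linspace(0.1, 5.0, 16)
# autodiff = jax.vmap(jax.grad(lambda x: jnp.log(1.0 - jnp.exp(-x))))(xs)
# assert jnp.allclose(autodiff, f_prime(xs), atol=1e-5)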
|
max_product_noisy_or-main
|
mp_noisy_or/noisy_or_vi.py
|
# Copyright 2023 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Configuration to train the noisy OR models."""
import ml_collections
# pylint: disable=invalid-name
def get_config():
"""Training config for both BP and VI."""
config = ml_collections.config_dict.ConfigDict()
config.method = ""
config.dataset = ""
config.config_BP_20news = get_config_BP_20news()
config.config_BP_BMF = get_config_BP_BMF()
config.config_BP_2Ddeconv = get_config_BP_2Ddeconv()
config.config_BP_yelp = get_config_BP_yelp()
config.config_BP_imdb = get_config_BP_imdb()
config.config_BP_abstract = get_config_BP_abstract()
config.config_BP_agnews = get_config_BP_agnews()
config.config_BP_patent = get_config_BP_patent()
config.config_BP_overparam = get_config_BP_overparam()
config.config_VI_20news = get_config_VI_20news()
config.config_VI_BMF = get_config_VI_BMF()
config.config_VI_2Ddeconv = get_config_VI_2Ddeconv()
config.config_VI_yelp = get_config_VI_yelp()
config.config_VI_imdb = get_config_VI_imdb()
config.config_VI_abstract = get_config_VI_abstract()
config.config_VI_agnews = get_config_VI_agnews()
config.config_VI_patent = get_config_VI_patent()
config.config_PMP_BMF = get_config_PMP_BMF()
config.config_PMP_2Ddeconv = get_config_PMP_2Ddeconv()
return config
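# Hedged usage sketch: the launcher is expected to pick one sub-config from
# config.method and config.dataset. The getattr plumbing below is an
# assumption for illustration; only the attribute names come from this file.
# config = get_config()
# config.method, config.dataset = "VI", "20news"
# run_config = getattr(config, f"config_{config.method}_{config.dataset}")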
##############################################
############## BP configs ####################
##############################################
def get_config_BP() -> ml_collections.config_dict.ConfigDict:
"""Base config for BP."""
config = ml_collections.config_dict.ConfigDict()
config.seed = 0
config.min_clip = 1e-5
config.backend = "bp"
config.data = ml_collections.config_dict.ConfigDict()
config.data.args = ml_collections.config_dict.ConfigDict()
config.bp = ml_collections.config_dict.ConfigDict()
config.bp.temperature = 0.0 # max-product
config.bp.num_iters = 100
config.bp.damping = 0.5
config.learning = ml_collections.config_dict.ConfigDict()
# Initialization parameters
config.learning.proba_init = 0.5
config.learning.leak_proba_init = 0.9
config.learning.leak_proba_init_not_updated = 0.99
config.learning.noise_temperature_init = 0.0
config.learning.learning_rate = 1e-2
config.learning.num_iters = 1000
config.learning.train_batch_size = 100_000
config.learning.eval_every = 100
config.learning.n_hidden_by_sample = 1
config.learning.noise_temperature = 0.0
config.inference = ml_collections.config_dict.ConfigDict()
config.inference.store_inference_results = False
config.inference.test_size_eval_and_store = 100_000
config.inference.test_batch_size = 100_000
# Mode at test time
config.inference.n_hidden_by_sample = 1
config.inference.noise_temperature = 0.0
return config
def get_config_BP_20news() -> ml_collections.config_dict.ConfigDict:
"""Base config for BP on 20news."""
config = get_config_BP()
config.data.dataset = "20news"
config.data.ratio_train = 0.7
config.data.args.sparse_data = False
config.data.args.n_layers = 3
return config
def get_config_BP_yelp() -> ml_collections.config_dict.ConfigDict:
"""Base config for BP on yelp."""
config = get_config_BP()
# Train set is 560_000, test is 38_000
config.data.dataset = "yelp_polarity_reviews"
config.data.args.key_name = "text"
config.data.args.vocab_size = 10_000
config.data.args.max_sequence_length = 500
config.data.args.n_layers = 5
config.learning.learning_rate = 3e-4
config.learning.train_batch_size = 128
config.learning.num_iters = 3_600  # followed by VI training
config.learning.eval_every = 600 # eval is slow
config.inference.test_batch_size = 512
return config
def get_config_BP_imdb() -> ml_collections.config_dict.ConfigDict:
"""Base config for BP on imdb."""
config = get_config_BP_yelp()
# Train set is 25_000, test is 25_000
config.data.dataset = "imdb_reviews"
return config
def get_config_BP_abstract() -> ml_collections.config_dict.ConfigDict:
"""Base config for BP on abstract."""
config = get_config_BP_yelp()
# Train set is 203_037, test is 6_440
config.data.dataset = "scientific_papers"
config.data.args.key_name = "abstract"
return config
def get_config_BP_agnews() -> ml_collections.config_dict.ConfigDict:
"""Base config for BP on abstract."""
config = get_config_BP_yelp()
# Train set is 120_000, test is 7_600
config.data.dataset = "ag_news_subset"
config.data.args.key_name = "description"
return config
def get_config_BP_patent() -> ml_collections.config_dict.ConfigDict:
"""Base config for BP on abstract."""
config = get_config_BP_yelp()
# Train set is 85_568, test is 4_754
config.data.dataset = "patent"
config.data.args.key_name = "description"
return config
def get_config_BP_BMF() -> ml_collections.config_dict.ConfigDict:
"""Training config for BP on binary deconvolution."""
config = get_config_BP()
config.data.dataset = "BMF"
config.data.ratio_train = 0.5
config.data.args.seed = 0
config.data.args.n_rows = 50
config.data.args.rank = 15
config.data.args.n_cols = 50
config.data.args.p_Xon = 0.25
config.learning.learning_rate = 1e-3
config.learning.train_batch_size = 20
config.learning.num_iters = 40_000
config.learning.eval_every = 400
config.inference.store_inference_results = True
# Add noise to break symmetries
config.learning.noise_temperature = 1.0
config.learning.noise_temperature_init = 0.1
return config
def get_config_BP_2Ddeconv() -> ml_collections.config_dict.ConfigDict:
"""Training config for BP on binary deconvolution."""
config = get_config_BP()
config.data.dataset = "2D_deconvolution"
config.data.ratio_train = 0.8
config.data.args.dataset_name = "pmp"
config.data.args.W_shape = (16, 5, 5)
config.learning.num_iters = 3000
config.learning.eval_every = 300
config.inference.store_inference_results = True
# Add noise to break symmetries
config.learning.noise_temperature = 1.0
config.learning.noise_temperature_init = 0.1
return config
def get_config_BP_overparam() -> ml_collections.config_dict.ConfigDict:
"""Training config for BP on binary deconvolution."""
config = get_config_BP()
config.data.dataset = "overparam"
config.data.ratio_train = 0.9
config.data.args.dataset_name = "PLNT"
config.data.args.n_latent = 8
config.learning.learning_rate = 1e-3 # as in the paper
config.learning.train_batch_size = 20 # as in the paper
config.learning.num_iters = 45_000 # as in the paper
config.learning.eval_every = 5_000
# Add noise to break symmetries
config.learning.noise_temperature = 1.0
config.learning.noise_temperature_init = 0.1
return config
##############################################
############## VI configs ####################
##############################################
def get_config_VI() -> ml_collections.config_dict.ConfigDict:
"""Training config for the VI method."""
config = ml_collections.config_dict.ConfigDict()
config.seed = 0
config.init_model_path = ""
config.min_clip = 1e-5
config.data = ml_collections.config_dict.ConfigDict()
config.data.args = ml_collections.config_dict.ConfigDict()
config.learning = ml_collections.config_dict.ConfigDict()
# Initialization parameters
config.learning.proba_init = 0.5
config.learning.leak_proba_init = 0.9
config.learning.leak_proba_init_not_updated = 0.99
config.learning.noise_temperature_init = 0.0
config.learning.n_inner_loops = 10
config.learning.n_outer_loops = 10
config.learning.learning_rate = 0.01
config.learning.eval_every = 10
config.learning.train_batch_size = 100_000
config.learning.num_iters = 1000
config.inference = ml_collections.config_dict.ConfigDict()
config.inference.test_batch_size = 100_000
config.inference.test_size_eval_and_store = 100_000
config.inference.store_inference_results = False
return config
def get_config_VI_20news() -> ml_collections.config_dict.ConfigDict:
"""Training config for VI on 20news."""
config = get_config_VI()
config.data.dataset = "20news"
config.data.ratio_train = 0.7
config.data.args.sparse_data = False
config.data.args.n_layers = 3
return config
def get_config_VI_20news_from_authors() -> ml_collections.config_dict.ConfigDict:
"""Training config for VI on 20news."""
config = get_config_VI()
config.data.dataset = "20news_from_authors"
config.data.ratio_train = 0.7
config.data.args.sparse_data = False
return config
def get_config_VI_yelp() -> ml_collections.config_dict.ConfigDict:
"""Training config for VI on yelp."""
config = get_config_VI()
# Train set is 560_000, test is 38_000
config.data.dataset = "yelp_polarity_reviews"
config.data.args.key_name = "text"
config.data.args.vocab_size = 10_000
config.data.args.max_sequence_length = 500
config.data.args.n_layers = 5
config.learning.train_batch_size = 128
config.learning.num_iters = 4_000
config.learning.eval_every = 400
config.inference.test_batch_size = 512
return config
def get_config_VI_imdb() -> ml_collections.config_dict.ConfigDict:
"""Training config for VI on imdb."""
config = get_config_VI_yelp()
# Train set is 25_000, test is 25_000
config.data.dataset = "imdb_reviews"
return config
def get_config_VI_abstract() -> ml_collections.config_dict.ConfigDict:
"""Training config for VI on abstract."""
config = get_config_VI_yelp()
# Train set is 203_037, test is 6_440
config.data.dataset = "scientific_papers"
config.data.args.key_name = "abstract"
return config
def get_config_VI_agnews() -> ml_collections.config_dict.ConfigDict:
"""Base config for BP on abstract."""
config = get_config_VI_yelp()
# Train set is 120_000, test is 7_600
config.data.dataset = "ag_news_subset"
config.data.args.key_name = "description"
return config
def get_config_VI_patent() -> ml_collections.config_dict.ConfigDict:
"""Base config for BP on abstract."""
config = get_config_VI_yelp()
# Train set is 85_568, test is 4_754
config.data.dataset = "patent"
config.data.args.key_name = "description"
return config
def get_config_VI_BMF() -> ml_collections.config_dict.ConfigDict:
"""Training config for BP on binary deconvolution."""
config = get_config_VI()
config.data.dataset = "BMF"
config.data.ratio_train = 0.5
config.data.args.seed = 0
config.data.args.n_rows = 50
config.data.args.rank = 15
config.data.args.n_cols = 50
config.data.args.p_Xon = 0.25
config.learning.learning_rate = 1e-3
config.learning.train_batch_size = 20
config.learning.num_iters = 40_000
config.learning.eval_every = 400
config.inference.store_inference_results = True
# Add noise to break symmetries
config.learning.noise_temperature_init = 0.1
return config
def get_config_VI_2Ddeconv() -> ml_collections.config_dict.ConfigDict:
"""Training config for VI on binary deconvolution."""
config = get_config_VI()
config.data.dataset = "2D_deconvolution"
config.data.ratio_train = 0.8
config.learning.num_iters = 3000
config.learning.eval_every = 300
config.inference.store_inference_results = True
# Add noise to break symmetries
config.learning.noise_temperature_init = 0.1
return config
##############################################
############## PMP configs ###################
##############################################
def get_config_PMP_BMF() -> ml_collections.config_dict.ConfigDict:
"""Training config for BP on binary deconvolution."""
config = ml_collections.config_dict.ConfigDict()
config.seed = 0
config.n_rows = 50
config.rank = 15
config.n_cols = 50
config.p_Xon = 0.25
return config
def get_config_PMP_2Ddeconv() -> ml_collections.config_dict.ConfigDict:
"""Training config for VI on binary deconvolution."""
config = ml_collections.config_dict.ConfigDict()
config.seed = 0
config.ratio_train = 0.8
return config
|
max_product_noisy_or-main
|
mp_noisy_or/config.py
|
# Copyright 2023 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Trains a noisy OR Bayesian network with max-product Belief Propagation in JAX."""
import datetime
import functools
import time
import chex
import jax
import jax.numpy as jnp
import numpy as np
import optax
from pgmax import factor
from pgmax import fgraph
from pgmax import fgroup
from pgmax import infer
from pgmax import vgroup
import tqdm
from mp_noisy_or import data
from mp_noisy_or import utils
# pylint: disable=g-complex-comprehension
# pylint: disable=g-doc-args
# pylint: disable=invalid-name
# pylint: disable=g-doc-return-or-yield
# pylint: disable=comparison-with-itself
def build_noisy_or_fg(edges_children_to_parents, X_shape):
"""Build a factor graph with the PGMax representation of the noisy OR factors.
Args:
edges_children_to_parents: Dict {idx_child: {idx_parent: idx_potential}}
X_shape: Shape of array containing all the hidden and visible variables
Returns:
fg: factor graph with the noisy OR factors
factor_children_indices: Indices of all children
factor_parents_indices: Indices of all parents
factor_log_potentials_indices: Indices of the log potentials
"""
children_to_parents = tuple(
[
(idx_child, idx_parent)
for idx_child, idx_parents_idx_potentials in edges_children_to_parents.items()
for idx_parent in idx_parents_idx_potentials
]
)
num_ORFactors = len(children_to_parents)
children_to_parents_dict = dict(
zip(children_to_parents, np.arange(num_ORFactors))
)
# Define variables groups
X = vgroup.NDVarArray(num_states=2, shape=X_shape)
noisy_X = vgroup.NDVarArray(num_states=2, shape=(num_ORFactors,))
# Define the factor graph
fg = fgraph.FactorGraph(variable_groups=[X, noisy_X])
# Loop through the entries of the dictionary
variables_for_ORFactors = []
variables_for_NoisyFactors = []
factors_log_potentials_indices = []
for (
idx_child,
idx_parents_idx_potentials,
) in edges_children_to_parents.items():
variables_for_ORFactor = []
for idx_parent, idx_potential in idx_parents_idx_potentials.items():
parent = X[idx_parent]
idx_noisy_X = children_to_parents_dict[(idx_child, idx_parent)]
noisy_parent = noisy_X[idx_noisy_X]
variables_for_ORFactor.append(noisy_parent)
# Noisy factor: order matters!
variables_for_NoisyFactors.append([parent, noisy_parent])
factors_log_potentials_indices.append(idx_potential)
child = X[idx_child]
variables_for_ORFactor.append(child)
variables_for_ORFactors.append(variables_for_ORFactor)
# Build the FactorGroups
OR_factor_group = fgroup.ORFactorGroup(variables_for_ORFactors)
log_potential_matrix = np.zeros((num_ORFactors,) + (2, 2))
noisy_factor_group = fgroup.PairwiseFactorGroup(
variables_for_factors=variables_for_NoisyFactors,
log_potential_matrix=log_potential_matrix,
)
# Add the FactorGroups at once, which is computationally efficient
fg.add_factors([OR_factor_group, noisy_factor_group])
# Factor indices
factor_children_indices = jnp.array(children_to_parents)[:, 0]
factor_parents_indices = jnp.array(children_to_parents)[:, 1]
factor_log_potentials_indices = jnp.array(factors_log_potentials_indices)
def _get_indices(array):
"""Useful function to process indices."""
array_by_dim = tuple(
[array[:, idx_col] for idx_col in range(array.shape[1])]
)
return array_by_dim
if len(X_shape) > 1:
factor_children_indices = _get_indices(factor_children_indices)
factor_parents_indices = _get_indices(factor_parents_indices)
factor_log_potentials_indices = _get_indices(factor_log_potentials_indices)
return (
fg,
factor_children_indices,
factor_parents_indices,
factor_log_potentials_indices,
)
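# Hedged usage sketch: a toy graph with two parents (indices 0, 1) and two
# children (indices 2, 3) over a flat variable array. The nested dict maps
# {idx_child: {idx_parent: idx_potential}} as described in the docstring;
# the shapes here are illustrative only.
# edges = {2: {0: 0, 1: 1}, 3: {1: 2}}
# fg, child_idx, parent_idx, lp_idx = build_noisy_or_fg(edges, X_shape=(4,))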
class NoisyOR_BP:
"""Trains a NoisyOR model with max-product."""
def __init__(self, config):
self.config = config
np.random.seed(self.config.seed)
self.rng = jax.random.PRNGKey(self.config.seed)
# Load data
(
self.Xv_gt_train,
self.Xv_gt_test,
self.edges_children_to_parents,
self.X_shape,
self.log_potentials_shape,
self.leak_potentials_mask,
self.dont_update_potentials_mask,
self.slice_visible,
self.slice_hidden,
self.leak_node_idx,
) = data.DATA_LOADER[self.config.data.dataset](**self.config.data.args)
if self.Xv_gt_test is None:
# Train-test split
self.Xv_gt_train, self.Xv_gt_test = data.train_test_shuffle_split(
self.Xv_gt_train, self.config.seed, self.config.data.ratio_train
)
else:
np.random.shuffle(self.Xv_gt_train)
np.random.shuffle(self.Xv_gt_test)
self.has_dense_Xv = isinstance(self.Xv_gt_train, np.ndarray)
if not isinstance(self.slice_visible, tuple):
self.slice_visible = (self.slice_visible,)
if not isinstance(self.slice_hidden, tuple):
self.slice_hidden = (self.slice_hidden,)
if not isinstance(self.leak_node_idx, tuple):
self.leak_node_idx = (self.leak_node_idx,)
# The mask indicates the hidden and visible variables
self.X_mask = np.zeros(shape=self.X_shape, dtype=float)
self.X_mask[self.slice_visible] = 1.0
self.X_mask[self.slice_hidden] = 1.0
# Create the factor graph
(
self.fg,
self.factor_children_indices,
self.factor_parents_indices,
self.factor_log_potentials_indices,
) = build_noisy_or_fg(self.edges_children_to_parents, self.X_shape)
print("Factor graph created")
# Get variable group and factor group from the factor graph
self.X = self.fg.variable_groups[0]
self.noisy_factor_group = self.fg.factor_groups[factor.EnumFactor][0]
# Create the BP functions for max-product
self.bp = infer.build_inferer(self.fg.bp_state, backend=config.backend)
# Create the optimizer
self.opt = optax.adam(learning_rate=config.learning.learning_rate)
def __hash__(self):
# pylint: disable=line-too-long
# See https://jax.readthedocs.io/en/latest/faq.html#strategy-2-marking-self-as-static
return hash(tuple(self.edges_children_to_parents.keys()))
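# Hedged note: the jitted methods below mark `self` as static (see the FAQ
# link above), so JAX keys its compilation cache on hash(self). Hashing only
# the child indices suggests instances with the same edge structure are
# meant to share compiled code, while array attributes do not force retraces.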
def densify(self, Xv_batch):
"""Densify a sparse batch of activations."""
# Get dense matrix of observations
if self.has_dense_Xv:
Xv_batch_dense = Xv_batch
else:
assert len(self.slice_visible) == 1
Xv_batch_dense = np.zeros((len(Xv_batch), self.slice_visible[0].stop))
for idx_row, Xv_row in enumerate(Xv_batch):
if Xv_row.shape[0] > 0:
Xv_batch_dense[idx_row, Xv_row] = 1
return Xv_batch_dense
@functools.partial(
jax.jit,
static_argnames=("self", "noise_temperature", "n_hidden_by_sample"),
)
def posterior_sample(
self,
Xv_batch,
rng,
log_potentials,
noise_temperature,
n_hidden_by_sample,
):
"""Given a batch of visible variables, get samples or the mode of the posterior."""
n_samples = len(Xv_batch)
# First, create copies of the visible variables
if n_hidden_by_sample > 1:
Xv_batch = jnp.repeat(Xv_batch, repeats=n_hidden_by_sample, axis=0)
# Second, create the evidence array by clamping the visible variables
uX = jnp.zeros((n_samples * n_hidden_by_sample,) + self.X_shape + (2,))
uX = uX.at[(slice(None, None, None),) + self.slice_visible + (0,)].set(
(2 * Xv_batch - 1) * utils.CLIP_INF
)
# utils.CLIP_INF acts as a noisy channel between the observations and X
# Also clamp the leak node to be on
uX = uX.at[(slice(None, None, None),) + self.leak_node_idx + (0,)].set(
utils.CLIP_INF
)
# Third, add Gumbel noise to the hidden variables
rng, rng_input = jax.random.split(rng)
# Note: we use a prior of 0.5 for the hidden variables
hidden_evidence = noise_temperature * jax.random.gumbel(
rng_input,
shape=uX[(slice(None, None, None),) + self.slice_hidden].shape,
)
uX = uX.at[(slice(None, None, None),) + self.slice_hidden].set(
hidden_evidence
)
# Update the log potentials
log_potentials_copied = log_potentials[self.factor_log_potentials_indices]
log_potential_matrix = jnp.zeros((log_potentials_copied.shape[0], 2, 2))
log_potential_matrix = log_potential_matrix.at[:, 0, 0].set(0.0)
log_potential_matrix = log_potential_matrix.at[:, 0, 1].set(utils.CLIP_INF)
log_potential_matrix = log_potential_matrix.at[:, 1, 0].set(
-log_potentials_copied
)
log_potential_matrix = log_potential_matrix.at[:, 1, 1].set(
utils.log1mexp(log_potentials_copied)
)
# Useful function for jax.vmap
def init_bp_arrays(uX, log_potential_matrix):
bp_arrays = self.bp.init(
evidence_updates={self.X: uX},
log_potentials_updates={
self.noisy_factor_group: log_potential_matrix
},
)
return bp_arrays
# Run max-product and get the beliefs
bp_arrays = jax.vmap(init_bp_arrays, in_axes=(0, None), out_axes=0)(
uX, log_potential_matrix
)
assert self.config.backend == "bp"
bp_arrays = jax.vmap(
functools.partial(
self.bp.run,
num_iters=self.config.bp.num_iters,
damping=self.config.bp.damping,
temperature=self.config.bp.temperature,
),
in_axes=0,
out_axes=0,
)(bp_arrays)
beliefs = jax.vmap(self.bp.get_beliefs, in_axes=0, out_axes=0)(bp_arrays)
map_states = infer.decode_map_states(beliefs)
X_samples = map_states[self.X]
# Clamp the visible variables and leak node
X_samples_clamped = X_samples.at[
(slice(None, None, None),) + self.slice_visible
].set(Xv_batch)
X_samples_clamped = X_samples_clamped.at[
(slice(None, None, None),) + self.leak_node_idx
].set(1.0)
return X_samples_clamped, rng
@functools.partial(jax.jit, static_argnames="self")
def log_joint_lik(self, X_samples, log_potentials):
"""Compute the expectation under the posterior of the log joint likelihood."""
log_potentials_copied = log_potentials[self.factor_log_potentials_indices]
def log_joint_lik_sample(X_sample):
"""Joint likelihood of the hidden and the visible."""
# Compute x_k * w_{k -> i}
X_sample_parents = X_sample[self.factor_parents_indices]
XW = X_sample_parents * log_potentials_copied
# Sum for each factor
sum_factor_XW = (
jnp.zeros(shape=self.X_shape).at[self.factor_children_indices].add(XW)
)
# Clipping to avoid nan
log_p_factors = -sum_factor_XW * (1 - X_sample) + X_sample * (
utils.log1mexp(sum_factor_XW)
)
# X_mask removes the leak node, which is never the child of a factor
log_p_factors *= self.X_mask
log_joint_lik = log_p_factors.sum()
return log_joint_lik
log_joint_lik = jax.vmap(log_joint_lik_sample, in_axes=0)(X_samples)
log_joint_lik = log_joint_lik.sum()
avg_log_joint_lik = log_joint_lik / X_samples.shape[0]
return avg_log_joint_lik, log_joint_lik
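# Hedged worked example of the per-variable term above: with active-parent
# weight sum s = sum_k x_k * w_{k->i}, the noisy-OR likelihood gives
#   log p(x_i = 0 | parents) = -s,  log p(x_i = 1 | parents) = log(1 - exp(-s)),
# which is exactly -s * (1 - x_i) + x_i * log1mexp(s). For s = 1.5 (assuming
# utils.log1mexp(x) computes log(1 - exp(-x)) as used here):
# assert jnp.allclose(utils.log1mexp(jnp.asarray(1.5)),
#                     jnp.log(1.0 - jnp.exp(-1.5)), atol=1e-6)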
@functools.partial(jax.jit, static_argnames=("self", "n_hidden_by_sample"))
def compute_ELBO_from_samples(
self, X_samples, log_potentials, n_hidden_by_sample
):
"""Compute the ELBO given the posteriors samples of a batch.
Note: if we use multiple samples, we add the entropy of the posterior here,
and observe that its gradient vanishes.
"""
if n_hidden_by_sample == 1:
return self.log_joint_lik(X_samples, log_potentials)
def compute_ELBO_from_samples_same_Xv(X_samples_same_Xv):
"""Compute the ELBO given multiple samples for the same posterior."""
unique_mask, _, counts = utils.get_unique_masks_locations_counts(
X_samples_same_Xv
)
probas = counts / jnp.sum(counts)
probas = jnp.clip(probas, self.config.min_clip, None)
entropy = -jnp.sum(unique_mask * probas * jnp.log(probas))
avg_log_joint_lik, _ = self.log_joint_lik(
X_samples_same_Xv, log_potentials
)
return avg_log_joint_lik + entropy
# Group the posterior samples with same visible observation
X_samples_reshaped = X_samples.reshape(
(-1, n_hidden_by_sample) + X_samples.shape[1:]
)
# Compute the Elbo for each observation
elbo_samples = jax.vmap(
compute_ELBO_from_samples_same_Xv, in_axes=0, out_axes=0
)(X_samples_reshaped)
# Sum the Elbos
sum_elbo = elbo_samples.sum()
return sum_elbo / elbo_samples.shape[0], sum_elbo
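# Hedged worked example of the entropy estimate above: with 4 posterior
# samples for one observation, two of which coincide, the empirical weights
# over unique samples are (0.5, 0.25, 0.25) and the entropy term is
#   -(0.5 * log 0.5 + 2 * 0.25 * log 0.25) ~= 1.04,
# computed only over entries selected by the unique mask.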
@functools.partial(jax.jit, static_argnames="self")
def compute_gradients(self, X_samples, log_potentials):
"""Compute the gradients of the Elbo in closed-form."""
log_potentials_copied = log_potentials[self.factor_log_potentials_indices]
def compute_gradients_sample(X_sample):
"""Compute the gradient for a sample."""
# Compute x_k * w_{k -> i}
X_sample_parents = X_sample[self.factor_parents_indices]
XW = X_sample_parents * log_potentials_copied
# Sum for each factor
sum_factor_XW = (
jnp.zeros(shape=self.X_shape).at[self.factor_children_indices].add(XW)
)
# Children for each factor
X_sample_children = X_sample[self.factor_children_indices]
sum_factor_XW_children = sum_factor_XW[self.factor_children_indices]
grad_sample_flat = (
X_sample_children * X_sample_parents * g(sum_factor_XW_children)
- X_sample_parents
)
# Unflatten the gradients
grad_sample = (
jnp.zeros(shape=self.log_potentials_shape)
.at[self.factor_log_potentials_indices]
.add(grad_sample_flat)
)
return grad_sample
grad_samples = jax.vmap(compute_gradients_sample, in_axes=0)(X_samples)
return jnp.mean(grad_samples, axis=0)
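# Hedged check of the closed-form gradient above: for a single edge with
# child state xi, parent state xk and weight w, the per-factor term is
#   l(w) = -(1 - xi) * xk * w + xi * log(1 - exp(-xk * w)),
# so dl/dw = xi * xk * g(xk * w) - xk, matching grad_sample_flat. A minimal
# autodiff comparison for xi = xk = 1 (assumes jax/jnp and the g defined
# later in this file):
# l = lambda w: jnp.log(1.0 - jnp.exp(-w))
# assert jnp.allclose(jax.grad(l)(0.7), g(jnp.asarray(0.7)) - 1.0, atol=1e-5)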
def update_log_potentials(
self,
Xv_batch,
log_potentials,
opt_state,
noise_temperature,
n_hidden_by_sample,
):
"""Update the log potentials."""
# Sample from the posterior
X_samples, self.rng = self.posterior_sample(
Xv_batch,
self.rng,
log_potentials,
noise_temperature,
n_hidden_by_sample,
)
# Get the loss and the gradients
avg_elbo, _ = self.compute_ELBO_from_samples(
X_samples, log_potentials, n_hidden_by_sample
)
grad_log_potentials = self.compute_gradients(X_samples, log_potentials)
chex.assert_equal_shape([log_potentials, grad_log_potentials])
# Update the log potentials
updates, new_opt_state = self.opt.update(-grad_log_potentials, opt_state)
new_log_potentials = optax.apply_updates(log_potentials, updates)
new_log_potentials = jnp.clip(
new_log_potentials,
self.config.min_clip,
None,
)
# Do not update the fixed potentials
if self.dont_update_potentials_mask is not None:
new_log_potentials += self.dont_update_potentials_mask * (
log_potentials - new_log_potentials
)
return new_log_potentials, new_opt_state, avg_elbo
def eval_ELBOs_dataset(
self,
Xv,
log_potentials,
noise_temperature,
n_hidden_by_sample,
test_batch_size,
):
"""Compute the Elbo on an entire dataset."""
n_batches = (len(Xv) + test_batch_size - 1) // test_batch_size
all_sum_elbo_samples = 0.0
all_X_samples = []
for batch_idx in range(n_batches):
Xv_batch = Xv[
batch_idx * test_batch_size : (batch_idx + 1) * test_batch_size
]
Xv_batch_dense = self.densify(Xv_batch)
# Get the mode or a sample from the posterior
X_samples, self.rng = self.posterior_sample(
Xv_batch_dense,
self.rng,
log_potentials,
noise_temperature,
n_hidden_by_sample,
)
all_X_samples.append(X_samples)
# Compute the Elbo
_, sum_elbo_samples = self.compute_ELBO_from_samples(
X_samples, log_potentials, n_hidden_by_sample
)
all_sum_elbo_samples += sum_elbo_samples
X_samples = np.concatenate(all_X_samples, axis=0)
return all_sum_elbo_samples / len(Xv), X_samples
def train(self):
"""Train the noisy OR model."""
log_potentials = utils.init_log_potentials(
self.log_potentials_shape,
self.config.learning.proba_init,
self.leak_potentials_mask,
self.config.learning.leak_proba_init,
self.dont_update_potentials_mask,
self.config.learning.leak_proba_init_not_updated,
self.config.learning.noise_temperature_init,
self.config.min_clip,
)
opt_state = self.opt.init(log_potentials)
current_step = 0
train_noise_temperature = self.config.learning.noise_temperature
train_n_hidden_by_sample = self.config.learning.n_hidden_by_sample
test_noise_temperature = self.config.inference.noise_temperature
test_n_hidden_by_sample = self.config.inference.n_hidden_by_sample
test_batch_size = self.config.inference.test_batch_size
n_steps = self.config.learning.num_iters
train_batch_size = self.config.learning.train_batch_size
n_batches = (
len(self.Xv_gt_train) + train_batch_size - 1
) // train_batch_size
all_update_times = []
all_train_avg_elbos = []
all_eval_times = []
all_test_avg_elbos_mode = []
# Training iterations
print(f"Training for {n_steps} steps")
pbar = tqdm.tqdm(range(n_steps + 1))
display = {}
for it in pbar:
batch_idx = it % n_batches
Xv_batch = self.Xv_gt_train[
batch_idx * train_batch_size : (batch_idx + 1) * train_batch_size
]
# Gradient step
start_update = time.time()
Xv_batch_dense = self.densify(Xv_batch)
(
log_potentials,
opt_state,
train_avg_elbo,
) = self.update_log_potentials(
Xv_batch_dense,
log_potentials,
opt_state,
train_noise_temperature,
train_n_hidden_by_sample,
)
train_avg_elbo = float(jax.device_get(train_avg_elbo))
display["train_elbo"] = round(train_avg_elbo, 4)
# The first iteration includes JIT compilation, so exclude it from timings
if current_step > 0:
update_time = time.time() - start_update
all_update_times.append(update_time)
all_train_avg_elbos.append(train_avg_elbo)
# Evaluation step
if (
current_step % self.config.learning.eval_every == 0
or current_step == n_steps
):
start_eval = time.time()
test_avg_elbo_mode, test_X_samples = self.eval_ELBOs_dataset(
self.Xv_gt_test[: self.config.inference.test_size_eval_and_store],
log_potentials,
test_noise_temperature,
test_n_hidden_by_sample,
test_batch_size,
)
test_avg_elbo_mode = float(jax.device_get(test_avg_elbo_mode))
display["test_elbo"] = round(test_avg_elbo_mode, 4)
eval_time = time.time() - start_eval
if current_step > 0:
all_eval_times.append(eval_time)
all_test_avg_elbos_mode.append(test_avg_elbo_mode)
# When store_inference_results is set, also evaluate on the training set at the end
if (
current_step == n_steps
and self.config.inference.store_inference_results
):
last_train_avg_elbo_mode, last_train_X_samples = (
self.eval_ELBOs_dataset(
self.Xv_gt_train[
: self.config.inference.test_size_eval_and_store
],
log_potentials,
test_noise_temperature,
test_n_hidden_by_sample,
test_batch_size,
)
)
last_train_avg_elbo_mode = float(
jax.device_get(last_train_avg_elbo_mode)
)
pbar.set_postfix(display)
current_step += 1
print("Training finished")
results = {
"config": self.config,
"log_potentials": log_potentials,
"all_train_avg_elbos": all_train_avg_elbos,
"all_test_avg_elbos_mode": all_test_avg_elbos_mode,
"all_update_times": all_update_times,
"all_eval_times": all_eval_times,
"test_X_samples": test_X_samples,
}
return results
def g(x):
"""Stable implementation of g(x) = 1 / (1 - exp(-x))."""
stable_g = jnp.where(
x >= 0, 1.0 / (1.0 - jnp.exp(-x)), jnp.exp(x) / (jnp.exp(x) - 1.0)
)
return jnp.clip(stable_g, utils.CLIP_INF, None)
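# Hedged note: g relates to f_prime in noisy_or_vi.py through
#   g(x) = 1 / (1 - exp(-x)) = 1 + exp(-x) / (1 - exp(-x)) = 1 + f'(x),
# where f(x) = log(1 - exp(-x)); a quick elementwise check (assumes jnp):
# xs = jnp.linspace(0.1, 5.0, 8)
# assert jnp.allclose(g(xs), 1.0 + jnp.exp(-xs) / (1.0 - jnp.exp(-xs)), atol=1e-5)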
|
max_product_noisy_or-main
|
mp_noisy_or/noisy_or_bp.py
|
# Copyright 2023 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utils functions for computing the results."""
import jax
from jax.lax import dynamic_slice
from jax.lax import pad
import jax.numpy as jnp
import matplotlib.pyplot as plt
import numpy as np
import scipy
def plot_images(
images,
display=True,
nr=None,
images_min=None,
images_max=None,
colorbar=False,
):
"""Useful function for visualizing several images."""
n_images, H, W = images.shape
if images_min is not None:
images_min = min(images_min, images.min())
else:
images_min = images.min()
images = images - images_min
if images_max is not None:
images_max = max(images_max, images.max())
else:
images_max = images.max()
images /= images_max + 1e-10
if nr is None:
nr = nc = np.ceil(np.sqrt(n_images)).astype(int)
else:
nc = (n_images + nr - 1) // nr
big_image = np.ones(((H + 1) * nr + 1, (W + 1) * nc + 1, 3))
big_image[..., :3] = 0
big_image[:: H + 1] = [0.5, 0, 0.5]
im = 0
for r in range(nr):
for c in range(nc):
if im < n_images:
big_image[
(H + 1) * r + 1 : (H + 1) * r + 1 + H,
(W + 1) * c + 1 : (W + 1) * c + 1 + W,
:,
] = images[im, :, :, None]
im += 1
if display:
plt.figure(figsize=(10, 10))
plt.imshow(big_image, interpolation="none")
plt.axis("off")
if colorbar:
plt.colorbar()
return big_image
#################################
#### Binary deconvolution #######
#################################
def BMF_metrics(
Xv_gt, X_samples, log_potentials, config, log_potentials_threshold
):
"""Process the BMF outputs and compute the reconstruction error."""
# Threshold the log potentials
data_config = config.data.args
V_bp = log_potentials[: data_config.rank, : data_config.n_cols]
V_bp = (V_bp >= log_potentials_threshold).astype(float)
# Extract the inferred hidden
U_bp = X_samples[:, 0, : data_config.rank]
# print("Avg hidden activations", U_bp.mean())
return BMF_reconstruction(Xv_gt, U_bp, V_bp)
def BMF_reconstruction(Xv_gt, U_bp, V_bp):
"""Reconstruction error for BMF."""
# Reconstruct X
X_bp = np.array(U_bp.dot(V_bp))
X_bp[X_bp >= 1] = 1
rec_ratio_X = np.abs(X_bp - Xv_gt).sum() / Xv_gt.size
return rec_ratio_X
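# Hedged usage sketch: with exact binary factors the Boolean product
# reconstructs X and the error ratio is zero. U is (n_samples, rank), V is
# (rank, n_cols); values are 0/1 floats. Shapes here are illustrative only.
# U = (np.random.rand(20, 5) < 0.3).astype(float)
# V = (np.random.rand(5, 40) < 0.3).astype(float)
# X = np.minimum(U @ V, 1.0)
# assert BMF_reconstruction(X, U, V) == 0.0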
#################################
#### OVERPARAMETRIZATION ########
#################################
# Params from the paper
PRIOR_THRESHOLD = 0.02
MATCHING_THRESHOLD = 1
def count_gt_recovered(Xh_gt, log_potentials_BP):
"""Count the nujber of GT parameters recovered."""
n_latent = log_potentials_BP.shape[0] - 1
# print("prior", log_potentials_BP[-1, -1], log_potentials_BP[-1, :n_latent])
priors_BP = 1 - np.exp(-log_potentials_BP)[-1, :n_latent]
keep = priors_BP > PRIOR_THRESHOLD
log_potentials_BP_filtered = log_potentials_BP[:n_latent, :-1][keep]
n_latent_filtered = log_potentials_BP_filtered.shape[0]
n_gt = Xh_gt.shape[0]
log_potentials_gt = -np.log(Xh_gt)
matching_cost = np.zeros((n_gt, n_latent_filtered))
for idx_latent in range(n_latent_filtered):
for idx_gt in range(n_gt):
matching_cost[idx_gt, idx_latent] = np.max(
np.abs(
log_potentials_BP_filtered[idx_latent] - log_potentials_gt[idx_gt]
)
)
rows_indices, cols_indices = scipy.optimize.linear_sum_assignment(
matching_cost
)
n_matched = sum(
matching_cost[rows_indices, cols_indices] <= MATCHING_THRESHOLD
)
return n_matched
#################################
#### BLIND DECONVOLUTION ########
#################################
@jax.jit
def or_layer_jax(S, W):
"""Jax convolution of S and W for 2D BD."""
_, n_feat, s_height, s_width = S.shape
_, n_feat, feat_height, feat_width = W.shape
im_height, im_width = s_height + feat_height - 1, s_width + feat_width - 1
# Revert the features to have the proper orientations
Wrev = W[:, :, ::-1, ::-1]
# Pad the feature locations
Spad = pad(
S,
0.0,
(
(0, 0, 0),
(0, 0, 0),
(feat_height - 1, feat_height - 1, 0),
(feat_width - 1, feat_width - 1, 0),
),
)
# Convolve Spad and W
def compute_sample(Spad1):
def compute_pixel(r, c):
X1 = (
1
- dynamic_slice(Spad1, (0, r, c), (n_feat, feat_height, feat_width))
* Wrev
).prod((1, 2, 3))
return 1 - X1
compute_cols = jax.vmap(compute_pixel, in_axes=(None, 0), out_axes=1)
compute_rows_cols = jax.vmap(compute_cols, in_axes=(0, None), out_axes=1)
return compute_rows_cols(jnp.arange(im_height), jnp.arange(im_width))
return jax.vmap(compute_sample, in_axes=0, out_axes=0)(Spad)
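# Hedged usage sketch: one sample, one feature. An activation at (1, 1) with
# an all-ones 2x2 feature turns on the 2x2 patch it covers in the (4, 4)
# output; shapes follow the convention above (assumes jnp is imported).
# S = jnp.zeros((1, 1, 3, 3)).at[0, 0, 1, 1].set(1.0)
# W = jnp.ones((1, 1, 2, 2))
# X = or_layer_jax(S, W)  # shape (1, 1, 4, 4), ones exactly on X[0, 0, 1:3, 1:3]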
def BD_reconstruction(Xv_gt_test, test_X_samples, log_potentials_thre):
"""Reconstruction error for BD."""
if log_potentials_thre.ndim == 3:
_, im_height, im_width = Xv_gt_test.shape
n_feat, feat_height, feat_width = log_potentials_thre.shape
s_height = im_height - feat_height + 1
s_width = im_width - feat_width + 1
feats_activations = test_X_samples[:, :n_feat, :s_height, :s_width].astype(
float
)
rec_X_test = or_layer_jax(feats_activations, log_potentials_thre[None])[
:, 0
]
elif log_potentials_thre.ndim == 4:
_, _, im_height, im_width = Xv_gt_test.shape
_, n_feat, feat_height, feat_width = log_potentials_thre.shape
s_height = im_height - feat_height + 1
s_width = im_width - feat_width + 1
feats_activations = test_X_samples[:, :n_feat, :s_height, :s_width].astype(
float
)
rec_X_test = or_layer_jax(feats_activations, log_potentials_thre)
rec_ratio = (Xv_gt_test != rec_X_test).sum() / rec_X_test.size
return feats_activations, rec_X_test, rec_ratio
def iou(a, b):
return np.logical_and(a, b).sum() / np.logical_or(a, b).sum()
def features_iou(W_gt, log_potentials_thre):
"""Compute the features IOU."""
assert log_potentials_thre.shape == (5, 6, 6)
n_gt = W_gt.shape[0]
n_log_potentials = log_potentials_thre.shape[0]
matching_costs = np.zeros((n_gt, n_log_potentials))
for idx_latent in range(n_log_potentials):
for idx_gt in range(n_gt):
# List all the options
matching_cost_options = []
for offset_r in range(2):
for offset_c in range(2):
matching_cost_options.append(
iou(
log_potentials_thre[
idx_latent,
offset_r : offset_r + 5,
offset_c : offset_c + 5,
],
W_gt[idx_gt],
)
)
matching_costs[idx_gt, idx_latent] = np.max(matching_cost_options)
# Hungarian matching
rows_indices, cols_indices = scipy.optimize.linear_sum_assignment(
-matching_costs
)
# print(matching_costs[rows_indices, cols_indices])
return np.mean(matching_costs[rows_indices, cols_indices])
def visualize_cuts(feats):
"""Visualize cuts."""
assert feats.ndim == 4
n_samples, _, img_width, img_height = feats.shape
vup, vdown, hup, hdown = feats[:, 0], feats[:, 1], feats[:, 2], feats[:, 3]
images = np.zeros((n_samples, 2 * img_width, 2 * img_height), float)
# image[1::2, ::2] = 0.5 + np.where(vup > vdown, vup * 0.5, vdown * -0.5)
# image[::2, 1::2] = 0.5 + np.where(hup > hdown, hup * 0.5, hdown * -0.5)
images[:, 1::2, ::2] += vup + 2 * vdown
images[:, ::2, 1::2] -= hup + 2 * hdown
return images
|
max_product_noisy_or-main
|
mp_noisy_or/results_utils.py
|