| python_code (stringlengths 0-780k) | repo_name (stringlengths 7-38) | file_path (stringlengths 5-103) |
|---|---|---|
# Copyright 2023 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Init file for the mp_noisy_or package."""
| max_product_noisy_or-main | mp_noisy_or/__init__.py |
# Copyright 2023 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Our proposed hybrid approach where max-product is used to initialize VI."""
from mp_noisy_or import noisy_or_bp
from mp_noisy_or import noisy_or_vi
# pylint: disable=invalid-name
class NoisyOR_Hybrid:
"""Trains a NoisyOR model with the hybrid approach."""
def __init__(self, config_BP, config_VI):
# Seeds must match
config_VI.seed = config_BP.seed
if "seed" in config_BP.data.args:
assert "seed" in config_VI.data.args
config_VI.data.args.seed = config_BP.data.args.seed
self.noisy_or_bp = noisy_or_bp.NoisyOR_BP(config=config_BP)
self.noisy_or_vi = noisy_or_vi.NoisyOR_VI(config=config_VI)
def train(self):
results_BP = self.noisy_or_bp.train()
log_potentials_BP = results_BP["log_potentials"]
    # The ELBO of the BP initialization is evaluated before VI training starts
results_VI = self.noisy_or_vi.train(log_potentials_BP)
return results_BP, results_VI
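# A minimal usage sketch (added for illustration, following the commented-test
# convention used elsewhere in this repo). Only config.get_config_BP_2Ddeconv()
# is confirmed by the example script; the VI config getter name below is a
# hypothetical placeholder.
#
# from mp_noisy_or import config
#
# def _example_hybrid_run():
#   """Trains BP first, then uses its log potentials to initialize VI."""
#   config_BP = config.get_config_BP_2Ddeconv()
#   config_VI = config.get_config_VI_2Ddeconv()  # hypothetical getter name
#   hybrid = NoisyOR_Hybrid(config_BP=config_BP, config_VI=config_VI)
#   results_BP, results_VI = hybrid.train()
#   return results_BP, results_VI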
| max_product_noisy_or-main | mp_noisy_or/noisy_or_hybrid.py |
# Copyright 2023 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utils functions."""
import jax
import jax.numpy as jnp
import numpy as np
CLIP_INF = -1e6
# pylint: disable=invalid-name
# pylint: disable=g-explicit-length-test
@jax.jit
def log1mexp(x):
"""Stable implementation of f(x) = log(1 - exp(-x)) for x >= 0 following https://cran.r-project.org/web/packages/Rmpfr/vignettes/log1mexp-note.pdf."""
y = jnp.where(
x <= jnp.log(2),
jnp.log(-jnp.expm1(-x)),
jnp.log1p(-jnp.exp(-x))
)
return jnp.clip(y, CLIP_INF, None)
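# A small sanity-check sketch (added for illustration): it contrasts log1mexp
# with the naive formula jnp.log(1 - jnp.exp(-x)), which loses precision for
# small x. The helper name and the test values are illustrative only.
def _log1mexp_example():
  """Compares the stable and naive evaluations of log(1 - exp(-x))."""
  x = jnp.array([1e-8, 1e-4, 1.0, 10.0])
  stable = log1mexp(x)
  naive = jnp.log(1.0 - jnp.exp(-x))
  # For x = 1e-8 the naive version underflows to -inf in single precision,
  # while the stable version returns approximately log(1e-8) ~ -18.4.
  return stable, naive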
@jax.jit
def get_unique_masks_locations_counts(array):
"""Jit compatible in-house implementations of jnp.unique."""
n_rows = array.shape[0]
masks = jnp.zeros((n_rows,) + array.shape)
for idx in range(array.shape[0]):
mask = jnp.zeros(array.shape)
mask = mask.at[: idx + 1].set(-jnp.inf)
masks = masks.at[idx].set(mask)
def unique_mask_locations_counts_it(unique_mask_locations_counts, it):
# In unique_mask:
# -1: not seen nor yet a copy
# 0: already a copy
# 1: unique element
# In location:
# -1: not seen nor yet a copy
# i: copy of the element at location i
# In counts:
# -1: not seen nor yet a copy
# 0: already a copy
# i: unique elements with i copies
unique_mask, locations, counts = unique_mask_locations_counts
not_seen_yet = abs(unique_mask[it])
row = jax.lax.dynamic_slice(array, (it, 0), (1,) + array.shape[1:])
this_mask = jax.lax.dynamic_slice(masks, (it, 0, 0), (1,) + array.shape)
array_masked = (array + this_mask)[0]
is_copy = jnp.all(row - array_masked == 0, axis=1).astype(float)
# 0s or above are left unchanged
new_unique_mask = (is_copy - 1) * (unique_mask < 0) + unique_mask * (
unique_mask >= 0
)
new_unique_mask = new_unique_mask.at[it].set(not_seen_yet)
new_locations = ((it + 1) * is_copy - 1.0) * (locations < 0) + locations * (
locations >= 0
)
new_locations = new_locations.at[it].set(
not_seen_yet * it + (1 - not_seen_yet) * locations[it]
)
new_counts = (is_copy - 1) * (counts < 0) + counts * (counts >= 0)
new_counts = new_counts.at[it].set(not_seen_yet * (jnp.sum(is_copy) + 1))
return (new_unique_mask, new_locations, new_counts), None
unique_mask = -jnp.ones(n_rows)
locations = -jnp.ones(n_rows)
counts = -jnp.ones(n_rows)
unique_mask, locations, counts = jax.lax.scan(
unique_mask_locations_counts_it,
(unique_mask, locations, counts),
jnp.arange(n_rows),
)[0]
return unique_mask, locations, counts
# def test():
# arr = np.array(
# [[1, 2], [1, 2], [2, 3], [2, 4], [2, 3], [2, 5], [2, 5], [1, 2]]
# )
#   get_unique_masks_locations_counts(arr)
# # np.unique(arr, axis=0, return_index=True, return_inverse=True)
# pass
########################################################
###################### Init utils ######################
########################################################
def init_log_potentials(
log_potentials_shape,
proba_init,
leak_potentials_mask,
leak_proba_init,
dont_update_potentials_mask,
leak_proba_init_not_updated,
noise_temperature_init,
min_clip,
):
"""Initialize the array of log potentials."""
# First define the probabilities
proba_init = np.full(log_potentials_shape, fill_value=proba_init)
# Add noise to break symmetry
proba_init += noise_temperature_init * np.random.uniform(
low=-1.0, high=1.0, size=log_potentials_shape
)
# Optionally initialize the edges to leak differently
if leak_potentials_mask is not None:
leak_proba_init = np.full(log_potentials_shape, fill_value=leak_proba_init)
proba_init += leak_potentials_mask * (leak_proba_init - proba_init)
# Optionally initialize some fixed edges differently
if dont_update_potentials_mask is not None:
leak_proba_init_not_updated = np.full(
log_potentials_shape, fill_value=leak_proba_init_not_updated
)
proba_init += dont_update_potentials_mask * (
leak_proba_init_not_updated - proba_init
)
# Clip the probabilities
proba_init = jnp.clip(proba_init, 0.0, 1.0)
# Define the log potentials
log_potentials = jnp.full(
log_potentials_shape, fill_value=-jnp.log(proba_init)
)
# Clip the log potentials
log_potentials = jnp.clip(log_potentials, min_clip, None)
return log_potentials
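# An illustrative sketch (added for clarity): potentials are parameterized as
# failure probabilities p and stored as LP = -log(p), so p close to 1 gives a
# log potential close to 0. The shape and values below are arbitrary.
def _init_log_potentials_example():
  """Initializes a small flat array of log potentials without masks."""
  log_potentials = init_log_potentials(
      log_potentials_shape=(5,),
      proba_init=0.5,
      leak_potentials_mask=None,
      leak_proba_init=None,
      dont_update_potentials_mask=None,
      leak_proba_init_not_updated=None,
      noise_temperature_init=0.0,
      min_clip=1e-5,
  )
  # With no noise, every entry equals -log(0.5) ~ 0.693.
  return log_potentials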
########################################################
###################### VI utils ########################
########################################################
def get_value_by_indices(arr, indices, has_multidim_arrays):
"""Returns the values associated to indices, or arrays of indices."""
assert isinstance(arr, jnp.ndarray)
if has_multidim_arrays:
if not isinstance(indices, jnp.ndarray):
raise TypeError(
f"Expected indices of type tuple or jax array. Got {type(indices)}"
)
if indices.shape[0] == 0:
return 0.0
elif indices.ndim == 1:
return arr[tuple(indices)]
else:
return jax.vmap(lambda idx: arr[tuple(idx)], in_axes=0)(indices)
else:
# Fill out of bounds value
return arr.at[indices].get(mode="fill", fill_value=0.0)
def set_value_for_indices(arr, indices, values, has_multidim_arrays):
"""Set the values associated to indices, or arrays of indices."""
assert isinstance(arr, jnp.ndarray)
if has_multidim_arrays:
if not isinstance(indices, jnp.ndarray):
raise TypeError(
f"Expected indices of type tuple or jax array. Got {type(indices)}"
)
if indices.shape[0] == 0:
return arr
elif indices.ndim == 1:
# Single update
return arr.at[tuple(indices)].set(values)
else:
def f(arr, it):
"""Useful function."""
idx = indices[it]
val = values[it]
return arr.at[tuple(idx)].set(val), None
return jax.lax.scan(f, arr, jnp.arange(values.shape[0]))[0]
else:
# Drop out of bounds indices
return arr.at[indices].set(values, mode="promise_in_bounds")
def add_value_to_indices(arr, indices, values, has_multidim_arrays):
"""Set the values associated to indices, or arrays of indices."""
assert isinstance(arr, jnp.ndarray)
if has_multidim_arrays:
if not isinstance(indices, jnp.ndarray):
raise TypeError(
f"Expected indices of type tuple or jax array. Got {type(indices)}"
)
if indices.shape[0] == 0:
return arr
elif indices.ndim == 1:
return arr.at[tuple(indices)].add(values)
else:
def f(arr, it):
"""Useful function."""
idx = indices[it]
val = values[it]
return arr.at[tuple(idx)].add(val), None
return jax.lax.scan(f, arr, jnp.arange(values.shape[0]))[0]
else:
# Drop out of bounds indices
return arr.at[indices].add(values, mode="promise_in_bounds")
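# An illustrative sketch (added for clarity) of the two indexing conventions
# supported by the helpers above: flat integer indices, and rows of
# multi-dimensional indices. The arrays and values are arbitrary.
def _indexing_helpers_example():
  """Reads and updates entries with flat and multi-dimensional indices."""
  flat_arr = jnp.zeros((4,))
  flat_arr = set_value_for_indices(
      flat_arr, jnp.array([0, 2]), jnp.array([1.0, 2.0]),
      has_multidim_arrays=False,
  )
  multi_arr = jnp.zeros((2, 3))
  # Each row of `indices` addresses one entry of the 2D array.
  indices = jnp.array([[0, 1], [1, 2]])
  multi_arr = add_value_to_indices(
      multi_arr, indices, jnp.array([1.0, 2.0]), has_multidim_arrays=True
  )
  values = get_value_by_indices(multi_arr, indices, has_multidim_arrays=True)
  return flat_arr, multi_arr, values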
def build_local_model(Xv_gt, dict_child_to_parents, n_layers):
"""Build local models as described in the VI paper, Section 5.1."""
# Build local models of active hidden variables
Xh_gt = []
for Xv_row in Xv_gt:
Xh_row = []
Xh_row_layer = Xv_row
# At each layer, extract all the parents from the layer above
for _ in range(n_layers):
if len(Xh_row_layer) == 0:
break
Xh_row_next_layer = np.concatenate(
[dict_child_to_parents[idx_child] for idx_child in Xh_row_layer]
)
Xh_row_next_layer = np.unique(Xh_row_next_layer).tolist()
Xh_row += Xh_row_next_layer
# Update
Xh_row_layer = Xh_row_next_layer
assert len(Xh_row_layer) == 0
Xh_gt.append(Xh_row)
return Xh_gt
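# A toy sketch (added for clarity) of the local-model construction: the active
# visible indices of each data row are walked up through their parents for up
# to n_layers levels. The indices and the parent dictionary are hypothetical.
def _build_local_model_example():
  """Builds local models for a toy network with visible nodes 0-2."""
  # Hidden nodes 3 and 4 form the hidden layer; they have no parents here
  # (the leak node is excluded from the dictionary).
  dict_child_to_parents = {0: [3], 1: [3, 4], 2: [4], 3: [], 4: []}
  Xv_gt = [[0, 1], [2]]
  # Returns [[3, 4], [4]]: the hidden parents activated by each data row.
  return build_local_model(Xv_gt, dict_child_to_parents, n_layers=2)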
def dict_to_array(d, is_multidim, dtype, fill_value):
"""Convert a dict to an array with padding. Keys can be tuple."""
keys = list(d.keys())
max_n_values = max([len(v) for v in d.values()])
if not is_multidim:
key_max = max(keys)
keys_shape = (key_max + 1,)
assert keys_shape == (len(keys),)
mat_shape = keys_shape + (max_n_values,)
else:
key_maxes = np.max(np.array(keys), axis=0)
keys_shape = tuple(x + 1 for x in key_maxes)
n_dim = key_maxes.shape[0]
mat_shape = keys_shape + (max_n_values,) + (n_dim,)
# Create matrix with default value, which is being broadcasted
array = np.full(shape=mat_shape, fill_value=fill_value, dtype=dtype)
for k, v in d.items():
if len(v) > 0:
array[k][: len(v)] = v
return jnp.array(array)
def list_of_arrays_to_array(list_of_mats, dtype, fill_value):
"""Convert a list of arrays to an array with padding."""
max_n_cols = max([len(row) for row in list_of_mats])
array = np.zeros((len(list_of_mats), max_n_cols), dtype=dtype)
for idx, row in enumerate(list_of_mats):
array[idx, : len(row)] = row
array[idx, len(row) :] = fill_value
return jnp.array(array)
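# An illustrative sketch (added for clarity) of the two padding helpers above,
# using hypothetical values and a fill value of -1.
def _padding_helpers_example():
  """Pads a dict of lists and a list of lists into rectangular arrays."""
  d = {0: [1, 2], 1: [3]}
  # dict_to_array gives [[1, 2], [3, -1]].
  from_dict = dict_to_array(d, is_multidim=False, dtype=np.int32, fill_value=-1)
  # list_of_arrays_to_array gives [[1, 2, 3], [4, -1, -1]].
  from_list = list_of_arrays_to_array(
      [[1, 2, 3], [4]], dtype=np.int32, fill_value=-1
  )
  return from_dict, from_list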
| max_product_noisy_or-main | mp_noisy_or/utils.py |
# Copyright 2023 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Defines data loader."""
import os
import numpy as np
import scipy.io as sio
from sklearn import cluster
from tensorflow import keras
import tensorflow_datasets as tfds
import pickle
DATA_FOLDER = "data/"
OVERPARAM_DATA_FOLDER = (
"data/overparam/datasets/"
)
"""
To be used by our BP and VI training, a data loader must return:
Xv_gt_train: the training data
Xv_gt_test: the optional test data
edges_children_to_parents: A dictionnary representing the noisy OR Bayesian network as
{idx_child: {idx_parent: idx_potential}}
X_shape: the shape of the unique array X containing all the hidden and visible
variables
log_potentials_shape: the shape of the unique array LP containing all the log
potentials
leak_potentials_mask: a mask indicating the potentials which connect a
variable to the leak node
dont_update_potentials_mask: a mask indicating the potentials that we do not
want to update (often representing the noise probabilities)
slice_visible: a slice indicating the visible variables in X
slice_hidden: a slice indicating the hidden variables in X
leak_node_idx: the index of the leak node in X
"""
# pylint: disable=invalid-name
# pylint: disable=line-too-long
# pylint: disable=g-doc-return-or-yield
# pylint: disable=g-import-not-at-top
# pylint: disable=comparison-with-itself
# pylint: disable=g-complex-comprehension
#################################
###### Graph generation #########
#################################
def build_one_layer_or_factors(
distmat, children_offset, n_clusters_by_node=5, ratio_children_parents=3
):
"""Build one layer of ORFactors in the hierarchy."""
n_children = distmat.shape[0]
n_parents = n_children // ratio_children_parents
# First create n_parents clusters by hierarchical clustering
clustering = cluster.AgglomerativeClustering(
n_clusters=n_parents, affinity="precomputed", linkage="average"
)
clustering.fit(distmat)
# Second add each node to a minimum of clusters
# Compute the distance from nodes to clusters
dist_to_clusters = np.zeros((n_children, n_parents))
for idx_cluster in range(n_parents):
this_cluster = np.where(clustering.labels_ == idx_cluster)[0]
dist_to_cluster = distmat[this_cluster].mean(axis=0)
dist_to_clusters[:, idx_cluster] = dist_to_cluster
nodes_to_clusters = np.argsort(dist_to_clusters, axis=1)[
:, :n_clusters_by_node
]
# Compute the edges_children_to_parents
edges_children_to_parents = {}
for child_idx in range(n_children):
cluster_idx = clustering.labels_[child_idx]
closest_cluster_indices = list(nodes_to_clusters[child_idx])
if cluster_idx not in closest_cluster_indices:
# Rare case
edges_children_to_parents[child_idx] = closest_cluster_indices + [
cluster_idx
]
else:
edges_children_to_parents[child_idx] = closest_cluster_indices
# Third, create the distance matrix for the next layer
edges_p2c = {idx: [] for idx in range(n_parents)}
for idx_child, idx_parents in edges_children_to_parents.items():
for idx_parent in idx_parents:
edges_p2c[idx_parent].append(idx_child)
# Recompute the distance from nodes to each of the new clusters
dist_to_clusters = np.zeros((n_children, n_parents))
for idx_cluster in range(n_parents):
this_cluster = np.array(edges_p2c[idx_cluster])
dist_to_cluster = distmat[this_cluster].mean(axis=0)
dist_to_clusters[:, idx_cluster] = dist_to_cluster
distmat_next_layer = np.zeros((n_parents, n_parents))
for idx_cluster in range(n_parents):
this_cluster = np.array(edges_p2c[idx_cluster])
dist_between_clusters = dist_to_clusters[this_cluster].mean(axis=0)
distmat_next_layer[idx_cluster] = dist_between_clusters
# Add offsets
offset_edges_children_to_parents = {
children_offset
+ idx_child: [
children_offset + n_children + idx_parent
for idx_parent in idx_parents
]
for idx_child, idx_parents in edges_children_to_parents.items()
}
children_offset_next_layer = children_offset + n_children
return (
offset_edges_children_to_parents,
distmat_next_layer,
children_offset_next_layer,
)
def build_or_factors(
data_train,
n_nodes_visible,
n_layers,
file_to_save=None,
sparse_data=False,
data_test=None,
):
"""Build a hierachical noisy OR Bayesian network."""
print("Building the Bayesian network...")
cooccurences = np.zeros((n_nodes_visible, n_nodes_visible))
counts = np.zeros((n_nodes_visible,)) + 1e-8
for data_row in data_train:
# For tiny20 we have all the observations
if len(data_row) == n_nodes_visible:
nodes = np.where(data_row)[0]
# For large datasets we only have the activations
else:
nodes = data_row
for node in nodes:
cooccurences[node, nodes] += 1
counts[node] += 1
# Normalize the counts
# https://jmlr.org/papers/volume8/globerson07a/globerson07a.pdf
norm_weights = cooccurences / counts.reshape(1, -1)
norm_weights /= counts.reshape(-1, 1)
norm_weights *= len(data_train)
# Build the initial distance matrix
distmat = np.exp(-norm_weights)
np.fill_diagonal(distmat, 0)
# Create the first layer
(
edges_children_to_parents,
distmat_next_layer,
offset_nodes_next_layer,
) = build_one_layer_or_factors(distmat, 0)
# Create all the layers until the last one
for _ in range(n_layers - 2):
(
new_edges_children_to_parents,
distmat_next_layer,
offset_nodes_next_layer,
) = build_one_layer_or_factors(distmat_next_layer, offset_nodes_next_layer)
for k, v in new_edges_children_to_parents.items():
edges_children_to_parents[k] = v
# Create the last layer
n_children = distmat_next_layer.shape[0]
leak_node_idx = offset_nodes_next_layer + n_children
for idx_child in range(n_children):
edges_children_to_parents[offset_nodes_next_layer + idx_child] = [
leak_node_idx
]
# Add potential index and value
idx_potential = 0
edges_children_to_parents_augmented = {}
leak_potentials_mask = []
for idx_child, idx_parents in edges_children_to_parents.items():
edges_children_to_parents_augmented[idx_child] = {}
for idx_parent in idx_parents:
edges_children_to_parents_augmented[idx_child][idx_parent] = idx_potential
idx_potential += 1
leak_potentials_mask.append(int(idx_parent == leak_node_idx))
# Connect to leak node
if leak_node_idx not in idx_parents:
edges_children_to_parents_augmented[idx_child][
leak_node_idx
] = idx_potential
idx_potential += 1
leak_potentials_mask.append(1)
# Return the required quantities
log_potentials_shape = (idx_potential,)
n_nodes = max(edges_children_to_parents.keys()) + 2 # add leak node
X_shape = (n_nodes,)
slice_visible = np.s_[:n_nodes_visible]
slice_hidden = np.s_[n_nodes_visible : n_nodes - 1]
assert leak_node_idx == n_nodes - 1
if sparse_data:
data_train = [np.where(data_row != 0)[0] for data_row in data_train]
if data_test is not None:
data_test = [np.where(data_row != 0)[0] for data_row in data_test]
# Optionally save
if file_to_save is not None:
data_to_save = {
"Xv_gt_train": data_train,
"Xv_gt_test": data_test,
"edges_children_to_parents": edges_children_to_parents_augmented,
"X_shape": X_shape,
"log_potentials_shape": log_potentials_shape,
"leak_potentials_mask": leak_potentials_mask,
"slice_visible": slice_visible,
"slice_hidden": slice_hidden,
"leak_node_idx": leak_node_idx,
}
print("Saving processed data at {}...".format(file_to_save))
with open(file_to_save, "wb") as f:
pickle.dump(data_to_save, f)
return (
data_train,
data_test,
edges_children_to_parents_augmented,
X_shape,
log_potentials_shape,
leak_potentials_mask,
None,
slice_visible,
slice_hidden,
leak_node_idx,
)
def train_test_shuffle_split(Xv_gt, seed, ratio_train):
"""Split train and test."""
np.random.seed(seed)
assert ratio_train <= 1
# Random train and test: shuffle first, then split
n_samples_train = int(ratio_train * len(Xv_gt))
np.random.shuffle(Xv_gt)
Xv_gt_train = Xv_gt[:n_samples_train]
Xv_gt_test = Xv_gt[n_samples_train:]
return Xv_gt_train, Xv_gt_test
#################################
####### Tiny20 dataset ##########
#################################
def load_20news_w100(n_layers, sparse_data=False):
"""Load the data for the tiny20 dataset."""
# Load dataset and words
filename = DATA_FOLDER + "20news_w100.mat"
with open(filename, "rb") as f:
data = sio.loadmat(f)
documents = data["documents"].todense()
Xv_gt = np.array(documents.T.astype(float))
n_words = Xv_gt.shape[1]
return build_or_factors(Xv_gt, n_words, n_layers, sparse_data=sparse_data)
#################################
###### Tensorflow datasets ######
#################################
def load_yelp_dataset(**kwargs):
return load_large_datasets(dataset="yelp_polarity_reviews", **kwargs)
def load_imdb_dataset(**kwargs):
return load_large_datasets(dataset="imdb_reviews", **kwargs)
def load_abstract_dataset(**kwargs):
return load_large_datasets(dataset="scientific_papers", **kwargs)
def load_agnews_dataset(**kwargs):
return load_large_datasets(dataset="ag_news_subset", **kwargs)
def load_patent_dataset(**kwargs):
return load_large_datasets(dataset="big_patent/f", **kwargs)
def load_large_datasets(
dataset, key_name, vocab_size, max_sequence_length, n_layers
):
"""Load data for large Tensorflow datasets."""
# https://www.tensorflow.org/datasets/catalog/scientific_papers
filename = (
DATA_FOLDER
+ "{}_vocabsize_{}_nlayers_{}_maxseqlength{}.npz".format(
dataset, vocab_size, n_layers, max_sequence_length
)
)
if os.path.exists(filename):
print("Loading processed data at {}...".format(filename))
    with open(filename, "rb") as f:
      data = pickle.load(f)
return (
data["Xv_gt_train"],
data["Xv_gt_test"],
data["edges_children_to_parents"],
data["X_shape"],
data["log_potentials_shape"],
data["leak_potentials_mask"],
None,
data["slice_visible"],
data["slice_hidden"],
data["leak_node_idx"],
)
# Training set
data_train = tfds.load(dataset, split="train", batch_size=-1)
data_train = tfds.as_numpy(data_train)[key_name]
data_test = tfds.load(dataset, split="test", batch_size=-1)
data_test = tfds.as_numpy(data_test)[key_name]
# Define the vectorizer on the training data
# https://www.tensorflow.org/tutorials/load_data/text
vectorize_layer = keras.layers.TextVectorization(
max_tokens=vocab_size,
output_mode="int",
output_sequence_length=max_sequence_length,
)
vectorize_layer.adapt(data_train)
data_train = np.array(vectorize_layer(data_train))
data_test = np.array(vectorize_layer(data_test))
# vectorize_layer.get_vocabulary() gives the words
print(vectorize_layer.get_vocabulary()[:100])
print("Data train shape: ", data_train.shape)
print("Data test shape: ", data_test.shape)
train_binaries = []
for train_row in data_train:
unique_words = np.unique(train_row)
# Remove elements 0 and 1, which are '' and UNK
unique_words = unique_words[
np.logical_and(unique_words != 0, unique_words != 1)
]
train_binaries.append(unique_words)
test_binaries = []
for test_row in data_test:
unique_words = np.unique(test_row)
# Remove elements 0 and 1, which are '' and UNK
unique_words = unique_words[
np.logical_and(unique_words != 0, unique_words != 1)
]
test_binaries.append(unique_words)
# Build the OR factor
return build_or_factors(
data_train=train_binaries,
n_nodes_visible=vocab_size,
n_layers=n_layers,
file_to_save=filename,
data_test=test_binaries,
sparse_data=False, # data is already sparsified
)
#################################
#### Binary deconvolution #######
#################################
def load_binary_deconvolution_data(dataset_name="pmp", W_shape=(16, 3, 3)):
"""Load data for binary deconvolution."""
if dataset_name == "pmp":
# Load the data from the PMP paper
url = 'https://raw.githubusercontent.com/deepmind/PGMax/main/examples/example_data/conv_problem.npz'
path = keras.utils.get_file('conv_problem.npz', url)
data = np.load(path)
W_gt = data["W"][0]
X_gt_train = data["X"][:, 0, :, :]
X_gt_test = None
# Augment the parameters as in the PMP paper
n_feat, feat_height, feat_width = W_gt.shape
n_feat += 1
feat_height += 1
feat_width += 1
else:
raise ValueError("Unknown dataset", dataset_name)
_, im_height, im_width = X_gt_train.shape
assert im_height == im_width
s_height = im_height - feat_height + 1
s_width = im_width - feat_width + 1
lp_shape = max([n_feat, feat_height, feat_width])
# The log-potentials LP are such that
# LP[:n_feat, :feat_height, :feat_width] give the failure probabilities
# LP[n_feat, 0, :n_feat] give the prior of the latent variables
# LP[n_feat, lp_shape - 1, lp_shape - 1] give the shared noise probability
log_potentials_shape = (n_feat + 1, lp_shape + 1, lp_shape + 1)
# The prior and failure probabilities are initialized differently
leak_potentials_mask = np.zeros(shape=log_potentials_shape)
leak_potentials_mask[n_feat, 0, :n_feat] = 1
# The noise probability is fixed during training
dont_update_potentials_mask = np.zeros(shape=log_potentials_shape)
dont_update_potentials_mask[n_feat, lp_shape, lp_shape] = 1
# The variables X are such that
# X[:n_feat, :s_height, :s_width] corresponds to the hidden variables Xh
# X[n_feat, :im_height, :im_width] corresponds to the visible variables Xv
# X[n_feat, im_height, im_width] is the leak node
X_shape = (n_feat + 1, im_height + 1, im_width + 1)
slice_visible = np.s_[n_feat, :im_height, :im_width]
slice_hidden = np.s_[:n_feat, :s_height, :s_width]
leak_node_idx = (n_feat, im_height, im_width)
edges_children_to_parents = {}
for idx_s_height in range(s_height):
for idx_s_width in range(s_width):
for idx_feat in range(n_feat):
# First, connect each hidden variable to the leak node
# with a feature-specific prior probability
edges_children_to_parents[(idx_feat, idx_s_height, idx_s_width)] = {
leak_node_idx: (n_feat, 0, idx_feat)
}
# Second, consider edges where the child is a visible variable
for idx_feat_height in range(feat_height):
for idx_feat_width in range(feat_width):
idx_img_height = idx_feat_height + idx_s_height
idx_img_width = idx_feat_width + idx_s_width
# Connect each visible variable to the leak node
# with shared noise probability
if (
n_feat,
idx_img_height,
idx_img_width,
) not in edges_children_to_parents:
edges_children_to_parents[
(n_feat, idx_img_height, idx_img_width)
] = {leak_node_idx: (n_feat, lp_shape, lp_shape)}
# Connect each visible variable to a hidden variable
# Format {idx_child: {idx_parent: idx_potential}}
edges_children_to_parents[(n_feat, idx_img_height, idx_img_width)][
(idx_feat, idx_s_height, idx_s_width)
] = (idx_feat, idx_feat_height, idx_feat_width)
return (
X_gt_train,
X_gt_test,
edges_children_to_parents,
X_shape,
log_potentials_shape,
leak_potentials_mask,
dont_update_potentials_mask,
slice_visible,
slice_hidden,
leak_node_idx,
)
#################################
## Binary matrix factorization ##
#################################
def load_BMF_data(seed, n_rows, rank, n_cols, p_Xon):
"""Generate the Binary Matrix Factorization data."""
np.random.seed(seed)
  # Note that p(Xv_ij=1) = p_X = 1 - (1 - p_UV ** 2) ** rank
p_UV = (1 - (1 - p_Xon) ** (1.0 / rank)) ** 0.5
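  # Derivation (added for clarity): entry Xv_ij is off only if no rank k has
  # U_ik = V_kj = 1. Each such event has probability 1 - p_UV ** 2 and the
  # rank events are independent, so p(Xv_ij=0) = (1 - p_UV ** 2) ** rank.
  # Solving 1 - (1 - p_UV ** 2) ** rank = p_Xon for p_UV gives the line above.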
U_gt_test = np.random.binomial(n=1, p=p_UV, size=(n_rows, rank))
U_gt_train = np.random.binomial(n=1, p=p_UV, size=(n_rows, rank))
V_gt = np.random.binomial(n=1, p=p_UV, size=(rank, n_cols))
Xv_gt_train = U_gt_train.dot(V_gt)
Xv_gt_train[Xv_gt_train >= 1] = 1
print("Average number of activations in X: ", Xv_gt_train.mean())
Xv_gt_test = U_gt_test.dot(V_gt)
Xv_gt_test[Xv_gt_test >= 1] = 1
# The log-potentials LP are such that
# LP[:rank, :n_cols] give the failure probabilities
  # LP[rank, 0] give the shared prior probability
# LP[rank, n_cols] give the shared noise probability
log_potentials_shape = (rank + 1, n_cols + 1)
# The prior and failure probabilities are initialized differently
leak_potentials_mask = np.zeros(shape=log_potentials_shape)
leak_potentials_mask[rank, :n_cols] = 1
# The noise probability is fixed during training
dont_update_potentials_mask = np.zeros(shape=log_potentials_shape)
dont_update_potentials_mask[rank, n_cols] = 1
# The variables X are such that
# X[0, :rank] corresponds to the hidden variables U
# X[1, :n_cols] corresponds to the visible variables Xv
# X[1, n_cols] is the leak node
X_shape = (2, n_cols + 1)
slice_hidden = np.s_[0, :rank]
slice_visible = np.s_[1, :n_cols]
leak_node_idx = (1, n_cols)
edges_children_to_parents = {}
for idx_rank in range(rank):
# Connect each hidden to the leak node with a shared prior probability
edges_children_to_parents[(0, idx_rank)] = {leak_node_idx: (rank, 0)}
# Second consider edges where the child is a visible variable
for idx_col in range(n_cols):
if (1, idx_col) not in edges_children_to_parents:
        # Connect each visible variable to the leak node with a shared noise probability
edges_children_to_parents[(1, idx_col)] = {
leak_node_idx: (rank, n_cols)
}
# Connect each visible variable to a hidden variable
# Format {idx_child: {idx_parent: idx_potential}}
edges_children_to_parents[(1, idx_col)][(0, idx_rank)] = (
idx_rank,
idx_col,
)
return (
Xv_gt_train,
Xv_gt_test,
edges_children_to_parents,
X_shape,
log_potentials_shape,
leak_potentials_mask,
dont_update_potentials_mask,
slice_visible,
slice_hidden,
leak_node_idx,
p_UV,
)
#################################
###### Overparam datasets #######
#################################
def load_overparam_data(dataset_name, n_latent=8, img_size=64):
"""Load the data for the overparam experiments."""
assert n_latent < img_size
if dataset_name == "IMG-FLIP":
filename = (
OVERPARAM_DATA_FOLDER + dataset_name + "/samples/samples_str10percent"
)
else:
filename = (
OVERPARAM_DATA_FOLDER + dataset_name + "/samples/raw_samples_n10000_s0"
)
X_gt = np.loadtxt(open(filename, "rb"))
img_size = X_gt.shape[1]
# The log-potentials LP are such that
# LP[:n_latent, :img_size] give the failure probabilities
# LP[n_latent, :n_latent] give each hidden prior probability
  # LP[n_latent, img_size] give the shared noise probability
log_potentials_shape = (n_latent + 1, img_size + 1)
# The prior and failure probabilities are initialized differently
leak_potentials_mask = np.zeros(shape=log_potentials_shape)
leak_potentials_mask[n_latent, :-1] = 1
# The noise probability is fixed during training
dont_update_potentials_mask = np.zeros(shape=log_potentials_shape)
dont_update_potentials_mask[n_latent, img_size] = 1
# The variables X are such that
# X[0, :n_latent] corresponds to the hidden variables Xh
# X[1, :img_size] corresponds to the visible variables Xv
# X[0, img_size] is the leak node
X_shape = (2, img_size + 1)
slice_hidden = np.s_[0, :n_latent]
slice_visible = np.s_[1, :img_size]
leak_node_idx = (0, img_size)
edges_children_to_parents = {}
for idx_latent in range(n_latent):
# Connect each hidden to the leak node with the hidden prior probability
edges_children_to_parents[(0, idx_latent)] = {
leak_node_idx: (n_latent, idx_latent)
}
# Second consider edges where the child is a visible variable
for idx_pixel in range(img_size):
if (1, idx_pixel) not in edges_children_to_parents:
        # Connect each visible variable to the leak node with the shared noise probability
edges_children_to_parents[(1, idx_pixel)] = {
leak_node_idx: (n_latent, img_size)
}
# Connect each visible variable to a hidden variable
# Format {idx_child: {idx_parent: idx_potential}}
edges_children_to_parents[(1, idx_pixel)][(0, idx_latent)] = (
idx_latent,
idx_pixel,
)
return (
X_gt,
None,
edges_children_to_parents,
X_shape,
log_potentials_shape,
leak_potentials_mask,
dont_update_potentials_mask,
slice_visible,
slice_hidden,
leak_node_idx,
)
DATA_LOADER = {
"20news": load_20news_w100,
"BMF": load_BMF_data,
"2D_deconvolution": load_binary_deconvolution_data,
"yelp_polarity_reviews": load_yelp_dataset,
"imdb_reviews": load_imdb_dataset,
"scientific_papers": load_abstract_dataset,
"ag_news_subset": load_agnews_dataset,
"patent": load_patent_dataset,
"overparam": load_overparam_data,
}
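# A dispatch sketch (added for illustration): the training scripts are expected
# to look up a loader by dataset name and call it with dataset-specific keyword
# arguments. The argument values below are arbitrary.
#
# loader = DATA_LOADER["BMF"]
# outputs = loader(seed=0, n_rows=100, rank=4, n_cols=50, p_Xon=0.25)
# Xv_gt_train, Xv_gt_test = outputs[0], outputs[1]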
| max_product_noisy_or-main | mp_noisy_or/data.py |
# Copyright 2023 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Run the PMP experiment on 2Ddeconv and BMF."""
import collections
import datetime
from ml_collections import config_flags
import numpy as np
from pgmax import fgraph
from pgmax import fgroup
from pgmax import infer
from pgmax import vgroup
import scipy
from tensorflow import keras
import tqdm
from mp_noisy_or import utils
from absl import app
_CONFIGS = config_flags.DEFINE_config_file(
name="config",
default="config.py",
help_string="Training configuration",
)
# pylint: disable=line-too-long
# pylint: disable=invalid-name
# pylint: disable=g-doc-args
# pylint: disable=g-doc-return-or-yield
###################################
######## 2D deconvolution #########
###################################
# From https://github.com/deepmind/PGMax/blob/main/examples/pmp_binary_deconvolution.ipynb
pW = 0.25
pS = 1e-75
pX = 1e-100
def PMP_2D_deconv(X_gt, is_training=True, W_learned=None):
"""Run the pertub-and-max-product algorithm for the 2D deconvolution experiment."""
n_feat, feat_height, feat_width = 5, 6, 6
if not is_training:
assert W_learned is not None
assert W_learned.shape == (n_feat, feat_height, feat_width)
n_images, im_height, im_width = X_gt.shape
s_height = im_height - feat_height + 1
s_width = im_width - feat_width + 1
###################################
### Step 1: Create factor graph ###
###################################
# Binary features
W = vgroup.NDVarArray(num_states=2, shape=(n_feat, feat_height, feat_width))
# Binary indicators of features locations
S = vgroup.NDVarArray(
num_states=2, shape=(n_images, n_feat, s_height, s_width)
)
# Auxiliary binary variables combining W and S
SW = vgroup.NDVarArray(
num_states=2,
shape=(n_images, im_height, im_width, n_feat, feat_height, feat_width),
)
# Binary images obtained by convolution
X = vgroup.NDVarArray(num_states=2, shape=X_gt.shape)
# Factor graph
fg = fgraph.FactorGraph(variable_groups=[S, W, SW, X])
# Define the logical factors
indices_for_ANDFactors = []
variables_for_ANDFactors = []
variables_for_ORFactors_dict = collections.defaultdict(list)
for idx_img in tqdm.trange(n_images):
for idx_s_height in range(s_height):
for idx_s_width in range(s_width):
for idx_feat in range(n_feat):
for idx_feat_height in range(feat_height):
for idx_feat_width in range(feat_width):
idx_img_height = idx_feat_height + idx_s_height
idx_img_width = idx_feat_width + idx_s_width
              # Store the relevant indices for the learning part
idx_S = (idx_img, idx_feat, idx_s_height, idx_s_width)
idx_W = (idx_feat, idx_feat_height, idx_feat_width)
idx_SW = (
idx_img,
idx_img_height,
idx_img_width,
idx_feat,
idx_feat_height,
idx_feat_width,
)
indices_for_ANDFactors.append([idx_S, idx_W, idx_SW])
variables_for_ANDFactor = [
S[idx_S],
W[idx_W],
SW[idx_SW],
]
variables_for_ANDFactors.append(variables_for_ANDFactor)
X_var = X[idx_img, idx_img_height, idx_img_width]
variables_for_ORFactors_dict[X_var].append(SW[idx_SW])
# Define the ANDFactors
AND_factor_group = fgroup.ANDFactorGroup(variables_for_ANDFactors)
# Define the ORFactors
variables_for_ORFactors = [
list(tuple(variables_for_ORFactors_dict[X_var]) + (X_var,))
for X_var in variables_for_ORFactors_dict
]
# Add the two FactorGroups, which is computationally efficient
OR_factor_group = fgroup.ORFactorGroup(variables_for_ORFactors)
fg.add_factors([AND_factor_group, OR_factor_group])
###################################
#### Step 2: Train noisy OR BP ####
###################################
# BP functions
bp = infer.BP(fg.bp_state, temperature=0.0)
# Define the evidence
uW = np.zeros((W.shape) + (2,))
if is_training:
uW[..., 1] = scipy.special.logit(pW)
uW += np.random.gumbel(size=uW.shape)
else:
uW[..., 0] = (2 * W_learned - 1) * utils.CLIP_INF
# Sparsity inducing priors for W and S
uS = np.zeros((S.shape) + (2,))
uS[..., 1] = scipy.special.logit(pS)
  # Likelihood of the binary images given X
uX = np.zeros((X_gt.shape) + (2,))
uX[..., 0] = (2 * X_gt - 1) * scipy.special.logit(pX)
# Run BP
bp_arrays = bp.init(
evidence_updates={
S: uS + np.random.gumbel(size=uS.shape),
W: uW,
SW: np.zeros(shape=SW.shape),
X: uX,
}
)
bp_arrays = bp.run_bp(bp_arrays, num_iters=1_000, damping=0.5)
beliefs = bp.get_beliefs(bp_arrays)
map_states = infer.decode_map_states(beliefs)
return map_states[S], map_states[W]
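# Note (added for clarity): the perturb-and-max-product recipe above follows
# the referenced PGMax notebook. Gumbel noise is added to the unary evidence of
# the latent variables, and max-product BP (temperature=0.0) then decodes a MAP
# state of the perturbed model, which acts as an approximate posterior sample.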
def run_2D_deconv(seed, ratio_train=0.8):
"""Run 2D deconvolution for one seed."""
url = 'https://raw.githubusercontent.com/deepmind/PGMax/main/examples/example_data/conv_problem.npz'
path = keras.utils.get_file('conv_problem.npz', url)
data = np.load(path)
Xv_gt = data["X"][:, 0, :, :]
# Create train and test sets
n_samples_train = int(ratio_train * len(Xv_gt))
np.random.seed(seed)
np.random.shuffle(Xv_gt)
Xv_gt_train = Xv_gt[:n_samples_train]
Xv_gt_test = Xv_gt[n_samples_train:]
# Train
S_train, W_learned = PMP_2D_deconv(Xv_gt_train, is_training=True)
S_test, _ = PMP_2D_deconv(Xv_gt_test, is_training=False, W_learned=W_learned)
return W_learned, Xv_gt_train, S_train, Xv_gt_test, S_test
###################################
############### BMF ###############
###################################
def PMP_BMF(X_gt, rank, p_UV, is_training=False, V_learned=None):
"""Run the pertub-and-max-product algorithm for the BMF experiment."""
n_rows, n_cols = X_gt.shape
if not is_training:
assert V_learned is not None
assert V_learned.shape == (rank, n_cols)
###################################
### Step 1: Create factor graph ###
###################################
# Binary variables
U = vgroup.NDVarArray(num_states=2, shape=(n_rows, rank))
V = vgroup.NDVarArray(num_states=2, shape=(rank, n_cols))
# Auxiliary binary variables combining U and V
UV = vgroup.NDVarArray(num_states=2, shape=(n_rows, rank, n_cols))
  # Binary matrix obtained from the Boolean product of U and V
X = vgroup.NDVarArray(num_states=2, shape=X_gt.shape)
# Factor graph
fg = fgraph.FactorGraph(variable_groups=[U, V, UV, X])
# Define the LogicalFactors
variables_for_ANDFactors = []
variables_for_ORFactors = []
for idx_row in range(n_rows):
for idx_col in range(n_cols):
variables_for_ORFactor = []
for idx_rank in range(rank):
UV_var = UV[idx_row, idx_rank, idx_col]
variables_for_ANDFactor = [
U[idx_row, idx_rank],
V[idx_rank, idx_col],
UV_var,
]
variables_for_ANDFactors.append(variables_for_ANDFactor)
variables_for_ORFactor.append(UV_var)
variables_for_ORFactor = list(
tuple(variables_for_ORFactor) + (X[idx_row, idx_col],)
)
variables_for_ORFactors.append(variables_for_ORFactor)
  AND_factor_group = fgroup.ANDFactorGroup(variables_for_ANDFactors)
  # Add the two FactorGroups, which is computationally efficient
  OR_factor_group = fgroup.ORFactorGroup(variables_for_ORFactors)
fg.add_factors([AND_factor_group, OR_factor_group])
###################################
#### Step 2: Train noisy OR BP ####
###################################
# BP functions
bp = infer.BP(fg.bp_state, temperature=0.0)
# Define the evidence
uV = np.zeros((V.shape) + (2,))
if is_training:
uV[..., 1] = scipy.special.logit(p_UV)
uV += np.random.gumbel(size=uV.shape)
else:
uV[..., 0] = (2 * V_learned - 1) * utils.CLIP_INF
  # Sparsity inducing priors for U and V
uU = np.zeros((U.shape) + (2,))
uU[..., 1] = scipy.special.logit(p_UV)
  # Likelihood of the binary observations given X
uX = np.zeros((X_gt.shape) + (2,))
uX[..., 0] = (2 * X_gt - 1) * utils.CLIP_INF
# Run BP
bp_arrays = bp.init(
evidence_updates={
U: uU + np.random.gumbel(size=uU.shape),
V: uV,
UV: np.zeros(shape=UV.shape),
X: uX,
}
)
bp_arrays = bp.run_bp(bp_arrays, num_iters=1000, damping=0.5)
beliefs = bp.get_beliefs(bp_arrays)
map_states = infer.decode_map_states(beliefs)
return map_states[U], map_states[V]
def run_BMF(seed, n_rows, rank, n_cols, p_Xon):
"""Run BMF for one seed."""
np.random.seed(seed)
  # Note that p(Xv_ij=1) = p_X = 1 - (1 - p_UV ** 2) ** rank
p_UV = (1 - (1 - p_Xon) ** (1.0 / rank)) ** 0.5
U_gt_test = np.random.binomial(n=1, p=p_UV, size=(n_rows, rank))
U_gt_train = np.random.binomial(n=1, p=p_UV, size=(n_rows, rank))
V_gt = np.random.binomial(n=1, p=p_UV, size=(rank, n_cols))
Xv_gt_train = U_gt_train.dot(V_gt)
Xv_gt_train[Xv_gt_train >= 1] = 1
Xv_gt_test = U_gt_test.dot(V_gt)
Xv_gt_test[Xv_gt_test >= 1] = 1
# Train
U_train, V_learned = PMP_BMF(Xv_gt_train, rank, p_UV, is_training=True)
U_test, _ = PMP_BMF(
Xv_gt_test, rank, p_UV, is_training=False, V_learned=V_learned
)
return V_learned, Xv_gt_train, U_train, Xv_gt_test, U_test
###################################
############## Train ###############
###################################
# pylint: disable=invalid-name
def train(_):
"""Train the noisy OR network on."""
config = _CONFIGS.value
dataset = config.dataset
# First extract the config for the dataset
if dataset == "BMF":
config_PMP = config.config_PMP_BMF
W_learned, Xv_gt_train, S_train, Xv_gt_test, S_test = run_BMF(**config_PMP)
elif dataset == "2D_deconvolution":
config_PMP = config.config_PMP_2Ddeconv
W_learned, Xv_gt_train, S_train, Xv_gt_test, S_test = run_2D_deconv(
**config_PMP
)
else:
raise ValueError("Unknown dataset", dataset)
if __name__ == "__main__":
app.run(train, load_cuda_libraries=False)
| max_product_noisy_or-main | mp_noisy_or/pmp.py |
# Copyright 2023 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example script."""
import numpy as np
from mp_noisy_or import config
from mp_noisy_or import noisy_or_bp
from mp_noisy_or import results_utils
from absl import app
LP_THRE = np.log(2)
# pylint: disable=invalid-name
def run_bp_on_2D_blind_deconvolution(_):
"""Train a noisy OR Bayesian Network with Belief Propagation on the 2D blind deconvolution example and evaluate it."""
# Train noisy-OR Bayesian network with BP
this_config = config.get_config_BP_2Ddeconv()
# Here, we modify the default parameters to accelerate convergence
this_config.learning.num_iters = 600
this_config.learning.proba_init = 0.9
# Training should take 3min on a GPU
NoisyOR = noisy_or_bp.NoisyOR_BP(this_config)
results_BP = NoisyOR.train()
# Extract the log-potentials
log_potentials = np.array(results_BP["log_potentials"])[:5, :6, :6]
W_learned = (log_potentials > LP_THRE).astype(float)
# Compute metrics
print(f"After {this_config.learning.num_iters} training iterations")
# Test Elbo
test_avg_elbo_mode = results_BP["all_test_avg_elbos_mode"][-1]
print(f"Test elbo : {round(test_avg_elbo_mode, 3)}")
# Test reconstruction error
_, _, test_rec_ratio = results_utils.BD_reconstruction(
NoisyOR.Xv_gt_test, results_BP["test_X_samples"], W_learned
)
print(f"Test rec. error: {round(100 *test_rec_ratio, 3)}%")
if __name__ == "__main__":
app.run(run_bp_on_2D_blind_deconvolution)
| max_product_noisy_or-main | examples/example.py |
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Install script for setuptools."""
import os
from setuptools import find_packages
from setuptools import setup
_CURRENT_DIR = os.path.dirname(os.path.abspath(__file__))
def _get_version():
with open(os.path.join(_CURRENT_DIR, 'chex', '__init__.py')) as fp:
for line in fp:
if line.startswith('__version__') and '=' in line:
version = line[line.find('=') + 1:].strip(' \'"\n')
if version:
return version
raise ValueError('`__version__` not defined in `chex/__init__.py`')
def _parse_requirements(path):
with open(os.path.join(_CURRENT_DIR, path)) as f:
return [
line.rstrip()
for line in f
if not (line.isspace() or line.startswith('#'))
]
setup(
name='chex',
version=_get_version(),
url='https://github.com/deepmind/chex',
license='Apache 2.0',
author='DeepMind',
description=('Chex: Testing made fun, in JAX!'),
long_description=open(os.path.join(_CURRENT_DIR, 'README.md')).read(),
long_description_content_type='text/markdown',
author_email='chex-dev@google.com',
keywords='jax testing debugging python machine learning',
packages=find_packages(exclude=['*_test.py']),
install_requires=_parse_requirements(
os.path.join(_CURRENT_DIR, 'requirements', 'requirements.txt')),
tests_require=_parse_requirements(
os.path.join(_CURRENT_DIR, 'requirements', 'requirements-test.txt')),
zip_safe=False, # Required for full installation.
include_package_data=True,
python_requires='>=3.9',
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Console',
'Intended Audience :: Science/Research',
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'Topic :: Software Development :: Testing :: Mocking',
'Topic :: Software Development :: Testing :: Unit',
'Topic :: Software Development :: Libraries :: Python Modules',
],
)
| chex-master | setup.py |
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Configuration file for the Sphinx documentation builder."""
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# pylint: disable=g-bad-import-order
# pylint: disable=g-import-not-at-top
import inspect
import os
import sys
import typing
def _add_annotations_import(path):
"""Appends a future annotations import to the file at the given path."""
with open(path) as f:
contents = f.read()
if contents.startswith('from __future__ import annotations'):
# If we run sphinx multiple times then we will append the future import
# multiple times too.
return
assert contents.startswith('#'), (path, contents.split('\n')[0])
with open(path, 'w') as f:
# NOTE: This is subtle and not unit tested, we're prefixing the first line
# in each Python file with this future import. It is important to prefix
# not insert a newline such that source code locations are accurate (we link
# to GitHub). The assertion above ensures that the first line in the file is
# a comment so it is safe to prefix it.
f.write('from __future__ import annotations ')
f.write(contents)
def _recursive_add_annotations_import():
for path, _, files in os.walk('../chex/'):
for file in files:
if file.endswith('.py'):
_add_annotations_import(os.path.abspath(os.path.join(path, file)))
if 'READTHEDOCS' in os.environ:
_recursive_add_annotations_import()
# TODO(b/254461517) Remove the annotation filtering when we drop Python 3.8
# support.
# We remove `None` type annotations as this breaks Sphinx under Python 3.7 and
# 3.8 with error `AssertionError: Invalid annotation [...] None is not a class.`
filter_nones = lambda x: dict((k, v) for k, v in x.items() if v is not None)
typing.get_type_hints = lambda obj, *unused: filter_nones(obj.__annotations__)
sys.path.insert(0, os.path.abspath('../'))
sys.path.append(os.path.abspath('ext'))
import chex
from sphinxcontrib import katex
# -- Project information -----------------------------------------------------
project = 'Chex'
copyright = '2021, DeepMind' # pylint: disable=redefined-builtin
author = 'Chex Contributors'
# -- General configuration ---------------------------------------------------
master_doc = 'index'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.autosummary',
'sphinx.ext.doctest',
'sphinx.ext.inheritance_diagram',
'sphinx.ext.intersphinx',
'sphinx.ext.linkcode',
'sphinx.ext.napoleon',
'sphinxcontrib.katex',
'sphinx_autodoc_typehints',
'sphinx_rtd_theme',
'coverage_check',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# -- Options for autodoc -----------------------------------------------------
autodoc_default_options = {
'member-order': 'bysource',
'special-members': True,
'exclude-members': '__repr__, __str__, __weakref__',
}
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = []
# html_favicon = '_static/favicon.ico'
# -- Options for katex ------------------------------------------------------
# See: https://sphinxcontrib-katex.readthedocs.io/en/0.4.1/macros.html
latex_macros = r"""
\def \d #1{\operatorname{#1}}
"""
# Translate LaTeX macros to KaTeX and add to options for HTML builder
katex_macros = katex.latex_defs_to_katex_macros(latex_macros)
katex_options = 'macros: {' + katex_macros + '}'
# Add LaTeX macros for LATEX builder
latex_elements = {'preamble': latex_macros}
# -- Source code links -------------------------------------------------------
def linkcode_resolve(domain, info):
"""Resolve a GitHub URL corresponding to Python object."""
if domain != 'py':
return None
try:
mod = sys.modules[info['module']]
except ImportError:
return None
obj = mod
try:
for attr in info['fullname'].split('.'):
obj = getattr(obj, attr)
except AttributeError:
return None
else:
obj = inspect.unwrap(obj)
try:
filename = inspect.getsourcefile(obj)
except TypeError:
return None
try:
source, lineno = inspect.getsourcelines(obj)
except OSError:
return None
# TODO(slebedev): support tags after we release an initial version.
return 'https://github.com/deepmind/chex/tree/master/chex/%s#L%d#L%d' % (
os.path.relpath(filename, start=os.path.dirname(
chex.__file__)), lineno, lineno + len(source) - 1)
# -- Intersphinx configuration -----------------------------------------------
intersphinx_mapping = {
'jax': ('https://jax.readthedocs.io/en/latest/', None),
}
source_suffix = ['.rst', '.md', '.ipynb']
| chex-master | docs/conf.py |
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Asserts all public symbols are covered in the docs."""
import inspect
import types
from typing import Any, Mapping, Sequence, Tuple
import chex as _module
from sphinx import application
from sphinx import builders
from sphinx import errors
def find_internal_python_modules(
root_module: types.ModuleType,) -> Sequence[Tuple[str, types.ModuleType]]:
"""Returns `(name, module)` for all submodules under `root_module`."""
modules = set([(root_module.__name__, root_module)])
visited = set()
to_visit = [root_module]
while to_visit:
mod = to_visit.pop()
visited.add(mod)
for name in dir(mod):
obj = getattr(mod, name)
if inspect.ismodule(obj) and obj not in visited:
if obj.__name__.startswith(_module.__name__):
if '_src' not in obj.__name__:
to_visit.append(obj)
modules.add((obj.__name__, obj))
return sorted(modules)
def get_public_symbols() -> Sequence[Tuple[str, types.ModuleType]]:
names = set()
for module_name, module in find_internal_python_modules(_module):
for name in module.__all__:
names.add(module_name + '.' + name)
return tuple(names)
class CoverageCheck(builders.Builder):
"""Builder that checks all public symbols are included."""
name = 'coverage_check'
def get_outdated_docs(self) -> str:
return 'coverage_check'
def write(self, *ignored: Any) -> None:
pass
def finish(self) -> None:
documented_objects = frozenset(self.env.domaindata['py']['objects'])
undocumented_objects = set(get_public_symbols()) - documented_objects
# Exclude deprecated API symbols.
assertion_exceptions = ('assert_tree_all_close',
'assert_tree_all_equal_comparator',
'assert_tree_all_equal_shapes',
'assert_tree_all_equal_structs')
undocumented_objects -= {'chex.' + s for s in assertion_exceptions}
# Exclude pytypes.
pytypes_exceptions = (
'Array',
'ArrayBatched',
'ArrayDevice',
'ArrayDeviceTree',
'ArrayDType',
'ArrayNumpy',
'ArrayNumpyTree',
'ArraySharded',
'ArrayTree',
'Device',
'Numeric',
'PRNGKey',
'PyTreeDef',
'Scalar',
'Shape',
)
# Exclude public constants.
pytypes_exceptions += ('ChexifyChecks',)
undocumented_objects -= {'chex.' + s for s in pytypes_exceptions}
if undocumented_objects:
undocumented_objects = tuple(sorted(undocumented_objects))
raise errors.SphinxError(
'All public symbols must be included in our documentation, did you '
'forget to add an entry to `api.rst`?\n'
f'Undocumented symbols: {undocumented_objects}')
def setup(app: application.Sphinx) -> Mapping[str, Any]:
app.add_builder(CoverageCheck)
return dict(version=_module.__version__, parallel_read_safe=True)
| chex-master | docs/ext/coverage_check.py |
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Chex: Testing made fun, in JAX!"""
from chex._src.asserts import assert_axis_dimension
from chex._src.asserts import assert_axis_dimension_comparator
from chex._src.asserts import assert_axis_dimension_gt
from chex._src.asserts import assert_axis_dimension_gteq
from chex._src.asserts import assert_axis_dimension_lt
from chex._src.asserts import assert_axis_dimension_lteq
from chex._src.asserts import assert_devices_available
from chex._src.asserts import assert_equal
from chex._src.asserts import assert_equal_rank
from chex._src.asserts import assert_equal_shape
from chex._src.asserts import assert_equal_shape_prefix
from chex._src.asserts import assert_equal_shape_suffix
from chex._src.asserts import assert_equal_size
from chex._src.asserts import assert_exactly_one_is_none
from chex._src.asserts import assert_gpu_available
from chex._src.asserts import assert_is_broadcastable
from chex._src.asserts import assert_is_divisible
from chex._src.asserts import assert_max_traces
from chex._src.asserts import assert_not_both_none
from chex._src.asserts import assert_numerical_grads
from chex._src.asserts import assert_rank
from chex._src.asserts import assert_scalar
from chex._src.asserts import assert_scalar_in
from chex._src.asserts import assert_scalar_negative
from chex._src.asserts import assert_scalar_non_negative
from chex._src.asserts import assert_scalar_positive
from chex._src.asserts import assert_shape
from chex._src.asserts import assert_size
from chex._src.asserts import assert_tpu_available
from chex._src.asserts import assert_tree_all_finite
from chex._src.asserts import assert_tree_has_only_ndarrays
from chex._src.asserts import assert_tree_is_on_device
from chex._src.asserts import assert_tree_is_on_host
from chex._src.asserts import assert_tree_is_sharded
from chex._src.asserts import assert_tree_no_nones
from chex._src.asserts import assert_tree_shape_prefix
from chex._src.asserts import assert_tree_shape_suffix
from chex._src.asserts import assert_trees_all_close
from chex._src.asserts import assert_trees_all_close_ulp
from chex._src.asserts import assert_trees_all_equal
from chex._src.asserts import assert_trees_all_equal_comparator
from chex._src.asserts import assert_trees_all_equal_dtypes
from chex._src.asserts import assert_trees_all_equal_shapes
from chex._src.asserts import assert_trees_all_equal_shapes_and_dtypes
from chex._src.asserts import assert_trees_all_equal_sizes
from chex._src.asserts import assert_trees_all_equal_structs
from chex._src.asserts import assert_type
from chex._src.asserts import clear_trace_counter
from chex._src.asserts import disable_asserts
from chex._src.asserts import enable_asserts
from chex._src.asserts import if_args_not_none
from chex._src.asserts_chexify import block_until_chexify_assertions_complete
from chex._src.asserts_chexify import chexify
from chex._src.asserts_chexify import ChexifyChecks
from chex._src.asserts_chexify import with_jittable_assertions
from chex._src.dataclass import dataclass
from chex._src.dataclass import mappable_dataclass
from chex._src.dataclass import register_dataclass_type_with_jax_tree_util
from chex._src.dimensions import Dimensions
from chex._src.fake import fake_jit
from chex._src.fake import fake_pmap
from chex._src.fake import fake_pmap_and_jit
from chex._src.fake import set_n_cpu_devices
from chex._src.pytypes import Array
from chex._src.pytypes import ArrayBatched
from chex._src.pytypes import ArrayDevice
from chex._src.pytypes import ArrayDeviceTree
from chex._src.pytypes import ArrayDType
from chex._src.pytypes import ArrayNumpy
from chex._src.pytypes import ArrayNumpyTree
from chex._src.pytypes import ArraySharded
from chex._src.pytypes import ArrayTree
from chex._src.pytypes import Device
from chex._src.pytypes import Numeric
from chex._src.pytypes import PRNGKey
from chex._src.pytypes import PyTreeDef
from chex._src.pytypes import Scalar
from chex._src.pytypes import Shape
from chex._src.restrict_backends import restrict_backends
from chex._src.variants import all_variants
from chex._src.variants import ChexVariantType
from chex._src.variants import params_product
from chex._src.variants import TestCase
from chex._src.variants import variants
__version__ = "0.1.82"
__all__ = (
"all_variants",
"Array",
"ArrayBatched",
"ArrayDevice",
"ArrayDeviceTree",
"ArrayDType",
"ArrayNumpy",
"ArrayNumpyTree",
"ArraySharded",
"ArrayTree",
"ChexifyChecks",
"assert_axis_dimension",
"assert_axis_dimension_comparator",
"assert_axis_dimension_gt",
"assert_axis_dimension_gteq",
"assert_axis_dimension_lt",
"assert_axis_dimension_lteq",
"assert_devices_available",
"assert_equal",
"assert_equal_rank",
"assert_equal_shape",
"assert_equal_shape_prefix",
"assert_equal_shape_suffix",
"assert_equal_size",
"assert_exactly_one_is_none",
"assert_gpu_available",
"assert_is_broadcastable",
"assert_is_divisible",
"assert_max_traces",
"assert_not_both_none",
"assert_numerical_grads",
"assert_rank",
"assert_scalar",
"assert_scalar_in",
"assert_scalar_negative",
"assert_scalar_non_negative",
"assert_scalar_positive",
"assert_shape",
"assert_size",
"assert_tpu_available",
"assert_tree_all_finite",
"assert_tree_has_only_ndarrays",
"assert_tree_is_on_device",
"assert_tree_is_on_host",
"assert_tree_is_sharded",
"assert_tree_no_nones",
"assert_tree_shape_prefix",
"assert_tree_shape_suffix",
"assert_trees_all_close",
"assert_trees_all_close_ulp",
"assert_trees_all_equal",
"assert_trees_all_equal_comparator",
"assert_trees_all_equal_dtypes",
"assert_trees_all_equal_shapes",
"assert_trees_all_equal_shapes_and_dtypes",
"assert_trees_all_equal_sizes",
"assert_trees_all_equal_structs",
"assert_type",
"block_until_chexify_assertions_complete",
"chexify",
"ChexVariantType",
"clear_trace_counter",
"dataclass",
"Device",
"Dimensions",
"disable_asserts",
"enable_asserts",
"fake_jit",
"fake_pmap",
"fake_pmap_and_jit",
"if_args_not_none",
"mappable_dataclass",
"Numeric",
"params_product",
"PRNGKey",
"PyTreeDef",
"register_dataclass_type_with_jax_tree_util",
"restrict_backends",
"Scalar",
"set_n_cpu_devices",
"Shape",
"TestCase",
"variants",
"with_jittable_assertions",
)
# _________________________________________
# / Please don't use symbols in `_src` they \
# \ are not part of the Chex public API. /
# -----------------------------------------
# \ ^__^
# \ (oo)\_______
# (__)\ )\/\
# ||----w |
# || ||
#
|
chex-master
|
chex/__init__.py
|
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for chex."""
from absl.testing import absltest
import chex
class ChexTest(absltest.TestCase):
"""Test chex can be imported correctly."""
def test_import(self):
self.assertTrue(hasattr(chex, 'assert_devices_available'))
if __name__ == '__main__':
absltest.main()
|
chex-master
|
chex/chex_test.py
|
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test for `set_n_cpu_devices` from `fake.py`.
This test is isolated to ensure hermeticity because its execution changes
XLA backend configuration.
"""
import unittest
from absl.testing import absltest
from chex._src import asserts
from chex._src import fake
class DevicesSetterTest(absltest.TestCase):
def test_set_n_cpu_devices(self):
try:
# Should not initialize backends.
fake.set_n_cpu_devices(4)
except RuntimeError as set_cpu_exception:
raise unittest.SkipTest(
"set_n_cpu_devices: backend's already been initialized. "
'Run this test in isolation from others.') from set_cpu_exception
# Hence, this one does not fail.
fake.set_n_cpu_devices(6)
# This assert initializes backends.
asserts.assert_devices_available(6, 'cpu', backend='cpu')
# Which means that next call must fail.
with self.assertRaisesRegex(RuntimeError,
'Attempted to set 8 devices, but 6 CPUs.+'):
fake.set_n_cpu_devices(8)
if __name__ == '__main__':
absltest.main()
|
chex-master
|
chex/_src/fake_set_n_cpu_devices_test.py
|
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `asserts_internal.py`."""
import functools
from absl.testing import absltest
from absl.testing import parameterized
from chex._src import asserts_internal as ai
from chex._src import variants
import jax
import jax.numpy as jnp
class IsTraceableTest(variants.TestCase):
@variants.variants(with_jit=True, with_pmap=True)
def test_is_traceable(self):
def dummy_wrapper(fn):
@functools.wraps(fn)
def fn_wrapped(fn, *args):
return fn(args)
return fn_wrapped
fn = lambda x: x.sum()
wrapped_fn = dummy_wrapper(fn)
self.assertFalse(ai.is_traceable(fn))
self.assertFalse(ai.is_traceable(wrapped_fn))
var_fn = self.variant(fn)
wrapped_var_f = dummy_wrapper(var_fn)
var_wrapped_f = self.variant(wrapped_fn)
self.assertTrue(ai.is_traceable(var_fn))
self.assertTrue(ai.is_traceable(wrapped_var_f))
self.assertTrue(ai.is_traceable(var_wrapped_f))
class ExceptionMessageFormatTest(variants.TestCase):
@parameterized.product(
include_default_msg=(False, True),
include_custom_msg=(False, True),
exc_type=(AssertionError, ValueError),
)
def test_format(self, include_default_msg, include_custom_msg, exc_type):
exc_msg = lambda x: f'{x} is non-positive.'
@functools.partial(ai.chex_assertion, jittable_assert_fn=None)
def assert_positive(x):
if x <= 0:
raise AssertionError(exc_msg(x))
@functools.partial(ai.chex_assertion, jittable_assert_fn=None)
def assert_each_positive(*args):
for x in args:
assert_positive(x)
# Pass.
assert_positive(1)
assert_each_positive(1, 2, 3)
# Check the format of raised exceptions' messages.
def expected_exc_msg(x, custom_msg):
msg = exc_msg(x) if include_default_msg else ''
msg = rf'{msg} \[{custom_msg}\]' if custom_msg else msg
return msg
# Run in a loop to generate different custom messages.
for i in range(3):
custom_msg = f'failed at iter {i}' if include_custom_msg else ''
with self.assertRaisesRegex(
exc_type, ai.get_err_regex(expected_exc_msg(-1, custom_msg))):
assert_positive( # pylint:disable=unexpected-keyword-arg
-1,
custom_message=custom_msg,
include_default_message=include_default_msg,
exception_type=exc_type)
with self.assertRaisesRegex(
exc_type, ai.get_err_regex(expected_exc_msg(-3, custom_msg))):
assert_each_positive( # pylint:disable=unexpected-keyword-arg
1,
-3,
2,
custom_message=custom_msg,
include_default_message=include_default_msg,
exception_type=exc_type)
class JitCompatibleTest(variants.TestCase):
def test_api(self):
def assert_fn(x):
if x.shape != (2,):
raise AssertionError(f'shape != (2,) {x.shape}!')
for transform_fn in (jax.jit, jax.grad, jax.vmap):
x_ok = jnp.ones((2,))
x_wrong = jnp.ones((3,))
is_vmap = transform_fn is jax.vmap
if is_vmap:
x_ok, x_wrong = (jnp.expand_dims(x, 0) for x in (x_ok, x_wrong))
# Jax-compatible.
assert_compat_fn = ai.chex_assertion(assert_fn, jittable_assert_fn=None)
def compat_fn(x, assertion=assert_compat_fn):
assertion(x)
return x.sum()
if not is_vmap:
compat_fn(x_ok)
transform_fn(compat_fn)(x_ok)
with self.assertRaisesRegex(AssertionError, 'shape !='):
transform_fn(compat_fn)(x_wrong)
# JAX-incompatible.
assert_incompat_fn = ai.chex_assertion(
assert_fn, jittable_assert_fn=assert_fn)
def incompat_fn(x, assertion=assert_incompat_fn):
assertion(x)
return x.sum()
if not is_vmap:
incompat_fn(x_ok)
with self.assertRaisesRegex(RuntimeError,
'Value assertions can only be called from'):
transform_fn(incompat_fn)(x_wrong)
if __name__ == '__main__':
jax.config.update('jax_numpy_rank_promotion', 'raise')
absltest.main()
|
chex-master
|
chex/_src/asserts_internal_test.py
|
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Chex assertion internal utilities and symbols.
[README!]
We reserve the right to change the code in this module at any time without
providing any guarantees of backward compatibility. For this reason,
we strongly recommend that you avoid using this module directly at all costs!
Instead, consider opening an issue on GitHub and describing your use case.
"""
import collections
import collections.abc
import functools
import re
import threading
import traceback
from typing import Any, Sequence, Union, Callable, Hashable, List, Optional, Set, Tuple, Type
from absl import logging
from chex._src import pytypes
import jax
from jax.experimental import checkify
import jax.numpy as jnp
import numpy as np
# Custom pytypes.
TLeaf = Any
TLeavesEqCmpFn = Callable[[TLeaf, TLeaf], bool]
TLeavesEqCmpErrorFn = Callable[[TLeaf, TLeaf], str]
# TODO(iukemaev): define a typing protocol for TChexAssertion.
# Chex assertion signature:
# (*args,
# custom_message: Optional[str] = None,
# custom_message_format_vars: Sequence[Any] = (),
# include_default_message: bool = True,
# exception_type: Type[Exception] = AssertionError,
# **kwargs)
TChexAssertion = Callable[..., None]
TAssertFn = Callable[..., None]
TJittableAssertFn = Callable[..., pytypes.Array] # a predicate function
# Matchers.
TDimMatcher = Optional[Union[int, Set[int], type(Ellipsis)]]
TShapeMatcher = Sequence[TDimMatcher]
class _ChexifyStorage(threading.local):
"""Thread-safe storage for internal variables used in @chexify."""
wait_fns = []
level = 0
# Chex namespace variables.
ERR_PREFIX = "[Chex] "
TRACE_COUNTER = collections.Counter()
DISABLE_ASSERTIONS = False
# This variable is used for _chexify_ transformations, see `asserts_chexify.py`.
CHEXIFY_STORAGE = _ChexifyStorage()
def assert_collection_of_arrays(inputs: Sequence[pytypes.Array]):
"""Checks if ``inputs`` is a collection of arrays."""
if not isinstance(inputs, collections.abc.Collection):
raise ValueError(f"`inputs` is not a collection of arrays: {inputs}.")
def jnp_to_np_array(arr: pytypes.Array) -> np.ndarray:
"""Converts `jnp.ndarray` to `np.ndarray`."""
if getattr(arr, "dtype", None) == jnp.bfloat16:
# Numpy does not support `bfloat16`.
arr = arr.astype(jnp.float32)
return jax.device_get(arr)
def deprecation_wrapper(new_fn, old_name, new_name):
"""Allows deprecated functions to continue running, with a warning logged."""
def inner_fn(*args, **kwargs):
logging.warning(
"chex.%s has been renamed to chex.%s, please update your code.",
old_name, new_name)
return new_fn(*args, **kwargs)
return inner_fn
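# Illustrative sketch (an assumption, not taken from this module): deprecated
# public aliases can be kept alive with a rename warning, e.g.
#   assert_tree_all_close = deprecation_wrapper(
#       assert_trees_all_close, "assert_tree_all_close",
#       "assert_trees_all_close")
# Calling the old name then logs the warning and forwards to the new function.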
def get_stacktrace_without_chex_internals() -> List[traceback.FrameSummary]:
"""Returns the latest non-chex frame from the call stack."""
stacktrace = list(traceback.extract_stack())
for i in reversed(range(len(stacktrace))):
fname = stacktrace[i].filename
if fname.find("/chex/") == -1 or fname.endswith("_test.py"):
return stacktrace[:i+1]
debug_info = "\n-----\n".join(traceback.format_stack())
raise RuntimeError(
"get_stacktrace_without_chex_internals() failed. "
"Please file a bug at https://github.com/deepmind/chex/issues and "
"include the following debug info in it. "
"Please make sure it does not include any private information! "
f"Debug: '{debug_info}'.")
def get_err_regex(message: str) -> str:
"""Constructs a regexp for the exception message.
Args:
message: an exception message.
Returns:
Regexp that ensures the message follows the standard chex formatting.
"""
# (ERR_PREFIX + any symbols (incl. \n) + message)
return f"{re.escape(ERR_PREFIX)}[\\s\\S]*{message}"
def get_chexify_err_message(name: str, msg: str = "") -> str:
"""Constructs an error message for the chexify exception."""
return f"{ERR_PREFIX}chexify assertion '{name}' failed: {msg}"
def _make_host_assertion(assert_fn: TAssertFn,
name: Optional[str] = None) -> TChexAssertion:
"""Constructs a host assertion given `assert_fn`.
  This wrapper should only be applied to assertions that are either
  a) never used in jitted code, or
  b) used in jitted code without checking/accessing tensor values (i.e.
they do not introduce value-dependent python control flow, see
https://jax.readthedocs.io/en/latest/errors.html#jax.errors.ConcretizationTypeError).
Args:
assert_fn: A function implementing the check.
name: A name for assertion.
Returns:
A chex assertion.
"""
if name is None:
name = assert_fn.__name__
def _assert_on_host(*args,
custom_message: Optional[str] = None,
custom_message_format_vars: Sequence[Any] = (),
include_default_message: bool = True,
exception_type: Type[Exception] = AssertionError,
**kwargs) -> None:
# Format error's stack trace to remove Chex' internal frames.
assertion_exc = None
value_exc = None
try:
assert_fn(*args, **kwargs)
except AssertionError as e:
assertion_exc = e
except ValueError as e:
value_exc = e
finally:
if value_exc is not None:
raise ValueError(str(value_exc))
if assertion_exc is not None:
# Format the exception message.
error_msg = str(assertion_exc)
# Include only the name of the outermost chex assertion.
if error_msg.startswith(ERR_PREFIX):
error_msg = error_msg[error_msg.find("failed:") + len("failed:"):]
# Whether to include the default error message.
default_msg = (f"Assertion {name} failed: "
if include_default_message else "")
error_msg = f"{ERR_PREFIX}{default_msg}{error_msg}"
# Whether to include a custom error message.
if custom_message:
if custom_message_format_vars:
custom_message = custom_message.format(*custom_message_format_vars)
error_msg = f"{error_msg} [{custom_message}]"
raise exception_type(error_msg)
return _assert_on_host
def chex_assertion(
assert_fn: TAssertFn,
jittable_assert_fn: Optional[TJittableAssertFn],
name: Optional[str] = None) -> TChexAssertion:
"""Wraps Chex assert functions to control their common behaviour.
Extends the assertion to support the following optional auxiliary kwargs:
custom_message: A string to include into the emitted exception messages.
custom_message_format_vars: A list of variables to pass as arguments to
`custom_message.format()`.
include_default_message: Whether to include the default Chex message into
the emitted exception messages.
exception_type: An exception type to use. `AssertionError` by default.
Args:
assert_fn: A function implementing the check.
jittable_assert_fn: An optional jittable version of `assert_fn` implementing
a predicate (returning `True` only if assertion passes).
Required for value assertions.
name: A name for assertion. If not provided, use `assert_fn.__name__`.
Returns:
A Chex assertion (with auxiliary kwargs).
"""
if name is None:
name = assert_fn.__name__
host_assertion_fn = _make_host_assertion(assert_fn, name)
@functools.wraps(assert_fn)
def _chex_assert_fn(*args,
custom_message: Optional[str] = None,
custom_message_format_vars: Sequence[Any] = (),
include_default_message: bool = True,
exception_type: Type[Exception] = AssertionError,
**kwargs) -> None:
if DISABLE_ASSERTIONS:
return
if (jittable_assert_fn is not None and has_tracers((args, kwargs))):
if not CHEXIFY_STORAGE.level:
raise RuntimeError(
"Value assertions can only be called from functions wrapped "
"with `@chex.chexify`. See the docs.")
      # A wrapper to inject auxiliary debug info and `custom_message`.
original_check = checkify.check
def _check(pred, msg, *fmt_args, **fmt_kwargs):
# Add chex info.
msg = get_chexify_err_message(name, msg)
# Add a custom message.
if custom_message:
msg += f" Custom message: {custom_message}."
fmt_args = list(fmt_args) + list(custom_message_format_vars)
# Add a traceback and a pointer to the callsite.
stacktrace = get_stacktrace_without_chex_internals()
msg += (
f" [failed at: {stacktrace[-1].filename}:{stacktrace[-1].lineno}]"
)
# Call original `checkify.check()`.
original_check(pred, msg, *fmt_args, **fmt_kwargs)
# Mock during the assertion's execution time.
checkify.check = _check
pred = jittable_assert_fn(*args, **kwargs) # execute the assertion
      checkify.check = original_check  # restore the original implementation
# A safeguard to ensure that the results of a check are not ignored.
# In particular, this check fails when `pred` is False and no
# `checkify.check` calls took place in `jittable_assert_fn`, which would
# be a bug in the assertion's implementation.
checkify.check(pred, "assertion failed!")
else:
try:
host_assertion_fn(
*args,
custom_message=custom_message,
custom_message_format_vars=custom_message_format_vars,
include_default_message=include_default_message,
exception_type=exception_type,
**kwargs)
except jax.errors.ConcretizationTypeError as exc:
msg = ("Chex assertion detected `ConcretizationTypeError`: it is very "
"likely that it tried to access tensors' values during tracing. "
"Make sure that you defined a jittable version of this chex "
"assertion; if that does not help, please file a bug.")
raise exc from RuntimeError(msg)
# Override name.
setattr(_chex_assert_fn, "__name__", name)
return _chex_assert_fn
def format_tree_path(path: Sequence[Any]) -> str:
return "/".join(str(p) for p in path)
def format_shape_matcher(shape: TShapeMatcher) -> str:
return f"({', '.join('...' if d is Ellipsis else str(d) for d in shape)})"
def num_devices_available(devtype: str, backend: Optional[str] = None) -> int:
"""Returns the number of available device of the given type."""
devtype = devtype.lower()
supported_types = ("cpu", "gpu", "tpu")
if devtype not in supported_types:
raise ValueError(
f"Unknown device type '{devtype}' (expected one of {supported_types}).")
return sum(d.platform == devtype for d in jax.devices(backend))
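# Illustrative: `num_devices_available("gpu")` counts `jax.devices()` entries
# whose platform is "gpu"; on a CPU-only host it simply returns 0.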
def get_tracers(tree: pytypes.ArrayTree) -> Tuple[jax.core.Tracer]:
"""Returns a tuple with tracers from a tree."""
return tuple(
x for x in jax.tree_util.tree_leaves(tree)
if isinstance(x, jax.core.Tracer))
def has_tracers(tree: pytypes.ArrayTree) -> bool:
"""Checks whether a tree contains any tracers."""
return any(
isinstance(x, jax.core.Tracer) for x in jax.tree_util.tree_leaves(tree))
def is_traceable(fn) -> bool:
"""Checks if function is traceable.
JAX traces a function when it is wrapped with @jit, @pmap, or @vmap.
In other words, this function checks whether `fn` is wrapped with any of
the aforementioned JAX transformations.
Args:
fn: function to assert.
Returns:
Bool indicating whether fn is traceable.
"""
fn_string_tokens = (
".reraise_with_filtered_traceback", # JIT in Python ver. >= 3.7
"CompiledFunction", # C++ JIT in jaxlib 0.1.66 or newer.
"pmap.", # Python pmap
"PmapFunction", # C++ pmap in jaxlib 0.1.72 or newer.
"vmap.", # vmap
"_python_pjit",
"_cpp_pjit",
)
fn_type_tokens = (
"PmapFunction",
"PjitFunction",
)
# Un-wrap `fn` and check if any internal fn is jitted by pattern matching.
fn_ = fn
while True:
if any(t in str(fn_) for t in fn_string_tokens):
return True
if any(t in str(type(fn_)) for t in fn_type_tokens):
return True
if hasattr(fn_, "__wrapped__"):
# Wrapper.
fn_globals = getattr(fn_, "__globals__", {})
if fn_globals.get("__name__", None) == "jax.api":
# Wrapper from `jax.api`.
return True
if "api_boundary" in fn_globals:
# api_boundary is a JAX wrapper for traced functions.
return True
try:
if isinstance(fn_, jax.lib.xla_extension.PjitFunction):
return True
except AttributeError:
pass
else:
break
fn_ = fn_.__wrapped__
return False
def assert_leaves_all_eq_comparator(
equality_comparator: TLeavesEqCmpFn,
error_msg_fn: Callable[[TLeaf, TLeaf, str, int, int],
str], path: Sequence[Any], *leaves: Sequence[TLeaf]):
"""Asserts all leaves are equal using custom comparator. Not jittable."""
path_str = format_tree_path(path)
for i in range(1, len(leaves)):
if not equality_comparator(leaves[0], leaves[i]):
raise AssertionError(error_msg_fn(leaves[0], leaves[i], path_str, 0, i))
def assert_trees_all_eq_comparator_jittable(
equality_comparator: TLeavesEqCmpFn,
error_msg_template: str,
*trees: Sequence[pytypes.ArrayTree]) -> pytypes.Array:
"""Asserts all trees are equal using custom comparator. JIT-friendly."""
if len(trees) < 2:
raise ValueError(
"Assertions over only one tree does not make sense. Maybe you wrote "
"`assert_trees_xxx([a, b])` instead of `assert_trees_xxx(a, b)`, or "
"forgot the `error_msg_fn` arg to `assert_trees_xxx`?")
def _tree_error_msg_fn(
path: Tuple[Union[int, str, Hashable]], i_1: int, i_2: int):
if path:
return (
f"Trees {i_1} and {i_2} differ in leaves '{path}':"
f" {error_msg_template}"
)
else:
return f"Trees (arrays) {i_1} and {i_2} differ: {error_msg_template}."
def _cmp_leaves(path, *leaves):
verdict = jnp.array(True)
for i in range(1, len(leaves)):
check_res = equality_comparator(leaves[0], leaves[i])
checkify.check(
pred=check_res,
msg=_tree_error_msg_fn(path, 0, i),
arr_1=leaves[0],
arr_2=leaves[i],
)
verdict = jnp.logical_and(verdict, check_res)
return verdict
# Trees are guaranteed to have the same structure.
paths = [
convert_jax_path_to_dm_path(path)
for path, _ in jax.tree_util.tree_flatten_with_path(trees[0])[0]]
trees_leaves = [jax.tree_util.tree_leaves(tree) for tree in trees]
verdict = jnp.array(True)
for leaf_i, path in enumerate(paths):
verdict = jnp.logical_and(
verdict, _cmp_leaves(path, *[leaves[leaf_i] for leaves in trees_leaves])
)
return verdict
JaxKeyType = Union[
int,
str,
Hashable,
jax.tree_util.SequenceKey,
jax.tree_util.DictKey,
jax.tree_util.FlattenedIndexKey,
jax.tree_util.GetAttrKey,
]
def convert_jax_path_to_dm_path(
jax_tree_path: Sequence[JaxKeyType],
) -> Tuple[Union[int, str, Hashable]]:
"""Converts a path from jax.tree_util to one from dm-tree."""
# pytype:disable=attribute-error
def _convert_key_fn(key: JaxKeyType) -> Union[int, str, Hashable]:
if isinstance(key, (str, int)):
return key # int | str.
if isinstance(key, jax.tree_util.SequenceKey):
return key.idx # int.
if isinstance(key, jax.tree_util.DictKey):
return key.key # Hashable
if isinstance(key, jax.tree_util.FlattenedIndexKey):
return key.key # int.
if isinstance(key, jax.tree_util.GetAttrKey):
return key.name # str.
raise ValueError(f"Jax tree key '{key}' of type '{type(key)}' not valid.")
# pytype:enable=attribute-error
return tuple(_convert_key_fn(key) for key in jax_tree_path)
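# Worked example (illustrative): for a pytree such as {"a": [x, y]},
# jax.tree_util produces key paths like (DictKey(key="a"), SequenceKey(idx=1));
# this helper converts them to the dm-tree style tuple ("a", 1).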
|
chex-master
|
chex/_src/asserts_internal.py
|
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Chex variants utilities."""
import enum
import functools
import inspect
import itertools
from typing import Any, Sequence
import unittest
from absl import flags
from absl.testing import parameterized
from chex._src import fake
from chex._src import pytypes
import jax
from jax import tree_util
import jax.numpy as jnp
import toolz
FLAGS = flags.FLAGS
flags.DEFINE_bool(
"chex_skip_pmap_variant_if_single_device", True,
"Whether to skip pmap variant if only one device is available.")
# We choose to subclass instead of a simple alias, as Python doesn't allow
# multiple inheritance from the same class, and users may want to subclass their
# tests from both `chex.TestCase` and `parameterized.TestCase`.
#
# Users are free to use any base class that supports generator unrolling
# instead of `variants.TestCase` or `parameterized.TestCase`. If a base class
# doesn't support this feature, variant tests fail with a corresponding error.
class TestCase(parameterized.TestCase):
"""A class for Chex tests that use variants.
See the docstring for ``chex.variants`` for more information.
Note: ``chex.variants`` returns a generator producing one test per variant.
Therefore, the used test class must support dynamic unrolling of these
generators during module import. It is implemented (and battle-tested) in
``absl.parameterized.TestCase``, and here we subclass from it.
"""
def variant(self, *args, **kwargs):
"""Raises a RuntimeError if not overriden or redefined."""
raise RuntimeError(
"self.variant is not defined: forgot to wrap a test in @chex.variants?")
class ChexVariantType(enum.Enum):
"""An enumeration of available Chex variants.
Use ``self.variant.type`` to get type of the current test variant.
See the docstring of ``chex.variants`` for more information.
"""
WITH_JIT = 1
WITHOUT_JIT = 2
WITH_DEVICE = 3
WITHOUT_DEVICE = 4
WITH_PMAP = 5
def __str__(self) -> str:
return "_" + self.name.lower()
tree_map = tree_util.tree_map
def params_product(*params_lists: Sequence[Sequence[Any]],
named: bool = False) -> Sequence[Sequence[Any]]:
"""Generates a cartesian product of `params_lists`.
See tests from ``variants_test.py`` for examples of usage.
Args:
*params_lists: A list of params combinations.
named: Whether to generate test names (for
`absl.parameterized.named_parameters(...)`).
Returns:
A cartesian product of `params_lists` combinations.
"""
def generate():
for combination in itertools.product(*params_lists):
if named:
name = "_".join(t[0] for t in combination)
args_tuples = (t[1:] for t in combination)
args = sum(args_tuples, ())
yield (name, *args)
else:
yield sum(combination, ())
return list(generate())
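# Worked example (illustrative):
#   params_product([("one", 1), ("two", 2)], [("a", "x")], named=True)
# returns [("one_a", 1, "x"), ("two_a", 2, "x")], i.e. a named cartesian
# product ready to feed into `parameterized.named_parameters(...)`.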
def count_num_calls(fn):
"""Counts the number of times the function was called."""
num_calls = 0
@functools.wraps(fn)
def fn_wrapped(*args, **kwargs):
nonlocal num_calls
num_calls += 1
return fn(*args, **kwargs)
return fn_wrapped, lambda: num_calls
class VariantsTestCaseGenerator:
"""TestCase generator for chex variants. Supports sharding."""
def __init__(self, test_object, which_variants):
self._which_variants = which_variants
self._generated_names_freq = {}
if hasattr(test_object, "__iter__"):
# `test_object` is a generator (e.g. parameterised test).
self._test_methods = list(test_object)
else:
# `test_object` is a single test method.
self._test_methods = [test_object]
def add_variants(self, which_variants):
"""Merge variants."""
for var, incl in which_variants.items():
self._which_variants[var] = self._which_variants.get(var, False) or incl
@property
def __name__(self):
msg = ("A test wrapper attempts to access __name__ of "
"VariantsTestCaseGenerator. Usually, this happens when "
"@parameterized wraps @variants.variants. Make sure that the "
"@variants.variants wrapper is an outer one, i.e. nothing wraps it.")
raise RuntimeError(msg)
def __call__(self):
msg = ("A test wrapper attempts to invoke __call__ of "
"VariantsTestCaseGenerator: make sure that all `TestCase` instances "
"that use variants inherit from `chex.TestCase`.")
raise RuntimeError(msg)
def _set_test_name(self, test_method, variant):
"""Set a name for the generated test."""
name = getattr(test_method, "__name__", "")
params_repr = getattr(test_method, "__x_params_repr__", "")
chex_suffix = f"{variant}"
candidate_name = "_".join(filter(None, [name, params_repr, chex_suffix]))
name_freq = self._generated_names_freq.get(candidate_name, 0)
if name_freq:
# Ensure that test names are unique.
new_name = name + "_" + str(name_freq)
unique_name = "_".join(filter(None, [new_name, params_repr, chex_suffix]))
else:
unique_name = candidate_name
self._generated_names_freq[candidate_name] = name_freq + 1
# Always use name for compatibility with `absl.testing.parameterized`.
setattr(test_method, "__name__", unique_name)
setattr(test_method, "__x_params_repr__", "")
setattr(test_method, "__x_use_name__", True)
return test_method
def _inner_iter(self, test_method):
"""Generate chex variants for a single test."""
def make_test(variant: ChexVariantType):
@functools.wraps(test_method)
def test(self, *args, **kwargs):
# Skip pmap variant if only one device is available.
if (variant is ChexVariantType.WITH_PMAP and
FLAGS["chex_skip_pmap_variant_if_single_device"].value and
jax.device_count() < 2):
raise unittest.SkipTest(
f"Only 1 device is available ({jax.devices()}).")
# n_cpu_devices assert.
if FLAGS["chex_assert_multiple_cpu_devices"].value:
required_n_cpus = fake.get_n_cpu_devices_from_xla_flags()
if required_n_cpus < 2:
raise RuntimeError(
f"Required number of CPU devices is {required_n_cpus} < 2."
"Consider setting up your test module to use multiple CPU "
" devices (see README.md) or disabling "
"`chex_assert_multiple_cpu_devices` flag.")
available_n_cpus = jax.device_count("cpu")
if required_n_cpus != available_n_cpus:
raise RuntimeError(
"Number of available CPU devices is not equal to the required: "
f"{available_n_cpus} != {required_n_cpus}")
# Set up the variant.
self.variant, num_calls = count_num_calls(_variant_decorators[variant])
self.variant.type = variant
res = test_method(self, *args, **kwargs)
if num_calls() == 0:
raise RuntimeError(
"Test is wrapped in @chex.variants, but never calls self.variant."
" Consider debugging the test or removing @chex.variants wrapper."
f" (variant: {variant})")
return res
self._set_test_name(test, variant)
return test
selected_variants = [
var_name for var_name, is_included in self._which_variants.items()
if is_included
]
if not selected_variants:
raise ValueError(f"No variants selected for test: {test_method}.")
return (make_test(var_name) for var_name in selected_variants)
def __iter__(self):
"""Generate chex variants for each test case."""
return itertools.chain(*(self._inner_iter(m) for m in self._test_methods))
@toolz.curry
def _variants_fn(test_object, **which_variants) -> VariantsTestCaseGenerator:
"""Implements `variants` and `all_variants`."""
# Convert keys to enum entries.
which_variants = {
ChexVariantType[name.upper()]: var
for name, var in which_variants.items()
}
if isinstance(test_object, VariantsTestCaseGenerator):
# Merge variants for nested wrappers.
test_object.add_variants(which_variants)
else:
test_object = VariantsTestCaseGenerator(test_object, which_variants)
return test_object
@toolz.curry
# pylint: disable=redefined-outer-name
def variants(test_method,
with_jit: bool = False,
without_jit: bool = False,
with_device: bool = False,
without_device: bool = False,
with_pmap: bool = False) -> VariantsTestCaseGenerator:
# pylint: enable=redefined-outer-name
"""Decorates a test to expose Chex variants.
The decorated test has access to a decorator called ``self.variant``, which
may be applied to functions to test different JAX behaviors. Consider:
.. code-block:: python
@chex.variants(with_jit=True, without_jit=True)
def test(self):
@self.variant
def f(x, y):
return x + y
self.assertEqual(f(1, 2), 3)
In this example, the function ``test`` will be called twice: once with `f`
jitted (i.e. using `jax.jit`) and another where `f` is not jitted.
  Variants `with_jit=True` and `with_pmap=True` accept additional arguments
  specific to them. Example:
.. code-block:: python
@chex.variants(with_jit=True)
def test(self):
@self.variant(static_argnums=(1,))
def f(x, y):
# `y` is not traced.
return x + y
self.assertEqual(f(1, 2), 3)
Variant `with_pmap=True` also accepts `broadcast_args_to_devices`
(whether to broadcast each input argument to all participating devices),
`reduce_fn` (a function to apply to results of pmapped `fn`), and
`n_devices` (number of devices to use in the `pmap` computation).
See the docstring of `_with_pmap` for more details (including default values).
If used with ``absl.testing.parameterized``, `@chex.variants` must wrap it:
.. code-block:: python
@chex.variants(with_jit=True, without_jit=True)
@parameterized.named_parameters('test', *args)
def test(self, *args):
...
Tests that use this wrapper must be inherited from ``parameterized.TestCase``.
For more examples see ``variants_test.py``.
Args:
test_method: A test method to decorate.
with_jit: Whether to test with `jax.jit`.
without_jit: Whether to test without `jax.jit`. Any jit compilation done
within the test method will not be affected.
with_device: Whether to test with args placed on device, using
`jax.device_put`.
without_device: Whether to test with args (explicitly) not placed on device,
using `jax.device_get`.
with_pmap: Whether to test with `jax.pmap`, with computation duplicated
across devices.
Returns:
A decorated ``test_method``.
"""
return _variants_fn(
test_method,
with_jit=with_jit,
without_jit=without_jit,
with_device=with_device,
without_device=without_device,
with_pmap=with_pmap)
@toolz.curry
# pylint: disable=redefined-outer-name
def all_variants(test_method,
with_jit: bool = True,
without_jit: bool = True,
with_device: bool = True,
without_device: bool = True,
with_pmap: bool = True) -> VariantsTestCaseGenerator:
# pylint: enable=redefined-outer-name
"""Equivalent to ``chex.variants`` but with flipped defaults."""
return _variants_fn(
test_method,
with_jit=with_jit,
without_jit=without_jit,
with_device=with_device,
without_device=without_device,
with_pmap=with_pmap)
def check_variant_arguments(variant_fn):
"""Raises `ValueError` if `variant_fn` got an unknown argument."""
@functools.wraps(variant_fn)
def wrapper(*args, **kwargs):
unknown_args = set(kwargs.keys()) - _valid_kwargs_keys
if unknown_args:
raise ValueError(f"Unknown arguments in `self.variant`: {unknown_args}.")
return variant_fn(*args, **kwargs)
return wrapper
@toolz.curry
@check_variant_arguments
def _with_jit(fn,
static_argnums=None,
static_argnames=None,
device=None,
backend=None,
**unused_kwargs):
"""Variant that applies `jax.jit` to fn."""
return jax.jit(
fn,
static_argnums=static_argnums,
static_argnames=static_argnames,
device=device,
backend=backend)
@toolz.curry
@check_variant_arguments
def _without_jit(fn, **unused_kwargs):
"""Variant that does not apply `jax.jit` to a fn (identity)."""
@functools.wraps(fn)
def wrapper(*args, **kwargs):
return fn(*args, **kwargs)
return wrapper
@toolz.curry
@check_variant_arguments
def _with_device(fn, ignore_argnums=(), static_argnums=(), **unused_kwargs):
"""Variant that applies `jax.device_put` to the args of fn."""
if isinstance(ignore_argnums, int):
ignore_argnums = (ignore_argnums,)
if isinstance(static_argnums, int):
static_argnums = (static_argnums,)
@functools.wraps(fn)
def wrapper(*args, **kwargs):
def put(x):
try:
return jax.device_put(x)
except TypeError: # not a valid JAX type
return x
device_args = [
arg if (idx in ignore_argnums or idx in static_argnums) else tree_map(
put, arg) for idx, arg in enumerate(args)
]
device_kwargs = tree_map(put, kwargs)
return fn(*device_args, **device_kwargs)
return wrapper
@toolz.curry
@check_variant_arguments
def _without_device(fn, **unused_kwargs):
"""Variant that applies `jax.device_get` to the args of fn."""
@functools.wraps(fn)
def wrapper(*args, **kwargs):
def get(x):
if isinstance(x, jax.Array):
return jax.device_get(x)
return x
no_device_args = tree_map(get, args)
no_device_kwargs = tree_map(get, kwargs)
return fn(*no_device_args, **no_device_kwargs)
return wrapper
@toolz.curry
@check_variant_arguments
def _with_pmap(fn,
broadcast_args_to_devices=True,
reduce_fn="first_device_output",
n_devices=None,
axis_name="i",
devices=None,
in_axes=0,
static_broadcasted_argnums=(),
static_argnums=(),
backend=None,
**unused_kwargs):
"""Variant that applies `jax.pmap` to fn.
Args:
fn: A function to wrap.
broadcast_args_to_devices: Whether to broadcast `fn` args to pmap format
      (i.e. pmapped axes' sizes == the number of devices).
reduce_fn: A function to apply to outputs of `fn`.
n_devices: A number of devices to use (can specify a `backend` if required).
axis_name: An argument for `pmap`.
devices: An argument for `pmap`.
in_axes: An argument for `pmap`.
static_broadcasted_argnums: An argument for `pmap`.
static_argnums: An alias of ``static_broadcasted_argnums``.
backend: An argument for `pmap`.
**unused_kwargs: Unused kwargs (e.g. related to other variants).
Returns:
    Wrapped `fn` that accepts `args` and `kwargs` and returns `reduce_fn`
    applied to the output of the pmapped `fn`.
Raises:
ValueError: If `broadcast_args_to_devices` used with `in_axes` or
`static_broadcasted_argnums`; if number of available devices is less than
required; if pmappable arg axes' sizes are not equal to the number of
devices.
SkipTest: If the flag ``chex_skip_pmap_variant_if_single_device`` is set and
there is only one device available.
"""
if (FLAGS["chex_skip_pmap_variant_if_single_device"].value and
jax.device_count() < 2):
raise unittest.SkipTest(f"Only 1 device is available ({jax.devices()}).")
if broadcast_args_to_devices and in_axes != 0:
raise ValueError(
"Do not use `broadcast_args_to_devices` when specifying `in_axes`.")
# Set up a reduce function.
if reduce_fn == "first_device_output":
reduce_fn = lambda t: tree_map(lambda x: x[0], t)
elif reduce_fn == "identity" or reduce_fn is None: # Identity.
reduce_fn = lambda t: t
if not static_argnums and static_argnums != 0:
static_argnums = static_broadcasted_argnums
if isinstance(static_argnums, int):
static_argnums = (static_argnums,)
pmap_kwargs = dict(
axis_name=axis_name,
devices=devices,
in_axes=in_axes,
static_broadcasted_argnums=static_argnums,
backend=backend)
pmapped_fn = jax.pmap(fn, **pmap_kwargs)
@functools.wraps(pmapped_fn)
def wrapper(*args: pytypes.ArrayTree, **kwargs: pytypes.ArrayTree):
if kwargs and (in_axes != 0 or static_argnums):
raise ValueError("Do not use kwargs with `in_axes` or `static_argnums` "
"in pmapped function.")
devices_ = list(devices or jax.devices(backend))
n_devices_ = n_devices or len(devices_)
devices_ = devices_[:n_devices_]
if len(devices_) != n_devices_:
raise ValueError("Number of available devices is less than required for "
f"test ({len(devices_)} < {n_devices_})")
bcast_fn = lambda x: jnp.broadcast_to(x, (n_devices_,) + jnp.array(x).shape)
if broadcast_args_to_devices:
args = [
tree_map(bcast_fn, arg) if idx not in static_argnums else arg
for idx, arg in enumerate(args)
]
kwargs = tree_map(bcast_fn, kwargs)
else:
# Pmappable axes size must be equal to number of devices.
in_axes_ = in_axes if isinstance(in_axes,
(tuple, list)) else [in_axes] * len(args)
is_pmappable_arg = [
idx not in static_argnums and in_axes_[idx] is not None
for idx in range(len(args))
]
for is_pmappable_arg, arg in zip(is_pmappable_arg, args):
if not is_pmappable_arg:
continue
if not all(
x.shape[0] == n_devices_ for x in jax.tree_util.tree_leaves(arg)):
shapes = tree_map(jnp.shape, arg)
raise ValueError(
f"Pmappable arg axes size must be equal to number of devices, "
f"got: {shapes} (expected the first dim to be {n_devices_}). "
"Consider setting `broadcast_args_to_devices=True`.")
new_kwargs = dict(
axis_name=axis_name,
devices=devices_,
in_axes=in_axes,
static_broadcasted_argnums=static_argnums,
backend=backend)
# Re-compile fn if kwargs changed.
nonlocal pmap_kwargs
nonlocal pmapped_fn
if new_kwargs != pmap_kwargs:
pmap_kwargs = new_kwargs
pmapped_fn = jax.pmap(fn, **pmap_kwargs)
res = pmapped_fn(*args, **kwargs)
return reduce_fn(res)
return wrapper
_variant_decorators = dict({
ChexVariantType.WITH_JIT: _with_jit,
ChexVariantType.WITHOUT_JIT: _without_jit,
ChexVariantType.WITH_DEVICE: _with_device,
ChexVariantType.WITHOUT_DEVICE: _without_device,
ChexVariantType.WITH_PMAP: _with_pmap,
})
class Variant:
"""Variant class for typing and string representation."""
def __init__(self, name, fn):
self._fn = fn
self._name = name
def __repr__(self):
return self._name
def __call__(self, *args, **kwargs):
# Could apply decorators (currying, arg-checking) here
return self._fn(*args, **kwargs)
# Expose variant objects.
without_device = Variant("chex_without_device", _without_device)
without_jit = Variant("chex_without_jit", _without_jit)
with_device = Variant("chex_with_device", _with_device)
with_jit = Variant("chex_with_jit", _with_jit)
with_pmap = Variant("chex_with_pmap", _with_pmap)
ALL_VARIANTS = (without_device, without_jit, with_device, with_jit, with_pmap)
# Collect valid argument names from all variant decorators.
_valid_kwargs_keys = set()
for fn_ in _variant_decorators.values():
original_fn = fn_.func.__wrapped__
_valid_kwargs_keys.update(inspect.getfullargspec(original_fn).args)
|
chex-master
|
chex/_src/variants.py
|
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `asserts_chexify.py`."""
import functools
import re
import sys
import threading
import time
from typing import Any, Optional, Sequence, Type
from absl.testing import absltest
from absl.testing import parameterized
from chex._src import asserts
from chex._src import asserts_chexify
from chex._src import asserts_internal as _ai
from chex._src import variants
import jax
import jax.numpy as jnp
import numpy as np
EPS = 1e-6
chexify_async = functools.partial(asserts_chexify.chexify, async_check=True)
chexify_sync = functools.partial(asserts_chexify.chexify, async_check=False)
def get_chexify_err_regex(name, msg):
return re.escape(_ai.get_chexify_err_message(name, 'ANY')).replace(
'ANY', f'.*{msg}.*'
)
# Follows `ai.TChexAssertion`'s API.
def _assert_noop(*args,
custom_message: Optional[str] = None,
custom_message_format_vars: Sequence[Any] = (),
include_default_message: bool = True,
exception_type: Type[Exception] = AssertionError,
**kwargs) -> None:
"""No-op."""
del args, custom_message, custom_message_format_vars
del include_default_message, exception_type, kwargs
# Define a simple Chex assertion for testing purposes.
def _assert_tree_positive(tree):
# Use jnp instead of np for testing purposes.
if not all((x > 0).all() for x in jax.tree_util.tree_leaves(tree)):
raise AssertionError('Tree contains non-positive elems!')
def _jittable_assert_tree_positive(tree):
# Jittable version of `_assert_tree_positive`.
pred = jnp.all(
jnp.array([(x > 0).all() for x in jax.tree_util.tree_leaves(tree)]))
asserts_chexify.checkify.check(pred, 'Tree contains non-positive elems!')
return pred
chex_static_assert_positive = _ai.chex_assertion(
assert_fn=_assert_tree_positive,
jittable_assert_fn=None,
name='assert_tree_positive_test')
chex_value_assert_positive = _ai.chex_assertion(
assert_fn=_assert_tree_positive,
jittable_assert_fn=_jittable_assert_tree_positive,
name='assert_tree_positive_test')
class AssertsChexifyTest(variants.TestCase):
"""Simple tests for chexify assertions."""
@variants.variants(with_jit=True, without_jit=True)
def test_static_assertion(self):
# Tests that static assertions can be used w/ and w/o `chexify()`.
shape = (2, 3)
# Define a simple static assertion.
@asserts._static_assertion
def chex_assert_shape(array, expected):
if array.shape != expected:
raise AssertionError('Wrong shape!')
# Define a simple function that uses the assertion.
def _sum_fn(tree):
jax.tree_map(lambda x: chex_assert_shape(x, shape), tree)
return sum(x.sum() for x in jax.tree_util.tree_leaves(tree))
chexified_sum_fn = chexify_sync(self.variant(_sum_fn))
# Passes in all contexts.
x = {'a': np.ones(shape), 'b': {'c': np.ones(shape)}}
self.assertEqual(_sum_fn(x), 2 * np.prod(shape))
self.assertEqual(self.variant(_sum_fn)(x), 2 * np.prod(shape))
self.assertEqual(chexified_sum_fn(x), 2 * np.prod(shape))
# Fails in all contexts.
x_wrong_shape = {'a': np.ones(shape), 'b': {'c': np.ones(shape + shape)}}
with self.assertRaisesRegex(AssertionError, 'Wrong shape!'):
_sum_fn(x_wrong_shape)
with self.assertRaisesRegex(AssertionError, 'Wrong shape!'):
self.variant(_sum_fn)(x_wrong_shape)
with self.assertRaisesRegex(AssertionError, 'Wrong shape!'):
chexified_sum_fn(x_wrong_shape)
@variants.variants(with_jit=True, without_jit=True)
def test_nested_chexification(self):
"""Tests nested wrapping."""
@chexify_sync
@self.variant
def _pos_sum(x_1, x_2):
@chexify_sync
def _chexified_assert_fn(x):
chex_value_assert_positive(x, custom_message='err_label_1')
_chexified_assert_fn(x_1)
chex_value_assert_positive(x_2, custom_message='err_label_2')
return x_1 + x_2
with self.assertRaisesRegex(RuntimeError,
'Nested @chexify wrapping is disallowed'):
_pos_sum(1, 1)
def test_async_mode(self):
@jax.jit
def _pos_sq(x):
chex_value_assert_positive(x, custom_message='err_label')
return jnp.dot(x, x) + 3
valid_x = jnp.ones((1000, 1000))
invalid_x = -valid_x
# Test sync.
sync_check = chexify_sync(_pos_sq)
sync_check(valid_x)
with self.assertRaisesRegex(AssertionError, 'err_label'):
sync_check(invalid_x)
# Test async.
async_check = chexify_async(_pos_sq)
async_check(valid_x)
# Implicit wait, through timer.
async_check(invalid_x) # enqueued and immediately returned
time.sleep(5)
with self.assertRaisesRegex(AssertionError, 'err_label'):
async_check(valid_x) # the error gets retrieved
# Implicit wait, through the subsequent call & barrier.
async_check(invalid_x) # enqueued and immediately returned
# error is not ready yet, so no assertion raised
async_check(valid_x).block_until_ready()
time.sleep(1) # wait until the corresponding future is notified
with self.assertRaisesRegex(AssertionError, 'err_label'):
async_check(valid_x) # the error gets retrieved
# Explicit wait, through object-local wait.
async_check(invalid_x) # enqueued
with self.assertRaisesRegex(AssertionError, 'err_label'):
async_check.wait_checks() # the error gets retrieved
# Explicit wait, through module-level wait.
async_check(invalid_x) # enqueued
with self.assertRaisesRegex(AssertionError, 'err_label'):
asserts_chexify.block_until_chexify_assertions_complete()
def test_uninspected_checks(self):
@jax.jit
def _pos_sum(x):
chex_value_assert_positive(x, custom_message='err_label')
return x.sum()
invalid_x = -jnp.ones(3)
chexify_async(_pos_sum)(invalid_x) # async error
raised_exception = None
def unraisablehook(unraisable):
nonlocal raised_exception
raised_exception = unraisable.exc_value
if sys.version_info >= (3, 10):
# In Python 3.10+, _run_exitfuncs logs the exception using unraisablehook
# without raising the last exception.
old_unraisablehook = sys.unraisablehook
sys.unraisablehook = unraisablehook
try:
asserts_chexify.atexit._run_exitfuncs()
finally:
sys.unraisablehook = old_unraisablehook
self.assertIsInstance(raised_exception, AssertionError)
self.assertIn('err_label', str(raised_exception))
else:
with self.assertRaisesRegex(AssertionError, 'err_label'):
asserts_chexify.atexit._run_exitfuncs()
def test_docstring_example(self):
@chexify_async
@jax.jit
def logp1_abs_safe(x):
asserts.assert_tree_all_finite(x)
return jnp.log(jnp.abs(x) + 1)
logp1_abs_safe(jnp.ones(2)) # OK
asserts_chexify.block_until_chexify_assertions_complete()
err_regex = re.escape(_ai.get_chexify_err_message('assert_tree_all_finite'))
with self.assertRaisesRegex(AssertionError, f'{err_regex}.*chexify_test'):
logp1_abs_safe(jnp.array([jnp.nan, 3])) # FAILS
logp1_abs_safe.wait_checks()
def test_checkify_errors(self):
@jax.jit
def take_by_index_and_div(x, i, y):
return x[i] / y
# Checks only Div-by-0.
take_by_index_and_div_safe_div = asserts_chexify.chexify(
take_by_index_and_div,
async_check=False,
errors=asserts_chexify.ChexifyChecks.div,
)
take_by_index_and_div_safe_div(jnp.ones(2), 1, 2) # OK
take_by_index_and_div_safe_div(jnp.ones(2), 10, 2) # OOB undetected
with self.assertRaisesRegex(
asserts_chexify.checkify.JaxRuntimeError, 'division by zero'
):
take_by_index_and_div_safe_div(jnp.ones(2), 1, 0) # Div-by-0
# Checks both OOB and Div-by-0.
take_by_index_and_div_safe = asserts_chexify.chexify(
take_by_index_and_div,
async_check=False,
errors=(
asserts_chexify.ChexifyChecks.index
| asserts_chexify.ChexifyChecks.div
),
)
take_by_index_and_div_safe(jnp.ones(2), 1, 2) # OK
with self.assertRaisesRegex(
asserts_chexify.checkify.JaxRuntimeError, 'out-of-bounds'
):
take_by_index_and_div_safe(jnp.ones(2), 10, 2) # OOB
with self.assertRaisesRegex(
asserts_chexify.checkify.JaxRuntimeError, 'division by zero'
):
take_by_index_and_div_safe(jnp.ones(2), 1, 0) # Div-by-0
with self.assertRaisesRegex(
asserts_chexify.checkify.JaxRuntimeError, 'out-of-bounds'
):
take_by_index_and_div_safe(jnp.ones(2), 10, 0) # OOB (first) & Div-by-0
def test_partial_python_fn(self):
def fn(x, y):
asserts.assert_trees_all_equal(x, y)
return jax.tree_map(jnp.add, x, y)
partial_fn = functools.partial(fn, y=jnp.array([1]))
chexified_fn = chexify_async(partial_fn) # note: fn is not transformed
chexified_fn(jnp.array([1]))
chexified_fn.wait_checks()
with self.assertRaisesRegex(AssertionError, '0 and 1 differ'):
chexified_fn(jnp.array([2]))
chexified_fn.wait_checks() # Fail: not equal
class AssertsChexifyTestSuite(variants.TestCase):
"""Test suite for chexify assertions."""
def run_test_suite(self, make_test_fn, all_valid_args, all_invalid_args,
failure_labels, jax_transform, run_pure):
"""Runs a set of tests for static & value assertions.
See `run_test_suite_with_log_abs_fn` for example.
Args:
make_test_fn: A function that returns a pure function to transform.
all_valid_args: A list of collections of args that pass assertions.
all_invalid_args: A list of collections of args that fail assertions.
failure_labels: A list of custom labels, one per every failed assertion.
jax_transform: A function that accepts a pure function and returns its
transformed version.
run_pure: A bool suggesting whether pure_fn can be called without
transforms (e.g. it isn't the case for f-ns that use JAX collectives).
"""
assert len(all_invalid_args) == len(failure_labels)
# Define 3 versions of the tested function.
if run_pure:
fn_no_assert = make_test_fn(_assert_noop)
fn_static_assert = make_test_fn(chex_static_assert_positive)
fn_value_assert = make_test_fn(chex_value_assert_positive)
# Wrapped fn with value asserts.
chexified_fn_with_value_asserts = chexify_sync(
jax_transform(fn_value_assert))
# Run tests with valid arguments.
for valid_args in all_valid_args:
if run_pure:
# Test all versions return the same outputs.
asserts.assert_trees_all_equal(
fn_no_assert(*valid_args), fn_static_assert(*valid_args))
asserts.assert_trees_all_equal(
fn_no_assert(*valid_args), fn_value_assert(*valid_args))
# `ConcretizationTypeError` if static assertion is used in value checks.
with self.assertRaises(jax.errors.ConcretizationTypeError):
jax_transform(fn_static_assert)(*valid_args)
# Value assertions pass.
chexified_fn_with_value_asserts(*valid_args)
# Reports incorrect usage without `chexify()`.
with self.assertRaisesRegex(
RuntimeError, 'can only be called from functions wrapped .*chexify'):
# Create a local object to avoid reusing jax internal cache.
local_fn_value_assert = make_test_fn(chex_value_assert_positive)
jax_transform(local_fn_value_assert)(*valid_args)
# Run tests with invalid arguments.
for invalid_args, label in zip(all_invalid_args, failure_labels):
if run_pure:
# Static assertion fails on incorrect inputs (without transformations).
with self.assertRaisesRegex(AssertionError, re.escape(label)):
fn_static_assert(*invalid_args)
# Value assertion fails on incorrect inputs (with transformations).
err_regex = get_chexify_err_regex('assert_tree_positive_test', label)
with self.assertRaisesRegex(AssertionError, err_regex):
chexified_fn_with_value_asserts(*invalid_args)
# Reports incorrect usage without `chexify()`.
with self.assertRaisesRegex(
RuntimeError, 'can only be called from functions wrapped .*chexify'):
# Create a local object to avoid reusing jax internal cache.
local_fn_value_assert = make_test_fn(chex_value_assert_positive)
jax_transform(local_fn_value_assert)(*invalid_args)
def run_test_suite_with_log_abs_fn(self, make_log_fn, jax_transform, devices,
run_pure, run_in_thread):
"""Generates valid and invalid inputs for log_abs_fn and runs the tests."""
x_pos = {
'a': np.ones((10, 2)),
'b': {
'c': np.array([[5, 2] for _ in range(10)])
}
}
x_with_neg = {
'a': np.ones((10, 2)),
'b': {
'c': np.array([[5, -1] for _ in range(10)])
}
}
(x_pos, x_with_neg) = jax.device_put_replicated((x_pos, x_with_neg),
devices)
all_valid_args = ((x_pos, x_pos),)
all_invalid_args = (
(x_with_neg, x_pos),
(x_pos, x_with_neg),
(x_with_neg, x_with_neg),
)
failure_labels = (
'label_1',
'label_2',
'label_1',
)
run_tests_callback = functools.partial(self.run_test_suite, make_log_fn,
all_valid_args, all_invalid_args,
failure_labels, jax_transform,
run_pure)
if run_in_thread:
failure = AssertionError('not executed')
def _run_tests_in_thread():
nonlocal failure
failure = None
try:
run_tests_callback()
except Exception as e: # pylint:disable=broad-except
failure = e
thr = threading.Thread(target=_run_tests_in_thread, daemon=False)
thr.start()
thr.join(timeout=30)
if thr.is_alive():
raise TimeoutError('Thread is alive after 30 seconds.')
if failure is not None:
raise AssertionError(f'Thread failed with: {failure}.')
else:
run_tests_callback()
@parameterized.named_parameters(('main', False), ('thread', True))
def test_log_abs_fn_jitted(self, run_in_thread):
"""Tests simple jit transformation."""
def _make_log_fn(assert_input_fn: _ai.TChexAssertion):
def _pure_log_fn(tree_1, tree_2):
# Call twice to make sure all deps are retained after XLA optimizations.
assert_input_fn(tree_1, custom_message='label_1')
assert_input_fn(tree_2, custom_message='label_2')
return jax.tree_map(lambda x1, x2: jnp.log(jnp.abs(x1 + x2) + EPS),
tree_1, tree_2)
return _pure_log_fn
with jax.checking_leaks():
self.run_test_suite_with_log_abs_fn(
make_log_fn=_make_log_fn,
jax_transform=jax.jit,
devices=jax.local_devices()[:1],
run_pure=True,
run_in_thread=run_in_thread)
@parameterized.named_parameters(('main', False), ('thread', True))
def test_log_abs_fn_jitted_nested_wrap(self, run_in_thread):
"""Tests nested jit transforms (wrapping)."""
def _make_log_fn(assert_input_fn: _ai.TChexAssertion):
@jax.jit
def _abs(tree):
assert_input_fn(tree, custom_message='label_1')
tree_p1 = jax.tree_map(lambda x: x + 1, tree)
return jax.tree_map(jnp.abs, tree_p1)
def _pure_log_fn(tree_1, tree_2):
tree_1 = _abs(tree_1)
assert_input_fn(tree_2, custom_message='label_2')
return jax.tree_map(lambda x1, x2: jnp.log(jnp.abs(x1 + x2) + EPS),
tree_1, tree_2)
return _pure_log_fn
with jax.checking_leaks():
self.run_test_suite_with_log_abs_fn(
make_log_fn=_make_log_fn,
jax_transform=jax.jit,
devices=jax.local_devices()[:1],
run_pure=False, # do not run because internal jit is not checkified
run_in_thread=run_in_thread)
@parameterized.named_parameters(('main', False), ('thread', True))
def test_log_abs_fn_jitted_nested_call(self, run_in_thread):
"""Tests nested jit transforms (calling)."""
def _make_log_fn(assert_input_fn: _ai.TChexAssertion):
def _abs(tree):
assert_input_fn(tree, custom_message='label_1')
tree_p1 = jax.tree_map(lambda x: x + 1, tree)
return jax.tree_map(jnp.abs, tree_p1)
def _pure_log_fn(tree_1, tree_2):
tree_1 = jax.jit(_abs)(tree_1)
assert_input_fn(tree_2, custom_message='label_2')
return jax.tree_map(lambda x1, x2: jnp.log(jnp.abs(x1 + x2) + EPS),
tree_1, tree_2)
return _pure_log_fn
with jax.checking_leaks():
self.run_test_suite_with_log_abs_fn(
make_log_fn=_make_log_fn,
jax_transform=jax.jit,
devices=jax.local_devices()[:1],
run_pure=False, # do not run because internal jit is not checkified
run_in_thread=run_in_thread)
@parameterized.named_parameters(('main', False), ('thread', True))
def test_log_abs_fn_pmapped(self, run_in_thread):
"""Tests pmap transform."""
def _make_log_fn(assert_input_fn: _ai.TChexAssertion):
def _pure_log_fn(tree_1, tree_2):
# Call twice to make sure all deps are retained after XLA optimizations.
assert_input_fn(tree_1, custom_message='label_1')
assert_input_fn(tree_2, custom_message='label_2')
tree_1 = jax.lax.pmean(tree_1, axis_name='i')
return jax.tree_map(lambda x1, x2: jnp.log(jnp.abs(x1 + x2) + EPS),
tree_1, tree_2)
return _pure_log_fn
with jax.checking_leaks():
self.run_test_suite_with_log_abs_fn(
make_log_fn=_make_log_fn,
jax_transform=lambda fn: jax.pmap(fn, axis_name='i'),
devices=jax.local_devices(),
run_pure=False, # do not run because the f-n contains collective ops
run_in_thread=run_in_thread)
@parameterized.named_parameters(('main', False), ('thread', True))
def test_log_abs_fn_jitted_vmapped(self, run_in_thread):
"""Tests vmap transform."""
def _make_log_fn(assert_input_fn: _ai.TChexAssertion):
def _pure_log_fn(tree_1, tree_2):
# Call twice to make sure all deps are retained after XLA optimizations.
assert_input_fn(tree_1, custom_message='label_1')
assert_input_fn(tree_2, custom_message='label_2')
return jax.tree_map(lambda x1, x2: jnp.log(jnp.abs(x1 + x2) + EPS),
tree_1, tree_2)
return _pure_log_fn
with jax.checking_leaks():
self.run_test_suite_with_log_abs_fn(
make_log_fn=_make_log_fn,
jax_transform=lambda fn: jax.jit(jax.vmap(fn)), # jax + vmap
devices=jax.local_devices()[:1],
run_pure=True,
run_in_thread=run_in_thread,
)
class AssertsLibraryTest(parameterized.TestCase):
def test_assert_tree_all_finite(self):
@jax.jit
def fn(x):
asserts.assert_tree_all_finite(x)
return jax.tree_map(jnp.sum, x)
chexified_fn = asserts_chexify.chexify(fn, async_check=False)
chexified_fn({'a': 0, 'b': jnp.ones(3)}) # OK
err_regex = get_chexify_err_regex(
'assert_tree_all_finite', 'Tree contains non-finite value'
)
with self.assertRaisesRegex(AssertionError, err_regex):
chexified_fn({'a': 1, 'b': jnp.array([1, jnp.nan, 3])}) # NaN
with self.assertRaisesRegex(AssertionError, re.escape("'b': Array(nan")):
chexified_fn({'a': 1, 'b': jnp.array([1, jnp.nan, 3])}) # NaN
def test_assert_trees_all_equal(self):
@jax.jit
def fn(x, y):
asserts.assert_trees_all_equal(x, y)
return jax.tree_map(jnp.add, x, y)
chexified_fn = asserts_chexify.chexify(fn, async_check=False)
tree_1 = {'a': jnp.array([3]), 'b': jnp.array([10, 10])}
tree_2 = {'a': jnp.array([3]), 'b': jnp.array([10, 20])}
chexified_fn(tree_1, tree_1) # OK
err_regex = get_chexify_err_regex(
'assert_trees_all_equal', 'Values not exactly equal:'
)
with self.assertRaisesRegex(AssertionError, err_regex):
chexified_fn(tree_1, tree_2) # Fail: not equal
with self.assertRaisesRegex(
AssertionError, re.escape("Trees 0 and 1 differ in leaves '('b',)'")
):
chexified_fn(tree_1, tree_2) # Fail: not equal
def test_assert_trees_all_close(self):
@jax.jit
def fn(x, y, z):
asserts.assert_trees_all_close(x, y, z, rtol=0.5, atol=0.5)
return jax.tree_map(jnp.add, x, y)
chexified_fn = asserts_chexify.chexify(fn, async_check=False)
tree_1 = {1: {'a': jnp.array([3]), 'b': jnp.array([10, 10])}}
tree_1_close = {1: {'a': jnp.array([3.1]), 'b': jnp.array([10.1, 10.1])}}
tree_2 = {1: {'a': jnp.array([3]), 'b': jnp.array([10, 20])}}
chexified_fn(tree_1, tree_1, tree_1) # OK
chexified_fn(tree_1, tree_1_close, tree_1) # OK
err_regex = get_chexify_err_regex(
'assert_trees_all_close', 'Values not approximately equal'
)
with self.assertRaisesRegex(AssertionError, err_regex):
chexified_fn(tree_1, tree_2, tree_1) # Fail: not close
with self.assertRaisesRegex(
AssertionError, re.escape("Trees 0 and 2 differ in leaves '(1, 'b')':")
):
chexified_fn(tree_1, tree_1_close, tree_2) # Fail: not close
def test_custom_message(self):
@jax.jit
def fn(x, y):
asserts.assert_trees_all_equal(
x,
y,
custom_message='sum(x)={}',
custom_message_format_vars=[
sum(l.sum() for l in jax.tree_util.tree_leaves(x))
],
)
return jax.tree_map(jnp.add, x, y)
chexified_fn = asserts_chexify.chexify(fn, async_check=False)
tree_1 = {'a': jnp.array([3]), 'b': jnp.array([10, 10])}
tree_2 = {'a': jnp.array([3]), 'b': jnp.array([10, 320])}
with self.assertRaisesRegex(
AssertionError, re.escape('Custom message: sum(x)=') + '.*23'
):
chexified_fn(tree_1, tree_2) # Fail: not equal
with self.assertRaisesRegex(
AssertionError, re.escape('Custom message: sum(x)=') + '.*333'
):
chexified_fn(tree_2, tree_1) # Fail: not equal
if __name__ == '__main__':
jax.config.update('jax_numpy_rank_promotion', 'raise')
absltest.main()
|
chex-master
|
chex/_src/asserts_chexify_test.py
|
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities to patch JAX functions with faked implementations.
This module provides fake implementations of jax.jit and jax.pmap, which can be
patched over existing implementations for easier debugging.
See https://www.martinfowler.com/articles/mocksArentStubs.html
"""
import contextlib
import functools
import inspect
import os
import re
from typing import Any, Callable, Iterable, Optional, Union
from unittest import mock
from absl import flags
import jax
import jax.numpy as jnp
FLAGS = flags.FLAGS
flags.DEFINE_integer('chex_n_cpu_devices', 1,
'Number of CPU threads to use as devices in tests.')
flags.DEFINE_bool('chex_assert_multiple_cpu_devices', False,
'Whether to fail if a number of CPU devices is less than 2.')
_xla_device_count_flag_regexp = (
r'[-]{0,2}xla_force_host_platform_device_count=(\d+)?(\s|$)')
def get_n_cpu_devices_from_xla_flags() -> int:
"""Parses number of CPUs from the XLA environment flags."""
m = re.match(_xla_device_count_flag_regexp, os.getenv('XLA_FLAGS', ''))
# At least one CPU device must be available.
n_devices = int(m.group(1)) if m else 1
return n_devices
def set_n_cpu_devices(n: Optional[int] = None) -> None:
"""Forces XLA to use `n` CPU threads as host devices.
This allows `jax.pmap` to be tested on a single-CPU platform.
This utility only takes effect before XLA backends are initialized, i.e.
before any JAX operation is executed (including `jax.devices()` etc.).
See https://github.com/google/jax/issues/1408.
Args:
n: The required number of CPU devices (``FLAGS.chex_n_cpu_devices`` is used by
default).
Raises:
RuntimeError: If XLA backends were already initialized.
"""
n = n or FLAGS['chex_n_cpu_devices'].value
n_devices = get_n_cpu_devices_from_xla_flags()
cpu_backend = (jax.lib.xla_bridge._backends or {}).get('cpu', None) # pylint: disable=protected-access
if cpu_backend is not None and n_devices != n:
raise RuntimeError(
f'Attempted to set {n} devices, but {n_devices} CPUs already available:'
' ensure that `set_n_cpu_devices` is executed before any JAX operation.'
)
xla_flags = os.getenv('XLA_FLAGS', '')
xla_flags = re.sub(_xla_device_count_flag_regexp, '', xla_flags)
os.environ['XLA_FLAGS'] = ' '.join(
[f'--xla_force_host_platform_device_count={n}'] + xla_flags.split())
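# Illustrative usage sketch (the device count below is an arbitrary choice, not
# a chex default): call `set_n_cpu_devices` at the very start of the program,
# before any JAX computation initializes the backends, so that `jax.pmap` can
# be exercised on a host with a single physical CPU.
#
#   set_n_cpu_devices(4)
#   assert jax.device_count('cpu') == 4  # holds only if backends were not
#                                        # initialized before the call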
def convert_to_varargs(sig, *args, **kwargs):
"""Converts varargs+kwargs function arguments into varargs only."""
bound_args = sig.bind(*args, **kwargs)
return bound_args.args
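# For example (illustrative): for `def f(a, b, c): ...`,
# `convert_to_varargs(inspect.signature(f), 1, c=3, b=2)` returns `(1, 2, 3)`,
# i.e. keyword arguments are folded back into positional order.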
@functools.wraps(jax.jit)
def _fake_jit(fn, *unused_args, **unused_kwargs):
return fn
def _ignore_axis_index_groups(fn):
"""Wrapper that forces axis_index_groups to be None.
This is to avoid problems within fake_pmap where parallel operations are
performed with vmap, rather than pmap. Parallel operations where
`axis_index_groups` is not `None` are not currently supported under vmap.
Args:
fn: the function to wrap
Returns:
a wrapped function that forces any keyword argument named
`axis_index_groups` to be None
"""
@functools.wraps(fn)
def _fake(*args, axis_index_groups=None, **kwargs):
del axis_index_groups
return fn(*args, axis_index_groups=None, **kwargs)
return _fake
_fake_all_gather = _ignore_axis_index_groups(jax.lax.all_gather)
_fake_all_to_all = _ignore_axis_index_groups(jax.lax.all_to_all)
_fake_psum = _ignore_axis_index_groups(jax.lax.psum)
_fake_pmean = _ignore_axis_index_groups(jax.lax.pmean)
_fake_pmax = _ignore_axis_index_groups(jax.lax.pmax)
_fake_pmin = _ignore_axis_index_groups(jax.lax.pmin)
_fake_pswapaxes = _ignore_axis_index_groups(jax.lax.pswapaxes)
@functools.wraps(jax.pmap)
def _fake_pmap(fn,
axis_name: Optional[Any] = None,
*,
in_axes=0,
static_broadcasted_argnums: Union[int, Iterable[int]] = (),
jit_result: bool = False,
fake_parallel_axis: bool = False,
**unused_kwargs):
"""Fake implementation of pmap using vmap."""
if isinstance(static_broadcasted_argnums, int):
static_broadcasted_argnums = (static_broadcasted_argnums,)
if static_broadcasted_argnums and isinstance(in_axes, dict):
raise NotImplementedError(
'static_broadcasted_argnums with dict in_axes not supported.')
fn_signature = inspect.signature(
fn,
# Disable 'follow wrapped' because we want the exact signature of fn,
# not the signature of any function it might wrap.
follow_wrapped=False)
@functools.wraps(fn)
def wrapped_fn(*args, **kwargs):
# Convert kwargs to varargs
# This is a workaround for vmapped functions not working with kwargs
call_args = convert_to_varargs(fn_signature, *args, **kwargs)
if static_broadcasted_argnums:
# Make sure vmap does not try to map over `static_broadcasted_argnums`.
if isinstance(in_axes, int):
vmap_in_axes = [in_axes] * len(call_args)
else:
vmap_in_axes = list(in_axes)
for argnum in static_broadcasted_argnums:
vmap_in_axes[argnum] = None
# To protect the arguments from `static_broadcasted_argnums`
# from turning into tracers (because of vmap), we capture the original
# `call_args` and replace the passed-in tracers with the original values.
original_call_args = call_args
# A function passed to vmap, that will simply replace the static args
# with their original values.
def fn_without_statics(*args):
args_with_original_statics = [
orig_arg if i in static_broadcasted_argnums else arg
for i, (arg, orig_arg) in enumerate(zip(args, original_call_args))
]
return fn(*args_with_original_statics)
# Make sure to avoid turning static args into tracers: Some python objects
# might not survive vmap. Just replace with an unused constant.
call_args = [
1 if i in static_broadcasted_argnums else arg
for i, arg in enumerate(call_args)
]
else:
vmap_in_axes = in_axes
fn_without_statics = fn
vmapped_fn = jax.vmap(
fn_without_statics, in_axes=vmap_in_axes, axis_name=axis_name
)
if jit_result:
vmapped_fn = jax.jit(vmapped_fn)
if fake_parallel_axis:
call_args = jax.tree_util.tree_map(
lambda x: jnp.expand_dims(x, axis=0), call_args)
output = vmapped_fn(*call_args)
if fake_parallel_axis:
output = jax.tree_util.tree_map(lambda x: jnp.squeeze(x, axis=0), output)
return output
return wrapped_fn
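# Illustrative sketch of the static-argument handling above (`_fake_pmap` is
# normally applied indirectly via the `fake_pmap` context manager below):
#
#   fake_fn = _fake_pmap(lambda x, n: x * n, static_broadcasted_argnums=1)
#   fake_fn(jnp.ones((jax.local_device_count(), 3)), 2)
#   # vmaps over axis 0 of `x` while `n` stays a static Python value.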
# pylint:disable=unnecessary-dunder-call
class FakeContext(contextlib.ExitStack):
def start(self):
self.__enter__()
def stop(self):
self.__exit__(None, None, None)
# pylint:enable=unnecessary-dunder-call
def fake_jit(enable_patching: bool = True) -> FakeContext:
"""Context manager for patching `jax.jit` with the identity function.
This is intended to be used as a debugging tool to programmatically enable or
disable JIT compilation.
Can be used either as a context managed scope:
.. code-block:: python
with chex.fake_jit():
@jax.jit
def foo(x):
...
or by calling `start` and `stop`:
.. code-block:: python
fake_jit_context = chex.fake_jit()
fake_jit_context.start()
@jax.jit
def foo(x):
...
fake_jit_context.stop()
Args:
enable_patching: Whether to patch `jax.jit`.
Returns:
Context where `jax.jit` is patched with the identity function, and JAX is
configured to avoid jitting internally whenever possible in functions
such as `jax.lax.scan`, etc.
"""
stack = FakeContext()
if enable_patching:
stack.enter_context(mock.patch('jax.jit', _fake_jit))
# Some functions like jax.lax.scan also use jit internally. Most respect
# the config setting `jax_disable_jit` and replace their implementation
# with a dummy, jit-free one if the setting is on. Use this mechanism too.
@contextlib.contextmanager
def _jax_disable_jit():
original_value = jax.config.jax_disable_jit
jax.config.update('jax_disable_jit', True)
try:
yield
finally:
jax.config.update('jax_disable_jit', original_value)
stack.enter_context(_jax_disable_jit())
return stack
def fake_pmap(
enable_patching: bool = True,
jit_result: bool = False,
ignore_axis_index_groups: bool = False,
fake_parallel_axis: bool = False,
) -> FakeContext:
"""Context manager for patching `jax.pmap` with `jax.vmap`.
This is intended to be used as a debugging tool to programmatically replace
pmap transformations with a non-parallel vmap transformation.
Can be used either as a context managed scope:
.. code-block:: python
with chex.fake_pmap():
@jax.pmap
def foo(x):
...
or by calling `start` and `stop`:
.. code-block:: python
fake_pmap_context = chex.fake_pmap()
fake_pmap_context.start()
@jax.pmap
def foo(x):
...
fake_pmap_context.stop()
Args:
enable_patching: Whether to patch `jax.pmap`.
jit_result: Whether the transformed function should be jitted despite not
being pmapped.
ignore_axis_index_groups: Whether to force any parallel operation within the
context to set `axis_index_groups` to be None. This is a compatibility
option to allow users of the axis_index_groups parameter to run under the
fake_pmap context. This feature is not currently supported in vmap, and
will fail, so we force the parameter to be `None`.
*Warning*: This will produce different results than running under `jax.pmap`.
fake_parallel_axis: Whether to add a leading axis of size 1 to all inputs and
squeeze it from all outputs, emulating the parallel (device) axis.
Returns:
Context where `jax.pmap` is patched with `jax.vmap`.
"""
stack = FakeContext()
if enable_patching:
patched_pmap = functools.partial(
_fake_pmap,
jit_result=jit_result,
fake_parallel_axis=fake_parallel_axis)
stack.enter_context(mock.patch('jax.pmap', patched_pmap))
if ignore_axis_index_groups:
stack.enter_context(mock.patch('jax.lax.all_gather', _fake_all_gather))
stack.enter_context(mock.patch('jax.lax.all_to_all', _fake_all_to_all))
stack.enter_context(mock.patch('jax.lax.psum', _fake_psum))
stack.enter_context(mock.patch('jax.lax.pmean', _fake_pmean))
stack.enter_context(mock.patch('jax.lax.pmax', _fake_pmax))
stack.enter_context(mock.patch('jax.lax.pmin', _fake_pmin))
stack.enter_context(mock.patch('jax.lax.pswapaxes', _fake_pswapaxes))
else:
# Use default implementations
pass
return stack
def fake_pmap_and_jit(enable_pmap_patching: bool = True,
enable_jit_patching: bool = True) -> FakeContext:
"""Context manager for patching `jax.jit` and `jax.pmap`.
This is a convenience function, equivalent to nested `chex.fake_pmap` and
`chex.fake_jit` contexts.
Note that calling (the true implementation of) `jax.pmap` will compile the
function, so faking `jax.jit` in this case will not stop the function from
being compiled.
Args:
enable_pmap_patching: Whether to patch `jax.pmap`.
enable_jit_patching: Whether to patch `jax.jit`.
Returns:
Context where jax.pmap and jax.jit are patched with jax.vmap and the
identity function
"""
stack = FakeContext()
stack.enter_context(fake_pmap(enable_pmap_patching))
stack.enter_context(fake_jit(enable_jit_patching))
return stack
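# Illustrative usage sketch (runs on a single-device host because the patched
# `jax.pmap` executes as `jax.vmap` and the patched `jax.jit` is a no-op):
#
#   with fake_pmap_and_jit():
#     @jax.pmap
#     @jax.jit
#     def step(x):
#       return x + 1
#     step(jnp.zeros((1, 3)))  # eager, debuggable execution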
class OnCallOfTransformedFunction():
"""Injects a callback into any transformed function.
A typical use-case is jax.jit or jax.pmap, which is often hidden deep inside
the code. This context manager allows injecting a callback into any function
that is transformed by the user-specified transformation.
The callback will receive the transformed function and its arguments.
This can be useful for debugging, profiling and checking the calls of any
transformed function in a program.
For instance:
with chex.OnCallOfTransformedFunction('jax.jit', print):
[...]
would print all calls to any function which was jit-compiled within this
context.
We can also automatically create profiles on the first call of all the
jit-compiled functions in the program:
class profile_once():
def __init__(self):
self._first_call = True
def __call__(self, fn, *args, **kwargs):
if self._first_call:
self._first_call = False
print(profile_from_HLO(fn.lower(*args, **kwargs)))
with chex.OnCallOfTransformedFunction('jax.jit', profile_once()):
[...]
"""
def __init__(self, fn_transformation: str, callback_fn: Callable[..., Any]):
"""Creates a new OnCallOfTransformedFunction context manager.
Args:
fn_transformation: identifier of the function transformation e.g.
'jax.jit', 'jax.pmap', ...
callback_fn: A callback function which receives the transformed function
and its arguments on every call.
"""
self._fn_transformation = fn_transformation
self._callback_fn = callback_fn
self._patch: mock._patch[Callable[[Any], Any]] = None # pylint: disable=unsubscriptable-object
self._original_fn_transformation = None
def __enter__(self):
def _new_fn_transformation(fn, *args, **kwargs):
"""Returns a transformed version of the given function."""
transformed_fn = self._original_fn_transformation(fn, *args, **kwargs)
@functools.wraps(transformed_fn)
def _new_transformed_fn(*args, **kwargs):
"""Returns result of the returned function and calls the callback."""
self._callback_fn(transformed_fn, *args, **kwargs)
return transformed_fn(*args, **kwargs)
return _new_transformed_fn
self._patch = mock.patch(self._fn_transformation, _new_fn_transformation)
self._original_fn_transformation, unused_local = self._patch.get_original()
self._patch.start()
def __exit__(self, *unused_args):
self._patch.stop()
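# Illustrative usage sketch: counting invocations of jit-compiled functions.
#
#   calls = []
#   with OnCallOfTransformedFunction('jax.jit',
#                                    lambda fn, *a, **kw: calls.append(fn)):
#     f = jax.jit(lambda x: x + 1)
#     f(1)
#     f(2)
#   assert len(calls) == 2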
|
chex-master
|
chex/_src/fake.py
|
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Type definitions to use for type annotations."""
from typing import Any, Iterable, Mapping, Union
import jax
import jax.numpy as jnp
import numpy as np
# Special types of arrays.
ArrayNumpy = np.ndarray
# For instance checking, use `isinstance(x, jax.Array)`.
ArrayDevice = jax.Array
# Types for backward compatibility.
ArraySharded = jax.Array
ArrayBatched = jax.Array
# Generic array type.
# Similar to `jax.typing.ArrayLike` but does not accept python scalar types.
Array = Union[
ArrayDevice, ArrayBatched, ArraySharded, # JAX array type
ArrayNumpy, # NumPy array type
np.bool_, np.number, # NumPy scalar types
]
# A tree of generic arrays.
ArrayTree = Union[Array, Iterable['ArrayTree'], Mapping[Any, 'ArrayTree']]
ArrayDeviceTree = Union[
ArrayDevice, Iterable['ArrayDeviceTree'], Mapping[Any, 'ArrayDeviceTree']
]
ArrayNumpyTree = Union[
ArrayNumpy, Iterable['ArrayNumpyTree'], Mapping[Any, 'ArrayNumpyTree']
]
# Other types.
Scalar = Union[float, int]
Numeric = Union[Array, Scalar]
Shape = jax.core.Shape
PRNGKey = Union[jax.random.KeyArray, jax.Array]
PyTreeDef = jax.tree_util.PyTreeDef
Device = jax.Device
ArrayDType = type(jnp.float32)
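# Illustrative use of these aliases in annotations (sketch; `scale` is a
# made-up function, not part of chex):
#
#   def scale(x: Array, factor: Numeric, key: PRNGKey) -> ArrayTree:
#     return {'scaled': x * factor, 'noise': jax.random.normal(key, x.shape)}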
|
chex-master
|
chex/_src/pytypes.py
|
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `dataclass.py`."""
# pytype: disable=wrong-keyword-args # dataclass_transform
import copy
import dataclasses
import pickle
import sys
from typing import Any, Generic, Mapping, TypeVar
from absl.testing import absltest
from absl.testing import parameterized
from chex._src import asserts
from chex._src import dataclass
from chex._src import pytypes
import cloudpickle
import jax
import numpy as np
import tree
chex_dataclass = dataclass.dataclass
mappable_dataclass = dataclass.mappable_dataclass
orig_dataclass = dataclasses.dataclass
@chex_dataclass
class NestedDataclass():
c: pytypes.ArrayDevice
d: pytypes.ArrayDevice
@chex_dataclass
class PostInitDataclass:
a: pytypes.ArrayDevice
def __post_init__(self):
if not self.a > 0:
raise ValueError('a should be > than 0')
@chex_dataclass
class ReverseOrderNestedDataclass():
# The order of c and d is switched compared to NestedDataclass.
d: pytypes.ArrayDevice
c: pytypes.ArrayDevice
@chex_dataclass
class Dataclass():
a: NestedDataclass
b: pytypes.ArrayDevice
@chex_dataclass(frozen=True)
class FrozenDataclass():
a: NestedDataclass
b: pytypes.ArrayDevice
def dummy_dataclass(factor=1., frozen=False):
class_ctor = FrozenDataclass if frozen else Dataclass
return class_ctor(
a=NestedDataclass(
c=factor * np.ones((3,), dtype=np.float32),
d=factor * np.ones((4,), dtype=np.float32)),
b=factor * 2 * np.ones((5,), dtype=np.float32))
def _dataclass_instance_fields(dcls_instance):
"""Serialization-friendly version of dataclasses.fields for instances."""
attribute_dict = dcls_instance.__dict__
fields = []
for field in dcls_instance.__dataclass_fields__.values():
if field.name in attribute_dict: # Filter pseudo-fields.
fields.append(field)
return fields
@orig_dataclass
class ClassWithoutMap:
k: dict # pylint:disable=g-bare-generic
def some_method(self, *args):
raise RuntimeError('ClassWithoutMap.some_method() was called.')
def _get_mappable_dataclasses(test_type):
"""Generates shallow and nested mappable dataclasses."""
class Class:
"""Shallow class."""
k_tuple: tuple # pylint:disable=g-bare-generic
k_dict: dict # pylint:disable=g-bare-generic
def some_method(self, *args):
raise RuntimeError('Class.some_method() was called.')
class NestedClass:
"""Nested class."""
k_any: Any
k_int: int
k_str: str
k_arr: np.ndarray
k_dclass_with_map: Class
k_dclass_no_map: ClassWithoutMap
k_dict_factory: dict = dataclasses.field( # pylint:disable=g-bare-generic
default_factory=lambda: dict(x='x', y='y'))
k_default: str = 'default_str'
k_non_init: int = dataclasses.field(default=1, init=False)
k_init_only: dataclasses.InitVar[int] = 10
def some_method(self, *args):
raise RuntimeError('NestedClass.some_method() was called.')
def __post_init__(self, k_init_only):
self.k_non_init = self.k_int * k_init_only
if test_type == 'chex':
cls = chex_dataclass(Class, mappable_dataclass=True)
nested_cls = chex_dataclass(NestedClass, mappable_dataclass=True)
elif test_type == 'original':
cls = mappable_dataclass(orig_dataclass(Class))
nested_cls = mappable_dataclass(orig_dataclass(NestedClass))
else:
raise ValueError(f'Unknown test type: {test_type}')
return cls, nested_cls
@parameterized.named_parameters(('_original', 'original'), ('_chex', 'chex'))
class MappableDataclassTest(parameterized.TestCase):
def _init_testdata(self, test_type):
"""Initializes test data."""
map_cls, nested_map_cls = _get_mappable_dataclasses(test_type)
self.dcls_with_map_inner = map_cls(
k_tuple=(1, 2), k_dict=dict(k1=32, k2=33))
self.dcls_with_map_inner_inc = map_cls(
k_tuple=(2, 3), k_dict=dict(k1=33, k2=34))
self.dcls_no_map = ClassWithoutMap(k=dict(t='t', t2='t2'))
self.dcls_with_map = nested_map_cls(
k_any=None,
k_int=1,
k_str='test_str',
k_arr=np.array(16),
k_dclass_with_map=self.dcls_with_map_inner,
k_dclass_no_map=self.dcls_no_map)
self.dcls_with_map_inc_ints = nested_map_cls(
k_any=None,
k_int=2,
k_str='test_str',
k_arr=np.array(16),
k_dclass_with_map=self.dcls_with_map_inner_inc,
k_dclass_no_map=self.dcls_no_map,
k_default='default_str')
self.dcls_flattened_with_path = [
(('k_any',), None),
(('k_arr',), np.array(16)),
(('k_dclass_no_map',), self.dcls_no_map),
(('k_dclass_with_map', 'k_dict', 'k1'), 32),
(('k_dclass_with_map', 'k_dict', 'k2'), 33),
(('k_dclass_with_map', 'k_tuple', 0), 1),
(('k_dclass_with_map', 'k_tuple', 1), 2),
(('k_default',), 'default_str'),
(('k_dict_factory', 'x'), 'x'),
(('k_dict_factory', 'y'), 'y'),
(('k_int',), 1),
(('k_non_init',), 10),
(('k_str',), 'test_str'),
]
self.dcls_flattened_with_path_up_to = [
(('k_any',), None),
(('k_arr',), np.array(16)),
(('k_dclass_no_map',), self.dcls_no_map),
(('k_dclass_with_map',), self.dcls_with_map_inner),
(('k_default',), 'default_str'),
(('k_dict_factory', 'x'), 'x'),
(('k_dict_factory', 'y'), 'y'),
(('k_int',), 1),
(('k_non_init',), 10),
(('k_str',), 'test_str'),
]
self.dcls_flattened = [v for (_, v) in self.dcls_flattened_with_path]
self.dcls_flattened_up_to = [
v for (_, v) in self.dcls_flattened_with_path_up_to
]
self.dcls_tree_size = 18
self.dcls_tree_size_no_dicts = 14
def testFlattenAndUnflatten(self, test_type):
self._init_testdata(test_type)
self.assertEqual(self.dcls_flattened, tree.flatten(self.dcls_with_map))
self.assertEqual(
self.dcls_with_map,
tree.unflatten_as(self.dcls_with_map_inc_ints, self.dcls_flattened))
dataclass_in_seq = [34, self.dcls_with_map, [1, 2]]
dataclass_in_seq_flat = [34] + self.dcls_flattened + [1, 2]
self.assertEqual(dataclass_in_seq_flat, tree.flatten(dataclass_in_seq))
self.assertEqual(dataclass_in_seq,
tree.unflatten_as(dataclass_in_seq, dataclass_in_seq_flat))
def testFlattenUpTo(self, test_type):
self._init_testdata(test_type)
structure = copy.copy(self.dcls_with_map)
structure.k_dclass_with_map = None # Do not flatten 'k_dclass_with_map'
self.assertEqual(self.dcls_flattened_up_to,
tree.flatten_up_to(structure, self.dcls_with_map))
def testFlattenWithPath(self, test_type):
self._init_testdata(test_type)
self.assertEqual(
tree.flatten_with_path(self.dcls_with_map),
self.dcls_flattened_with_path)
def testFlattenWithPathUpTo(self, test_type):
self._init_testdata(test_type)
structure = copy.copy(self.dcls_with_map)
structure.k_dclass_with_map = None # Do not flatten 'k_dclass_with_map'
self.assertEqual(
tree.flatten_with_path_up_to(structure, self.dcls_with_map),
self.dcls_flattened_with_path_up_to)
def testMapStructure(self, test_type):
self._init_testdata(test_type)
add_one_to_ints_fn = lambda x: x + 1 if isinstance(x, int) else x
mapped_inc_ints = tree.map_structure(add_one_to_ints_fn, self.dcls_with_map)
self.assertEqual(self.dcls_with_map_inc_ints, mapped_inc_ints)
self.assertEqual(self.dcls_with_map_inc_ints.k_non_init,
self.dcls_with_map_inc_ints.k_int * 10)
self.assertEqual(mapped_inc_ints.k_non_init, mapped_inc_ints.k_int * 10)
def testMapStructureUpTo(self, test_type):
self._init_testdata(test_type)
structure = copy.copy(self.dcls_with_map)
structure.k_dclass_with_map = None # Do not map over 'k_dclass_with_map'
add_one_to_ints_fn = lambda x: x + 1 if isinstance(x, int) else x
mapped_inc_ints = tree.map_structure_up_to(structure, add_one_to_ints_fn,
self.dcls_with_map)
# k_dclass_with_map should be passed through unchanged
class_with_map = self.dcls_with_map.k_dclass_with_map
self.dcls_with_map_inc_ints.k_dclass_with_map = class_with_map
self.assertEqual(self.dcls_with_map_inc_ints, mapped_inc_ints)
self.assertEqual(self.dcls_with_map_inc_ints.k_non_init,
self.dcls_with_map_inc_ints.k_int * 10)
self.assertEqual(mapped_inc_ints.k_non_init, mapped_inc_ints.k_int * 10)
def testMapStructureWithPath(self, test_type):
self._init_testdata(test_type)
add_one_to_ints_fn = lambda path, x: x + 1 if isinstance(x, int) else x
mapped_inc_ints = tree.map_structure_with_path(add_one_to_ints_fn,
self.dcls_with_map)
self.assertEqual(self.dcls_with_map_inc_ints, mapped_inc_ints)
self.assertEqual(self.dcls_with_map_inc_ints.k_non_init,
self.dcls_with_map_inc_ints.k_int * 10)
self.assertEqual(mapped_inc_ints.k_non_init, mapped_inc_ints.k_int * 10)
def testMapStructureWithPathUpTo(self, test_type):
self._init_testdata(test_type)
structure = copy.copy(self.dcls_with_map)
structure.k_dclass_with_map = None # Do not map over 'k_dclass_with_map'
add_one_to_ints_fn = lambda path, x: x + 1 if isinstance(x, int) else x
mapped_inc_ints = tree.map_structure_with_path_up_to(
structure, add_one_to_ints_fn, self.dcls_with_map)
# k_dclass_with_map should be passed through unchanged
class_with_map = self.dcls_with_map.k_dclass_with_map
self.dcls_with_map_inc_ints.k_dclass_with_map = class_with_map
self.assertEqual(self.dcls_with_map_inc_ints, mapped_inc_ints)
self.assertEqual(self.dcls_with_map_inc_ints.k_non_init,
self.dcls_with_map_inc_ints.k_int * 10)
self.assertEqual(mapped_inc_ints.k_non_init, mapped_inc_ints.k_int * 10)
def testTraverse(self, test_type):
self._init_testdata(test_type)
visited = []
tree.traverse(visited.append, self.dcls_with_map, top_down=False)
self.assertLen(visited, self.dcls_tree_size)
visited_without_dicts = []
def visit_without_dicts(x):
visited_without_dicts.append(x)
return 'X' if isinstance(x, dict) else None
tree.traverse(visit_without_dicts, self.dcls_with_map, top_down=True)
self.assertLen(visited_without_dicts, self.dcls_tree_size_no_dicts)
def testIsDataclass(self, test_type):
self._init_testdata(test_type)
self.assertTrue(dataclasses.is_dataclass(self.dcls_no_map))
self.assertTrue(dataclasses.is_dataclass(self.dcls_with_map))
self.assertTrue(
dataclasses.is_dataclass(self.dcls_with_map.k_dclass_with_map))
self.assertTrue(
dataclasses.is_dataclass(self.dcls_with_map.k_dclass_no_map))
class DataclassesTest(parameterized.TestCase):
@parameterized.parameters([True, False])
def test_dataclass_tree_leaves(self, frozen):
obj = dummy_dataclass(frozen=frozen)
self.assertLen(jax.tree_util.tree_leaves(obj), 3)
@parameterized.parameters([True, False])
def test_dataclass_tree_map(self, frozen):
factor = 5.
obj = dummy_dataclass(frozen=frozen)
target_obj = dummy_dataclass(factor=factor, frozen=frozen)
asserts.assert_trees_all_close(
jax.tree_util.tree_map(lambda t: factor * t, obj), target_obj)
def test_tree_flatten_with_keys(self):
obj = dummy_dataclass()
keys_and_leaves, treedef = jax.tree_util.tree_flatten_with_path(obj)
self.assertEqual(
[k for k, _ in keys_and_leaves],
[
(jax.tree_util.GetAttrKey('a'), jax.tree_util.GetAttrKey('c')),
(jax.tree_util.GetAttrKey('a'), jax.tree_util.GetAttrKey('d')),
(jax.tree_util.GetAttrKey('b'),),
],
)
leaves = [l for _, l in keys_and_leaves]
new_obj = treedef.unflatten(leaves)
self.assertEqual(new_obj, obj)
def test_tree_map_with_keys(self):
obj = dummy_dataclass()
key_value_list, unused_treedef = jax.tree_util.tree_flatten_with_path(obj)
# Convert a list of key-value tuples to a dict.
flat_obj = dict(key_value_list)
def f(path, x):
value = flat_obj[path]
np.testing.assert_allclose(value, x)
return path
out = jax.tree_util.tree_map_with_path(f, obj)
self.assertEqual(
out.a.c, (jax.tree_util.GetAttrKey('a'), jax.tree_util.GetAttrKey('c'))
)
self.assertEqual(
out.a.d, (jax.tree_util.GetAttrKey('a'), jax.tree_util.GetAttrKey('d'))
)
self.assertEqual(out.b, (jax.tree_util.GetAttrKey('b'),))
def test_tree_map_with_keys_traversal_order(self):
# pytype: disable=wrong-arg-types
obj = ReverseOrderNestedDataclass(d=1, c=2)
# pytype: enable=wrong-arg-types
leaves = []
def f(_, x):
leaves.append(x)
jax.tree_util.tree_map_with_path(f, obj)
self.assertEqual(leaves, jax.tree_util.tree_leaves(obj))
@parameterized.parameters([True, False])
def test_dataclass_replace(self, frozen):
factor = 5.
obj = dummy_dataclass(frozen=frozen)
# pytype: disable=attribute-error # dataclass_transform
obj = obj.replace(a=obj.a.replace(c=factor * obj.a.c))
obj = obj.replace(a=obj.a.replace(d=factor * obj.a.d))
obj = obj.replace(b=factor * obj.b)
target_obj = dummy_dataclass(factor=factor, frozen=frozen)
asserts.assert_trees_all_close(obj, target_obj)
# pytype: enable=attribute-error
def test_dataclass_requires_kwargs_by_default(self):
factor = 1.0
with self.assertRaisesRegex(
ValueError,
"Mappable dataclass constructor doesn't support positional args.",
):
Dataclass(
NestedDataclass(
c=factor * np.ones((3,), dtype=np.float32),
d=factor * np.ones((4,), dtype=np.float32),
),
factor * 2 * np.ones((5,), dtype=np.float32),
)
def test_dataclass_mappable_dataclass_false(self):
factor = 1.0
@chex_dataclass(mappable_dataclass=False)
class NonMappableDataclass:
a: NestedDataclass
b: pytypes.ArrayDevice
NonMappableDataclass(
NestedDataclass(
c=factor * np.ones((3,), dtype=np.float32),
d=factor * np.ones((4,), dtype=np.float32),
),
factor * 2 * np.ones((5,), dtype=np.float32),
)
def test_inheritance_is_possible_thanks_to_kw_only(self):
if sys.version_info.minor < 10: # Feature only available for Python >= 3.10
return
@chex_dataclass(kw_only=True)
class Base:
default: int = 1
@chex_dataclass(kw_only=True)
class Child(Base):
non_default: int
Child(non_default=2)
def test_unfrozen_dataclass_is_mutable(self):
factor = 5.
obj = dummy_dataclass(frozen=False)
obj.a.c = factor * obj.a.c
obj.a.d = factor * obj.a.d
obj.b = factor * obj.b
target_obj = dummy_dataclass(factor=factor, frozen=False)
asserts.assert_trees_all_close(obj, target_obj)
def test_frozen_dataclass_raise_error(self):
factor = 5.
obj = dummy_dataclass(frozen=True)
obj.a.c = factor * obj.a.c # mutable since obj.a is not frozen.
with self.assertRaisesRegex(dataclass.FrozenInstanceError,
'cannot assign to field'):
obj.b = factor * obj.b # raises error because obj is frozen.
@parameterized.named_parameters(
('frozen', True),
('mutable', False),
)
def test_get_and_set_state(self, frozen):
@chex_dataclass(frozen=frozen)
class SimpleClass():
data: int = 1
obj_a = SimpleClass(data=1)
state = getattr(obj_a, '__getstate__')()
obj_b = SimpleClass(data=2)
getattr(obj_b, '__setstate__')(state)
self.assertEqual(obj_a, obj_b)
def test_unexpected_kwargs(self):
@chex_dataclass()
class SimpleDataclass:
a: int
b: int = 2
SimpleDataclass(a=1, b=3)
with self.assertRaisesRegex(ValueError, 'init.*got unexpected kwargs'):
SimpleDataclass(a=1, b=3, c=4) # pytype: disable=wrong-keyword-args
def test_tuple_conversion(self):
@chex_dataclass()
class SimpleDataclass:
b: int
a: int
obj = SimpleDataclass(a=2, b=1)
self.assertSequenceEqual(getattr(obj, 'to_tuple')(), (1, 2))
obj2 = getattr(SimpleDataclass, 'from_tuple')((1, 2))
self.assertEqual(obj.a, obj2.a)
self.assertEqual(obj.b, obj2.b)
@parameterized.named_parameters(
('frozen', True),
('mutable', False),
)
def test_tuple_rev_conversion(self, frozen):
obj = dummy_dataclass(frozen=frozen)
asserts.assert_trees_all_close(
type(obj).from_tuple(obj.to_tuple()), # pytype: disable=attribute-error
obj,
)
@parameterized.named_parameters(
('frozen', True),
('mutable', False),
)
def test_inheritance(self, frozen):
@chex_dataclass(frozen=frozen)
class Base:
x: int
@chex_dataclass(frozen=frozen)
class Derived(Base):
y: int
base_obj = Base(x=1)
self.assertNotIsInstance(base_obj, Derived)
self.assertIsInstance(base_obj, Base)
derived_obj = Derived(x=1, y=2)
self.assertIsInstance(derived_obj, Derived)
self.assertIsInstance(derived_obj, Base)
def test_inheritance_from_empty_frozen_base(self):
@chex_dataclass(frozen=True)
class FrozenBase:
pass
@chex_dataclass(frozen=True)
class DerivedFrozen(FrozenBase):
j: int
df = DerivedFrozen(j=2)
self.assertIsInstance(df, FrozenBase)
with self.assertRaisesRegex(
TypeError, 'cannot inherit non-frozen dataclass from a frozen one'):
# pylint:disable=unused-variable
@chex_dataclass
class DerivedMutable(FrozenBase):
j: int
# pylint:enable=unused-variable
def test_disallowed_fields(self):
# pylint:disable=unused-variable
with self.assertRaisesRegex(ValueError, 'dataclass fields are disallowed'):
@chex_dataclass(mappable_dataclass=False)
class InvalidNonMappable:
from_tuple: int
@chex_dataclass(mappable_dataclass=False)
class ValidMappable:
get: int
with self.assertRaisesRegex(ValueError, 'dataclass fields are disallowed'):
@chex_dataclass(mappable_dataclass=True)
class InvalidMappable:
get: int
from_tuple: int
# pylint:enable=unused-variable
@parameterized.parameters(True, False)
def test_flatten_is_leaf(self, is_mappable):
@chex_dataclass(mappable_dataclass=is_mappable)
class _InnerDcls:
v_1: int
v_2: int
@chex_dataclass(mappable_dataclass=is_mappable)
class _Dcls:
str_val: str
# pytype: disable=invalid-annotation # enable-bare-annotations
inner_dcls: _InnerDcls
dct: Mapping[str, _InnerDcls]
# pytype: enable=invalid-annotation # enable-bare-annotations
dcls = _Dcls(
str_val='test',
inner_dcls=_InnerDcls(v_1=1, v_2=11),
dct={
'md1': _InnerDcls(v_1=2, v_2=22),
'md2': _InnerDcls(v_1=3, v_2=33)
})
def _is_leaf(value) -> bool:
# Must not traverse over integers.
self.assertNotIsInstance(value, int)
return isinstance(value, (_InnerDcls, str))
leaves = jax.tree_util.tree_flatten(dcls, is_leaf=_is_leaf)[0]
self.assertCountEqual(
(dcls.str_val, dcls.inner_dcls, dcls.dct['md1'], dcls.dct['md2']),
leaves)
asserts.assert_trees_all_equal_structs(
jax.tree_util.tree_map(lambda x: x, dcls, is_leaf=_is_leaf), dcls)
def test_decorator_alias(self):
# Make sure, that creating a decorator alias works correctly.
configclass = chex_dataclass(frozen=True)
@configclass
class Foo:
bar: int = 1
toto: int = 2
@configclass
class Bar:
bar: int = 1
toto: int = 2
# Verify that both Foo and Bar are correctly registered with jax.tree_util.
self.assertLen(jax.tree_util.tree_flatten(Foo())[0], 2)
self.assertLen(jax.tree_util.tree_flatten(Bar())[0], 2)
@parameterized.named_parameters(
('mappable', True),
('not_mappable', False),
)
def test_generic_dataclass(self, mappable):
T = TypeVar('T')
@chex_dataclass(mappable_dataclass=mappable)
class GenericDataclass(Generic[T]):
a: T # pytype: disable=invalid-annotation # enable-bare-annotations
obj = GenericDataclass(a=np.array([1.0, 1.0]))
asserts.assert_trees_all_close(obj.a, 1.0)
def test_mappable_eq_override(self):
@chex_dataclass(mappable_dataclass=True)
class EqDataclass:
a: pytypes.ArrayDevice
def __eq__(self, other):
if isinstance(other, EqDataclass):
return other.a[0] == self.a[0]
return False
obj1 = EqDataclass(a=np.array([1.0, 1.0]))
obj2 = EqDataclass(a=np.array([1.0, 0.0]))
obj3 = EqDataclass(a=np.array([0.0, 1.0]))
self.assertEqual(obj1, obj2)
self.assertNotEqual(obj1, obj3)
@parameterized.parameters([NestedDataclass, ReverseOrderNestedDataclass])
def test_dataclass_instance_fields(self, dcls):
obj = dcls(c=1, d=2)
self.assertSequenceEqual(
dataclasses.fields(obj), _dataclass_instance_fields(obj))
@parameterized.parameters((pickle, NestedDataclass),
(cloudpickle, ReverseOrderNestedDataclass))
def test_roundtrip_serialization(self, serialization_lib, dcls):
obj = dcls(c=1, d=2)
obj_fields = [
(f.name, getattr(obj, f.name)) for f in dataclasses.fields(obj)
]
self.assertLen(obj_fields, 2)
obj2 = serialization_lib.loads(serialization_lib.dumps(obj))
obj2_fields = [(f.name, getattr(obj2, f.name))
for f in _dataclass_instance_fields(obj2)]
self.assertSequenceEqual(obj_fields, obj2_fields)
self.assertSequenceEqual(jax.tree_util.tree_leaves(obj2), [1, 2])
obj3 = jax.tree_util.tree_map(lambda x: x, obj2)
obj3_fields = [(f.name, getattr(obj3, f.name))
for f in _dataclass_instance_fields(obj3)]
self.assertSequenceEqual(obj_fields, obj3_fields)
self.assertSequenceEqual(jax.tree_util.tree_leaves(obj3), [1, 2])
@parameterized.parameters([NestedDataclass, ReverseOrderNestedDataclass])
def test_flatten_roundtrip_ordering(self, dcls):
obj = dcls(c=1, d=2)
leaves, treedef = jax.tree_util.tree_flatten(obj)
self.assertSequenceEqual(leaves, [1, 2])
obj2 = jax.tree_util.tree_unflatten(treedef, leaves)
self.assertSequenceEqual(dataclasses.fields(obj2), dataclasses.fields(obj))
def test_flatten_respects_post_init(self):
obj = PostInitDataclass(a=1) # pytype: disable=wrong-arg-types
with self.assertRaises(ValueError):
_ = jax.tree_util.tree_map(lambda x: 0, obj)
if __name__ == '__main__':
absltest.main()
|
chex-master
|
chex/_src/dataclass_test.py
|
# Copyright 2022 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities to hold expected dimension sizes."""
import re
from typing import Any, Collection, Dict, Optional, Sized, Tuple
Shape = Tuple[Optional[int], ...]
class Dimensions:
"""A lightweight utility that maps strings to shape tuples.
The most basic usage is:
.. code::
>>> dims = chex.Dimensions(B=3, T=5, N=7) # You can specify any letters.
>>> dims['NBT']
(7, 3, 5)
This is useful when dealing with many differently shaped arrays. For instance,
let's check the shape of this array:
.. code::
>>> x = jnp.array([[2, 0, 5, 6, 3],
... [5, 4, 4, 3, 3],
... [0, 0, 5, 2, 0]])
>>> chex.assert_shape(x, dims['BT'])
The dimension sizes can be accessed directly, e.g. :code:`dims.N == 7`. This can
be useful in many applications. For instance, let's one-hot encode our array.
.. code::
>>> y = jax.nn.one_hot(x, dims.N)
>>> chex.assert_shape(y, dims['BTN'])
You can also store the shape of a given array in :code:`dims`, e.g.
.. code::
>>> z = jnp.array([[0, 6, 0, 2],
... [4, 2, 2, 4]])
>>> dims['XY'] = z.shape
>>> dims
Dimensions(B=3, N=7, T=5, X=2, Y=4)
You can set a wildcard dimension, cf. :func:`chex.assert_shape`:
.. code::
>>> dims.W = None
>>> dims['BTW']
(3, 5, None)
Or you can use the wildcard character `'*'` directly:
.. code::
>>> dims['BT*']
(3, 5, None)
Single digits are interpreted as literal integers. Note that this notation
is limited to single-digit literals.
.. code::
>>> dims['BT123']
(3, 5, 1, 2, 3)
Support for single digits was mainly included to accommodate dummy axes
introduced for consistent broadcasting. For instance, instead of using
:func:`jnp.expand_dims <jax.numpy.expand_dims>` you could do the following:
.. code::
>>> w = y * x # Cannot broadcast (3, 5, 7) with (3, 5)
Traceback (most recent call last):
...
ValueError: Incompatible shapes for broadcasting: ((3, 5, 7), (1, 3, 5))
>>> w = y * x.reshape(dims['BT1'])
>>> chex.assert_shape(w, dims['BTN'])
Sometimes you only care about some array dimensions but not all. You can use
an underscore to ignore an axis, e.g.
.. code::
>>> chex.assert_rank(y, 3)
>>> dims['__M'] = y.shape # Skip the first two axes.
Finally note that a single-character key returns a tuple of length one.
.. code::
>>> dims['M']
(7,)
"""
# Tell static type checker not to worry about attribute errors.
_HAS_DYNAMIC_ATTRIBUTES = True
def __init__(self, **dim_sizes) -> None:
for dim, size in dim_sizes.items():
self._setdim(dim, size)
def __getitem__(self, key: str) -> Shape:
self._validate_key(key)
return tuple(self._getdim(dim) for dim in key)
def __setitem__(self, key: str, value: Collection[Optional[int]]) -> None:
self._validate_key(key)
self._validate_value(value)
if len(key) != len(value):
raise ValueError(
f'key string {repr(key)} and shape {tuple(value)} '
'have different lengths')
for dim, size in zip(key, value):
self._setdim(dim, size)
def __delitem__(self, key: str) -> None:
self._validate_key(key)
for dim in key:
self._deldim(dim)
def __repr__(self) -> str:
args = ', '.join(f'{k}={v}' for k, v in sorted(self._asdict().items()))
return f'{type(self).__name__}({args})'
def _asdict(self) -> Dict[str, Optional[int]]:
return {k: v for k, v in self.__dict__.items()
if re.fullmatch(r'[a-zA-Z]', k)}
def _getdim(self, dim: str) -> Optional[int]:
if dim == '*':
return None
if re.fullmatch(r'[0-9]', dim):
return int(dim)
try:
return getattr(self, dim)
except AttributeError as e:
raise KeyError(dim) from e
def _setdim(self, dim: str, size: Optional[int]) -> None:
if dim == '_': # Skip.
return
self._validate_dim(dim)
setattr(self, dim, _optional_int(size))
def _deldim(self, dim: str) -> None:
if dim == '_': # Skip.
return
self._validate_dim(dim)
try:
return delattr(self, dim)
except AttributeError as e:
raise KeyError(dim) from e
def _validate_key(self, key: Any) -> None:
if not isinstance(key, str):
raise TypeError(f'key must be a string; got: {type(key).__name__}')
def _validate_value(self, value: Any) -> None:
if not isinstance(value, Sized):
raise TypeError(
'value must be sized, i.e. an object with a well-defined len(value); '
f'got object of type: {type(value).__name__}')
def _validate_dim(self, dim: Any) -> None:
if not isinstance(dim, str):
raise TypeError(
f'dimension name must be a string; got: {type(dim).__name__}')
if not re.fullmatch(r'[a-zA-Z]', dim):
raise KeyError(
'dimension names may only contain letters (or \'_\' to skip); '
f'got dimension name: {repr(dim)}')
def _optional_int(x: Any) -> Optional[int]:
if x is None:
return None
try:
i = int(x)
if x == i:
return i
except ValueError:
pass
raise TypeError(f'object cannot be interpreted as a python int: {repr(x)}')
|
chex-master
|
chex/_src/dimensions.py
|
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Chexification utilities."""
import atexit
import collections
from concurrent import futures
import dataclasses
import functools
import re
from typing import Any, Callable, FrozenSet
from absl import logging
from chex._src import asserts_internal as _ai
import jax
from jax.experimental import checkify
@dataclasses.dataclass(frozen=True)
class _ChexifyChecks:
"""A set of checks imported from checkify."""
user: FrozenSet[checkify.ErrorCategory] = checkify.user_checks
nan: FrozenSet[checkify.ErrorCategory] = checkify.nan_checks
index: FrozenSet[checkify.ErrorCategory] = checkify.index_checks
div: FrozenSet[checkify.ErrorCategory] = checkify.div_checks
float: FrozenSet[checkify.ErrorCategory] = checkify.float_checks
automatic: FrozenSet[checkify.ErrorCategory] = checkify.automatic_checks
all: FrozenSet[checkify.ErrorCategory] = checkify.all_checks
_chexify_error_pattern = re.compile(
re.escape(_ai.get_chexify_err_message('ANY', 'ANY')).replace('ANY', '.*')
)
def _check_error(err: checkify.Error) -> None:
"""Checks the error and converts it to chex format."""
try:
checkify.check_error(err)
except ValueError as exc:
msg = str(exc)
if _chexify_error_pattern.match(msg):
# Remove internal code pointers.
internal_info_pos = msg.rfind('(check failed at')
if internal_info_pos != -1:
msg = msg[:internal_info_pos]
raise AssertionError(msg) # pylint:disable=raise-missing-from
else:
raise
def block_until_chexify_assertions_complete() -> None:
"""Waits until all asynchronous checks complete.
See `chexify` for more detail.
"""
for wait_fn in _ai.CHEXIFY_STORAGE.wait_fns:
wait_fn()
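# Illustrative usage sketch (`chexified_train_step` and `batches` are made-up
# names): call this at a synchronization point, e.g. the end of a training loop
# or a test, so that failures from asynchronously dispatched value assertions
# are raised before the program moves on.
#
#   for batch in batches:
#     chexified_train_step(batch)              # checks dispatched async
#   block_until_chexify_assertions_complete()  # raises here if any check failed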
@atexit.register # to catch uninspected error stats
def _check_if_hanging_assertions():
if _ai.CHEXIFY_STORAGE.wait_fns:
logging.warning(
'[Chex] Some chexify assertion statuses were not inspected due to '
'async exec (https://jax.readthedocs.io/en/latest/async_dispatch.html).'
' Consider calling `chex.block_until_chexify_assertions_complete()` at '
'the end of computations that rely on jitted chex assertions.')
block_until_chexify_assertions_complete()
# Public API.
ChexifyChecks = _ChexifyChecks()
def chexify(
fn: Callable[..., Any],
async_check: bool = True,
errors: FrozenSet[checkify.ErrorCategory] = ChexifyChecks.user,
) -> Callable[..., Any]:
"""Wraps a transformed function `fn` to enable Chex value assertions.
Chex value/runtime assertions access concrete values of tensors (e.g.
`assert_tree_all_finite`) which are not available during JAX tracing, see
https://jax.readthedocs.io/en/latest/notebooks/How_JAX_primitives_work.html
and
https://jax.readthedocs.io/en/latest/_modules/jax/_src/errors.html#ConcretizationTypeError.
This wrapper enables them in jitted/pmapped functions by performing a
specifically designed JAX transformation
https://jax.readthedocs.io/en/latest/debugging/checkify_guide.html#the-checkify-transformation
and calling functionalised checks
https://jax.readthedocs.io/en/latest/_autosummary/jax.experimental.checkify.check.html
Example:
.. code::
@chex.chexify
@jax.jit
def logp1_abs_safe(x: chex.Array) -> chex.Array:
chex.assert_tree_all_finite(x)
return jnp.log(jnp.abs(x) + 1)
logp1_abs_safe(jnp.ones(2)) # OK
logp1_abs_safe(jnp.array([jnp.nan, 3])) # FAILS
logp1_abs_safe.wait_checks()
Note 1: This wrapper allows identifying the first failed assertion in jitted
code by printing a pointer to the line where the failed assertion was invoked.
For getting verbose messages (including concrete tensor values), an unjitted
version of the code will need to be executed with the same input values. Chex
does not currently provide tools to help with this.
Note 2: This wrapper fully supports asynchronous executions
(see https://jax.readthedocs.io/en/latest/async_dispatch.html).
To block program execution until asynchronous checks for a _chexified_
function `fn` complete, call `fn.wait_checks()`. Similarly,
`chex.block_until_chexify_assertions_complete()` will block program execution
until _all_ asynchronous checks complete.
Note 3: Chex automatically selects the backend for executing its assertions
(i.e. CPU or device accelerator) depending on the program context.
Note 4: Value assertions can have impact on the performance of a function, see
https://jax.readthedocs.io/en/latest/debugging/checkify_guide.html#limitations
Note 5: static assertions, such as `assert_shape` or
`assert_trees_all_equal_dtypes`, can be called from a jitted function without
`chexify` wrapper (since they do not access concrete values, only
shapes and/or dtypes which are available during JAX tracing).
More examples can be found at
https://github.com/deepmind/chex/blob/master/chex/_src/asserts_chexify_test.py
Args:
fn: A transformed function to wrap.
async_check: Whether to check errors in the async dispatch mode. See
https://jax.readthedocs.io/en/latest/async_dispatch.html.
errors: A set of `checkify.ErrorCategory` values which defines the set of
enabled checks. By default only explicit ``checks`` are enabled (`user`).
You can also for example enable NaN and Div-by-0 errors by passing the
`float` set, or for example combine multiple sets through set
operations (`float | user`).
Returns:
A _chexified_ function, i.e. the one with enabled value assertions.
The returned function has `wait_checks()` method that blocks the caller
until all pending async checks complete.
"""
# Hardware/XLA failures can only happen on the C++ side. They are expected to
# issue critical errors that will immediately crash the whole program.
# Nevertheless, Chex sets its own timeout for every chexified XLA computation to
# ensure that a program never blocks on the Chex side when running in async mode.
async_timeout = 1800 # 30 minutes
# Get function name.
if isinstance(fn, functools.partial):
func_name = fn.func.__name__
else:
func_name = fn.__name__
if async_check:
# Spawn a thread for processing blocking calls.
thread_pool = futures.ThreadPoolExecutor(1, f'async_chex_{func_name}')
# A deque for futures.
async_check_futures = collections.deque()
# Checkification.
checkified_fn = checkify.checkify(fn, errors=errors)
@functools.wraps(fn)
def _chexified_fn(*args, **kwargs):
if _ai.CHEXIFY_STORAGE.level:
raise RuntimeError(
'Nested @chexify wrapping is disallowed. '
'Make sure that you only wrap the function at the outermost level.')
if async_check:
# Check completed calls.
while async_check_futures and async_check_futures[0].done():
_check_error(async_check_futures.popleft().result(async_timeout))
# Run the checkified function.
_ai.CHEXIFY_STORAGE.level += 1
try:
err, out = checkified_fn(*args, **kwargs)
finally:
_ai.CHEXIFY_STORAGE.level -= 1
# Check errors.
if async_check:
# Blocking call is deferred to the thread.
async_check_futures.append(
thread_pool.submit(lambda: jax.device_get(err)))
else:
# Blocks until `fn`'s outputs are ready.
_check_error(err)
return out
def _wait_checks():
if async_check:
while async_check_futures:
_check_error(async_check_futures.popleft().result(async_timeout))
# Add a barrier callback to the global storage.
_ai.CHEXIFY_STORAGE.wait_fns.append(_wait_checks)
# Add the callback to the chexified function's properties.
if not hasattr(_chexified_fn, 'wait_checks'):
_chexified_fn.wait_checks = _wait_checks
else:
logging.warning(
"Function %s already defines 'wait_checks' method; "
'Chex will not redefine it.', func_name)
return _chexified_fn
def with_jittable_assertions(fn: Callable[..., Any],
async_check: bool = True) -> Callable[..., Any]:
"""An alias for `chexify` (see the docs)."""
return chexify(fn, async_check)
|
chex-master
|
chex/_src/asserts_chexify.py
|
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""JAX/dm-tree friendly dataclass implementation reusing Python dataclasses."""
import collections
import dataclasses
import functools
import sys
from absl import logging
import jax
from typing_extensions import dataclass_transform # pytype: disable=not-supported-yet
FrozenInstanceError = dataclasses.FrozenInstanceError
_RESERVED_DCLS_FIELD_NAMES = frozenset(("from_tuple", "replace", "to_tuple"))
def mappable_dataclass(cls):
"""Exposes dataclass as ``collections.abc.Mapping`` descendent.
Allows dataclasses to be traversed by methods from the `dm-tree` library.
NOTE: changes the dataclass constructor to a dict-style one
(i.e. positional args aren't supported; generators/iterables of key-value
pairs can be used instead, as with `dict`).
Args:
cls: A dataclass to mutate.
Returns:
The mutated dataclass, implementing the ``collections.abc.Mapping`` interface.
"""
if not dataclasses.is_dataclass(cls):
raise ValueError(f"Expected dataclass, got {cls} (change wrappers order?).")
# Define methods for compatibility with `collections.abc.Mapping`.
setattr(cls, "__getitem__", lambda self, x: self.__dict__[x])
setattr(cls, "__len__", lambda self: len(self.__dict__))
setattr(cls, "__iter__", lambda self: iter(self.__dict__))
# Update constructor.
orig_init = cls.__init__
all_fields = set(f.name for f in cls.__dataclass_fields__.values())
init_fields = [f.name for f in cls.__dataclass_fields__.values() if f.init]
@functools.wraps(orig_init)
def new_init(self, *orig_args, **orig_kwargs):
if (orig_args and orig_kwargs) or len(orig_args) > 1:
raise ValueError(
"Mappable dataclass constructor doesn't support positional args."
"(it has the same constructor as python dict)")
all_kwargs = dict(*orig_args, **orig_kwargs)
unknown_kwargs = set(all_kwargs.keys()) - all_fields
if unknown_kwargs:
raise ValueError(f"__init__() got unexpected kwargs: {unknown_kwargs}.")
# Pass only arguments corresponding to fields with `init=True`.
valid_kwargs = {k: v for k, v in all_kwargs.items() if k in init_fields}
orig_init(self, **valid_kwargs)
cls.__init__ = new_init
# Update base class to derive from Mapping
dct = dict(cls.__dict__)
if "__dict__" in dct:
dct.pop("__dict__") # Avoid self-references.
# Remove `object` from the sequence of base classes. Deriving from both
# Mapping and object would cause a failure to create an MRO for the updated
# class.
bases = tuple(b for b in cls.__bases__ if b != object)
cls = type(cls.__name__, bases + (collections.abc.Mapping,), dct)
return cls
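# Illustrative sketch added for this document (not part of the chex source):
# what the dict-style constructor and Mapping interface of a mappable
# dataclass look like. The `_PlainPoint` class and the helper name below are
# hypothetical.
def _example_mappable_dataclass_usage():
  @dataclasses.dataclass
  class _PlainPoint:
    x: int = 0
    y: int = 0

  point_cls = mappable_dataclass(_PlainPoint)
  p = point_cls(dict(x=1, y=2))  # Dict-style constructor; no positional args.
  assert list(p) == ["x", "y"]   # Iterates over field names, like a dict.
  assert p["x"] == 1 and len(p) == 2
  return p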
@dataclass_transform()
def dataclass(
cls=None,
*,
init=True,
repr=True, # pylint: disable=redefined-builtin
eq=True,
order=False,
unsafe_hash=False,
frozen=False,
kw_only: bool = False,
mappable_dataclass=True, # pylint: disable=redefined-outer-name
):
"""JAX-friendly wrapper for :py:func:`dataclasses.dataclass`.
This wrapper registers new dataclasses with JAX so that tree utils
operate correctly. Additionally, a `replace` method is provided, making it
easy to operate on the class when it is made immutable (frozen=True).
Args:
cls: A class to decorate.
init: See :py:func:`dataclasses.dataclass`.
repr: See :py:func:`dataclasses.dataclass`.
eq: See :py:func:`dataclasses.dataclass`.
order: See :py:func:`dataclasses.dataclass`.
unsafe_hash: See :py:func:`dataclasses.dataclass`.
frozen: See :py:func:`dataclasses.dataclass`.
kw_only: See :py:func:`dataclasses.dataclass`.
mappable_dataclass: If True (the default), methods to make the class
implement the :py:class:`collections.abc.Mapping` interface will be
generated and the class will include :py:class:`collections.abc.Mapping`
in its base classes.
`True` is the default, because being an instance of `Mapping` makes
`chex.dataclass` compatible with e.g. `jax.tree_util.tree_*` methods, the
`tree` library, or methods related to tensorflow/python/utils/nest.py.
As a side-effect, e.g. `np.testing.assert_array_equal` will only check
that the field names are equal and not the content. Use `chex.assert_tree_*`
instead.
Returns:
A JAX-friendly dataclass.
"""
def dcls(cls):
# Make sure to create a separate _Dataclass instance for each `cls`.
return _Dataclass(
init, repr, eq, order, unsafe_hash, frozen, kw_only, mappable_dataclass
)(cls)
if cls is None:
return dcls
return dcls(cls)
class _Dataclass():
"""JAX-friendly wrapper for `dataclasses.dataclass`."""
def __init__(
self,
init=True,
repr=True, # pylint: disable=redefined-builtin
eq=True,
order=False,
unsafe_hash=False,
frozen=False,
kw_only=False,
mappable_dataclass=True, # pylint: disable=redefined-outer-name
):
self.init = init
self.repr = repr # pylint: disable=redefined-builtin
self.eq = eq
self.order = order
self.unsafe_hash = unsafe_hash
self.frozen = frozen
self.kw_only = kw_only
self.mappable_dataclass = mappable_dataclass
def __call__(self, cls):
"""Forwards class to dataclasses's wrapper and registers it with JAX."""
# Remove once https://github.com/python/cpython/pull/24484 is merged.
for base in cls.__bases__:
if (dataclasses.is_dataclass(base) and
getattr(base, "__dataclass_params__").frozen and not self.frozen):
raise TypeError("cannot inherit non-frozen dataclass from a frozen one")
# `kw_only` is only available starting from Python 3.10.
version_dependent_args = {}
version = sys.version_info
if version.major == 3 and version.minor >= 10:
version_dependent_args = {"kw_only": self.kw_only}
# pytype: disable=wrong-keyword-args
dcls = dataclasses.dataclass(
cls,
init=self.init,
repr=self.repr,
eq=self.eq,
order=self.order,
unsafe_hash=self.unsafe_hash,
frozen=self.frozen,
**version_dependent_args,
)
# pytype: enable=wrong-keyword-args
fields_names = set(f.name for f in dataclasses.fields(dcls))
invalid_fields = fields_names.intersection(_RESERVED_DCLS_FIELD_NAMES)
if invalid_fields:
raise ValueError(f"The following dataclass fields are disallowed: "
f"{invalid_fields} ({dcls}).")
if self.mappable_dataclass:
dcls = mappable_dataclass(dcls)
# We remove `collections.abc.Mapping` mixin methods here to allow
# fields with these names.
for attr in ("values", "keys", "get", "items"):
setattr(dcls, attr, None) # redefine
delattr(dcls, attr) # delete
def _from_tuple(args):
return dcls(zip(dcls.__dataclass_fields__.keys(), args))
def _to_tuple(self):
return tuple(getattr(self, k) for k in self.__dataclass_fields__.keys())
def _replace(self, **kwargs):
return dataclasses.replace(self, **kwargs)
def _getstate(self):
return self.__dict__
# Patch __setstate__ to register the object on deserialization.
def _setstate(self, state):
register_dataclass_type_with_jax_tree_util(dcls)
self.__dict__.update(state)
orig_init = dcls.__init__
# Patch the object's __init__ such that the class is registered on creation,
# in case it was not registered on deserialization.
@functools.wraps(orig_init)
def _init(self, *args, **kwargs):
register_dataclass_type_with_jax_tree_util(dcls)
return orig_init(self, *args, **kwargs)
setattr(dcls, "from_tuple", _from_tuple)
setattr(dcls, "to_tuple", _to_tuple)
setattr(dcls, "replace", _replace)
setattr(dcls, "__getstate__", _getstate)
setattr(dcls, "__setstate__", _setstate)
setattr(dcls, "__init__", _init)
return dcls
def _dataclass_unflatten(dcls, keys, values):
"""Creates a chex dataclass from a flatten jax.tree_util representation."""
dcls_object = dcls.__new__(dcls)
attribute_dict = dict(zip(keys, values))
# Looping over fields instead of keys & values preserves the field order.
# Using dataclasses.fields fails because dataclass uids change after
# serialisation (e.g., with cloudpickle).
for field in dcls.__dataclass_fields__.values():
if field.name in attribute_dict: # Filter pseudo-fields.
object.__setattr__(dcls_object, field.name, attribute_dict[field.name])
# Need to manually call __post_init__ here as we have avoided calling __init__.
if getattr(dcls_object, "__post_init__", None):
dcls_object.__post_init__()
return dcls_object
def _flatten_with_path(dcls):
path = []
keys = []
for k, v in sorted(dcls.__dict__.items()):
k = jax.tree_util.GetAttrKey(k)
path.append((k, v))
keys.append(k)
return path, keys
@functools.cache
def register_dataclass_type_with_jax_tree_util(data_class):
"""Register an existing dataclass so JAX knows how to handle it.
This means that functions in jax.tree_util operate over the fields
of the dataclass. See
https://jax.readthedocs.io/en/latest/pytrees.html#extending-pytrees
for further information.
Args:
data_class: A class created using dataclasses.dataclass. It must be
constructable from keyword arguments corresponding to the members exposed
in instance.__dict__.
"""
flatten = lambda d: jax.util.unzip2(sorted(d.__dict__.items()))[::-1]
unflatten = functools.partial(_dataclass_unflatten, data_class)
try:
jax.tree_util.register_pytree_with_keys(
nodetype=data_class, flatten_with_keys=_flatten_with_path,
flatten_func=flatten, unflatten_func=unflatten)
except ValueError:
logging.info("%s is already registered as JAX PyTree node.", data_class)
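# Illustrative sketch added for this document (not part of the chex source):
# a `dataclass`-decorated class traversed by jax.tree_util and updated via
# `replace`. The `_Params` class, its fields, and the helper name are
# hypothetical.
def _example_chex_dataclass_usage():
  import jax.numpy as jnp

  @dataclass(frozen=True)
  class _Params:
    w: jnp.ndarray
    b: jnp.ndarray

  params = _Params(w=jnp.ones((2, 2)), b=jnp.zeros(2))
  # The class is registered as a pytree node, so tree_map maps over its fields.
  doubled = jax.tree_util.tree_map(lambda a: 2 * a, params)
  # Frozen instances are updated functionally via the generated `replace`.
  updated = params.replace(b=jnp.ones(2))
  return doubled, updated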
|
chex-master
|
chex/_src/dataclass.py
|
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `asserts.py`."""
import functools
import re
from absl.testing import absltest
from absl.testing import parameterized
from chex._src import asserts
from chex._src import asserts_internal
from chex._src import variants
import jax
import jax.numpy as jnp
import numpy as np
_get_err_regex = asserts_internal.get_err_regex
_num_devices_available = asserts_internal.num_devices_available
def as_arrays(arrays):
return [np.asarray(a) for a in arrays]
def array_from_shape(*shape):
return np.ones(shape=shape)
def emplace(arrays):
return arrays
class AssertsSwitchTest(parameterized.TestCase):
"""Tests for enable/disable_asserts."""
def test_enable_disable_asserts(self):
with self.assertRaisesRegex(AssertionError, _get_err_regex('scalar')):
asserts.assert_scalar('test')
asserts.disable_asserts()
asserts.assert_scalar('test')
asserts.enable_asserts()
with self.assertRaisesRegex(AssertionError, _get_err_regex('scalar')):
asserts.assert_scalar('test')
asserts.disable_asserts()
asserts.assert_is_divisible(13, 5)
# To avoid side effects.
asserts.enable_asserts()
class AssertMaxTracesTest(variants.TestCase):
def setUp(self):
super().setUp()
asserts.clear_trace_counter()
def _init(self, fn_, init_type, max_traces, kwargs, static_arg):
"""Initializes common test cases."""
variant_kwargs = {}
if static_arg:
variant_kwargs['static_argnums'] = 1
if kwargs:
args, kwargs = [], {'n': max_traces}
else:
args, kwargs = [max_traces], {}
if init_type == 't1':
@asserts.assert_max_traces(*args, **kwargs)
def fn(x, y):
if static_arg:
self.assertNotIsInstance(y, jax.core.Tracer)
return fn_(x, y)
fn_jitted = self.variant(fn, **variant_kwargs)
elif init_type == 't2':
def fn(x, y):
if static_arg:
self.assertNotIsInstance(y, jax.core.Tracer)
return fn_(x, y)
fn = asserts.assert_max_traces(fn, *args, **kwargs)
fn_jitted = self.variant(fn, **variant_kwargs)
elif init_type == 't3':
def fn(x, y):
if static_arg:
self.assertNotIsInstance(y, jax.core.Tracer)
return fn_(x, y)
@self.variant(**variant_kwargs)
@asserts.assert_max_traces(*args, **kwargs)
def fn_jitted(x, y):
self.assertIsInstance(x, jax.core.Tracer)
return fn_(x, y)
else:
raise ValueError(f'Unknown type {init_type}.')
return fn, fn_jitted
@variants.variants(with_jit=True, with_pmap=True)
@parameterized.named_parameters(
variants.params_product((
('type1', 't1'),
('type2', 't2'),
('type3', 't3'),
), (
('args', False),
('kwargs', True),
), (
('no_static_arg', False),
('with_static_arg', True),
), (
('max_traces_0', 0),
('max_traces_1', 1),
('max_traces_2', 2),
('max_traces_10', 10),
),
named=True))
def test_assert(self, init_type, kwargs, static_arg, max_traces):
fn_ = lambda x, y: x + y
fn, fn_jitted = self._init(fn_, init_type, max_traces, kwargs, static_arg)
# Original function.
for _ in range(max_traces + 3):
self.assertEqual(fn(1, 2), 3)
# Every call results in re-tracing because arguments' shapes are different.
for i in range(max_traces):
for k in range(5):
arg = jnp.zeros(i + 1) + k
np.testing.assert_array_equal(fn_jitted(arg, 2), arg + 2)
# Original function.
for _ in range(max_traces + 3):
self.assertEqual(fn(1, 2), 3)
self.assertEqual(fn([1], [2]), [1, 2])
self.assertEqual(fn('a', 'b'), 'ab')
# (max_traces + 1)-th re-tracing.
with self.assertRaisesRegex(AssertionError,
_get_err_regex('fn.* is traced > .* times!')):
arg = jnp.zeros(max_traces + 1)
fn_jitted(arg, 2)
def test_incorrect_ordering(self):
# pylint:disable=g-error-prone-assert-raises,unused-variable
with self.assertRaisesRegex(ValueError, 'change wrappers ordering'):
@asserts.assert_max_traces(1)
@jax.jit
def fn(_):
pass
def dummy_wrapper(fn):
@functools.wraps(fn)
def fn_wrapped():
return fn()
return fn_wrapped
with self.assertRaisesRegex(ValueError, 'change wrappers ordering'):
@asserts.assert_max_traces(1)
@dummy_wrapper
@jax.jit
def fn_2():
pass
# pylint:enable=g-error-prone-assert-raises,unused-variable
def test_redefined_traced_function(self):
def outer_fn(x):
@jax.jit
@asserts.assert_max_traces(3)
def inner_fn(y):
return y.sum()
return inner_fn(2 * x)
self.assertEqual(outer_fn(1), 2)
self.assertEqual(outer_fn(2), 4)
self.assertEqual(outer_fn(3), 6)
# Fails since the traced inner function is redefined at each call.
with self.assertRaisesRegex(AssertionError,
_get_err_regex('fn.* is traced > .* times!')):
outer_fn(4)
asserts.clear_trace_counter()
for i in range(10):
if i > 2:
with self.assertRaisesRegex(
AssertionError, _get_err_regex('fn.* is traced > .* times!')):
outer_fn(1)
else:
outer_fn(1)
def test_nested_functions(self):
@jax.jit
def jitted_outer_fn(x):
@jax.jit
@asserts.assert_max_traces(1)
def inner_fn(y):
return y.sum()
return inner_fn(2 * x)
# Inner assert_max_traces has no effect since the outer_fn is traced only once.
for i in range(10):
self.assertEqual(jitted_outer_fn(i), 2 * i)
class ScalarAssertTest(parameterized.TestCase):
def test_scalar(self):
asserts.assert_scalar(1)
asserts.assert_scalar(1.)
with self.assertRaisesRegex(AssertionError,
_get_err_regex('must be a scalar')):
asserts.assert_scalar(np.array(1.)) # pytype: disable=wrong-arg-types
def test_scalar_positive(self):
asserts.assert_scalar_positive(0.5)
with self.assertRaisesRegex(AssertionError,
_get_err_regex('must be positive')):
asserts.assert_scalar_positive(-0.5)
def test_scalar_non_negative(self):
asserts.assert_scalar_non_negative(0.5)
asserts.assert_scalar_non_negative(0.)
with self.assertRaisesRegex(AssertionError,
_get_err_regex('must be non-negative')):
asserts.assert_scalar_non_negative(-0.5)
def test_scalar_negative(self):
asserts.assert_scalar_negative(-0.5)
with self.assertRaisesRegex(AssertionError,
_get_err_regex('argument must be negative')):
asserts.assert_scalar_negative(0.5)
def test_scalar_in(self):
asserts.assert_scalar_in(0.5, 0, 1)
with self.assertRaisesRegex(AssertionError,
_get_err_regex('argument must be in')):
asserts.assert_scalar_in(-0.5, 0, 1)
with self.assertRaisesRegex(AssertionError,
_get_err_regex('argument must be in')):
asserts.assert_scalar_in(1.5, 0, 1)
def test_scalar_in_excluded(self):
asserts.assert_scalar_in(0.5, 0, 1, included=False)
with self.assertRaisesRegex(AssertionError,
_get_err_regex('argument must be in')):
asserts.assert_scalar_in(0, 0, 1, included=False)
with self.assertRaisesRegex(AssertionError,
_get_err_regex('argument must be in')):
asserts.assert_scalar_in(1, 0, 1, included=False)
class EqualSizeAssertTest(parameterized.TestCase):
@parameterized.named_parameters(
('scalar_vector_matrix', [1, 2, [3], [[4, 5]]]),
('vector_matrix', [[1], [2], [[3, 5]]]),
('matrix', [[[1, 2]], [[3], [4], [5]]]),
)
def test_equal_size_should_fail(self, arrays):
arrays = as_arrays(arrays)
with self.assertRaisesRegex(
AssertionError, _get_err_regex('Arrays have different sizes')
):
asserts.assert_equal_size(arrays)
@parameterized.named_parameters(
('scalar_vector_matrix', [1, 2, [3], [[4]]]),
('vector_matrix', [[1], [2], [[3]]]),
('matrix', [[[1, 2]], [[3], [4]]]),
)
def test_equal_size_should_pass(self, arrays):
arrays = as_arrays(arrays)
asserts.assert_equal_size(arrays)
class SizeAssertTest(parameterized.TestCase):
@parameterized.named_parameters(
('wrong_size', [1, 2], 2),
('some_wrong_size', [[1, 2], [3, 4]], (2, 3)),
('wrong_common_shape', [[1, 2], [3, 4, 3]], 3),
('wrong_common_shape_2', [[1, 2, 3], [1, 2]], 2),
('some_wrong_size_set', [[1, 2], [3, 4]], (2, {3, 4})),
)
def test_size_should_fail(self, arrays, sizes):
arrays = as_arrays(arrays)
with self.assertRaisesRegex(
AssertionError,
_get_err_regex('input .+ has size .+ but expected .+')):
asserts.assert_size(arrays, sizes)
@parameterized.named_parameters(
('too_many_sizes', [[1]], (1, 1)),
('not_enough_sizes', [[1, 2], [3, 4], [5, 6]], (2, 2)),
)
def test_size_should_fail_wrong_length(self, arrays, sizes):
arrays = as_arrays(arrays)
with self.assertRaisesRegex(
AssertionError,
_get_err_regex('Length of `inputs` and `expected_sizes` must match')):
asserts.assert_size(arrays, sizes)
@parameterized.named_parameters(
('scalars', [1, 2], 1),
('vectors', [[1, 2], [3, 4, 5]], [2, 3]),
('matrices', [[[1, 2], [3, 4]]], 4),
('common_size_set', [[[1, 2], [3, 4]], [[1], [3]]], (4, {1, 2})),
)
def test_size_should_pass(self, arrays, sizes):
arrays = as_arrays(arrays)
asserts.assert_size(arrays, sizes)
def test_pytypes_pass(self):
arrays = as_arrays([[[1, 2], [3, 4]], [[1], [3]]])
asserts.assert_size(arrays, (4, None))
asserts.assert_size(arrays, (4, {1, 2}))
asserts.assert_size(arrays, (4, ...))
@parameterized.named_parameters(
('single_ellipsis', [[1, 2, 3, 4], [1, 2]], (..., 2)),
('multiple_ellipsis', [[1, 2, 3], [1, 2, 3]], (..., ...)),
)
def test_ellipsis_should_pass(self, arrays, expected_size):
arrays = as_arrays(arrays)
asserts.assert_size(arrays, expected_size)
class EqualShapeAssertTest(parameterized.TestCase):
@parameterized.named_parameters(
('not_scalar', [1, 2, [3]]),
('wrong_rank', [[1], [2], 3]),
('wrong_length', [[1], [2], [3, 4]]),
)
def test_equal_shape_should_fail(self, arrays):
arrays = as_arrays(arrays)
with self.assertRaisesRegex(AssertionError,
_get_err_regex('Arrays have different shapes')):
asserts.assert_equal_shape(arrays)
@parameterized.named_parameters(
('scalars', [1, 2, 3]),
('vectors', [[1], [2], [3]]),
('matrices', [[[1], [2]], [[3], [4]]]),
)
def test_equal_shape_should_pass(self, arrays):
arrays = as_arrays(arrays)
asserts.assert_equal_shape(arrays)
@parameterized.named_parameters(
('scalars', [1, 2, 3]),
('vectors', [[1], [2], [[3, 4]]]),
)
def test_equal_shape_prefix_should_pass(self, arrays):
arrays = as_arrays(arrays)
asserts.assert_equal_shape_prefix(arrays, prefix_len=1)
@parameterized.named_parameters(
('scalars', [1, 2, [3]]),
('vectors', [[1], [2], [[3], [4]]]),
)
def test_equal_shape_prefix_should_fail(self, arrays):
arrays = as_arrays(arrays)
with self.assertRaisesRegex(AssertionError,
_get_err_regex('different shape prefixes')):
asserts.assert_equal_shape_prefix(arrays, prefix_len=1)
@parameterized.named_parameters(
('first_dim', [[2, 3], [2, 4], [2, 5]], 0),
('last_dim', [[3, 5, 7], [2, 7], [4, 7]], -1), # Note different ranks.
('first_few_dims', [[1, 2, 3], [1, 2, 4], [1, 2, 5]], [0, 1]),
('first_and_last', [[1, 2, 1], [1, 3, 1], [1, 4, 1]], [0, 2]),
('first_and_last_neg', [[1, 2, 3, 4], [1, 5, 4], [1, 4]], [0, -1]),
)
def test_equal_shape_at_dims_should_pass(self, shapes, dims):
arrays = [array_from_shape(*shape) for shape in shapes]
asserts.assert_equal_shape(arrays, dims=dims)
@parameterized.named_parameters(
('first_dim', [[1, 2], [2, 2]], 0),
('last_dim', [[1, 3], [1, 4]], 1),
('last_dim_neg', [[1, 3], [1, 4]], -1),
('multiple_dims', [[1, 2, 3], [1, 2, 4]], [0, 2]),
)
def test_equal_shape_at_dims_should_fail(self, shapes, dims):
arrays = [array_from_shape(*shape) for shape in shapes]
with self.assertRaisesRegex(
AssertionError, _get_err_regex('have different shapes at dims')):
asserts.assert_equal_shape(arrays, dims=dims)
class ShapeAssertTest(parameterized.TestCase):
@parameterized.named_parameters(
('wrong_rank', [1], (1,)),
('wrong_shape', [1, 2], (1, 3)),
('some_wrong_shape', [[1, 2], [3, 4]], [(1, 2), (1, 3)]),
('wrong_common_shape', [[1, 2], [3, 4, 3]], (2,)),
('wrong_common_shape_2', [[1, 2, 3], [1, 2]], (2,)),
('some_wrong_shape_set', [[1, 2], [3, 4]], [(1, 2), (1, {3, 4})]),
)
def test_shape_should_fail(self, arrays, shapes):
arrays = as_arrays(arrays)
with self.subTest('list'):
with self.assertRaisesRegex(
AssertionError,
_get_err_regex('input .+ has shape .+ but expected .+')):
asserts.assert_shape(arrays, list(shapes))
with self.subTest('tuple'):
with self.assertRaisesRegex(
AssertionError,
_get_err_regex('input .+ has shape .+ but expected .+')):
asserts.assert_shape(arrays, tuple(shapes))
@parameterized.named_parameters(
('too_many_shapes', [[1]], [(1,), (2,)]),
('not_enough_shapes', [[1, 2], [3, 4]], [(3,)]),
)
def test_shape_should_fail_wrong_length(self, arrays, shapes):
arrays = as_arrays(arrays)
with self.assertRaisesRegex(
AssertionError,
_get_err_regex('Length of `inputs` and `expected_shapes` must match')):
asserts.assert_shape(arrays, tuple(shapes))
with self.assertRaisesRegex(
AssertionError,
_get_err_regex('Length of `inputs` and `expected_shapes` must match')):
asserts.assert_shape(arrays, list(shapes))
@parameterized.named_parameters(
('scalars', [1, 2], ()),
('vectors', [[1, 2], [3, 4, 5]], [(2,), (3,)]),
('matrices', [[[1, 2], [3, 4]]], (2, 2)),
('matrices_variable_shape', [[[1, 2], [3, 4]]], (None, 2)),
('vectors_common_shape', [[1, 2], [3, 4]], (2,)),
('variable_common_shape', [[[1, 2], [3, 4]], [[1], [3]]], (2, None)),
('common_shape_set', [[[1, 2], [3, 4]], [[1], [3]]], (2, {1, 2})),
)
def test_shape_should_pass(self, arrays, shapes):
arrays = as_arrays(arrays)
with self.subTest('tuple'):
asserts.assert_shape(arrays, tuple(shapes))
with self.subTest('list'):
asserts.assert_shape(arrays, list(shapes))
@parameterized.named_parameters(
('variable_shape', (2, None)),
('shape_set', (2, {1, 2})),
('suffix', (2, ...)),
)
def test_pytypes_pass(self, shape):
arrays = as_arrays([[[1, 2], [3, 4]], [[1], [3]]])
with self.subTest('tuple'):
asserts.assert_shape(arrays, tuple(shape))
with self.subTest('list'):
asserts.assert_shape(arrays, list(shape))
@parameterized.named_parameters(
('prefix_2', array_from_shape(2, 3, 4, 5, 6), (..., 4, 5, 6)),
('prefix_1', array_from_shape(3, 4, 5, 6), (..., 4, 5, 6)),
('prefix_0', array_from_shape(4, 5, 6), (..., 4, 5, 6)),
('inner_2', array_from_shape(2, 3, 4, 5, 6), (2, 3, ..., 6)),
('inner_1', array_from_shape(2, 3, 4, 6), (2, 3, ..., 6)),
('inner_0', array_from_shape(2, 3, 6), (2, 3, ..., 6)),
('suffix_2', array_from_shape(2, 3, 4, 5, 6), (2, 3, 4, ...)),
('suffix_1', array_from_shape(2, 3, 4, 5), (2, 3, 4, ...)),
('suffix_0', array_from_shape(2, 3, 4), (2, 3, 4, ...)),
)
def test_ellipsis_should_pass(self, array, expected_shape):
with self.subTest('list'):
asserts.assert_shape(array, list(expected_shape))
with self.subTest('tuple'):
asserts.assert_shape(array, tuple(expected_shape))
@parameterized.named_parameters(
('prefix', array_from_shape(3, 1, 5), (..., 4, 5, 6)),
('inner_bad_prefix', array_from_shape(2, 1, 4, 6), (2, 3, ..., 6)),
('inner_bad_suffix', array_from_shape(2, 3, 1, 5), (2, 3, ..., 6)),
('inner_both_bad', array_from_shape(2, 1, 4, 5), (2, 3, ..., 6)),
('suffix', array_from_shape(2, 3, 1, 5), (2, 3, 4, ...)),
('short_rank_prefix', array_from_shape(2, 3), (..., 4, 5, 6)),
('short_rank_inner', array_from_shape(2, 3), (2, 3, ..., 6)),
('short_rank_suffix', array_from_shape(2, 3), (2, 3, 4, ...)),
)
def test_ellipsis_should_fail(self, array, expected_shape):
with self.subTest('tuple'):
with self.assertRaisesRegex(
AssertionError,
_get_err_regex('input .+ has shape .+ but expected .+')):
asserts.assert_shape(array, tuple(expected_shape))
with self.subTest('list'):
with self.assertRaisesRegex(
AssertionError,
_get_err_regex('input .+ has shape .+ but expected .+')):
asserts.assert_shape(array, list(expected_shape))
@parameterized.named_parameters(
('prefix_and_suffix', array_from_shape(2, 3), (..., 2, 3, ...)),)
def test_multiple_ellipses(self, array, expected_shape):
with self.assertRaisesRegex( # pylint: disable=g-error-prone-assert-raises
ValueError,
'`expected_shape` may not contain more than one ellipsis, but got .+'):
asserts.assert_shape(array, expected_shape)
def rank_array(n):
return np.zeros(shape=[2] * n)
class BroadcastAssertTest(parameterized.TestCase):
@parameterized.parameters(
{'shape_a': (), 'shape_b': ()},
{'shape_a': (), 'shape_b': (2, 3)},
{'shape_a': (2, 3), 'shape_b': (2, 3)},
{'shape_a': (1, 3), 'shape_b': (2, 3)},
{'shape_a': (2, 1), 'shape_b': (2, 3)},
{'shape_a': (4,), 'shape_b': (2, 3, 4)},
{'shape_a': (3, 4), 'shape_b': (2, 3, 4)},
)
def test_shapes_are_broadcastable(self, shape_a, shape_b):
asserts.assert_is_broadcastable(shape_a, shape_b)
@parameterized.parameters(
{'shape_a': (2,), 'shape_b': ()},
{'shape_a': (2, 3, 4), 'shape_b': (3, 4)},
{'shape_a': (3, 5), 'shape_b': (3, 4)},
{'shape_a': (3, 4), 'shape_b': (3, 1)},
{'shape_a': (3, 4), 'shape_b': (1, 4)},
)
def test_shapes_are_not_broadcastable(self, shape_a, shape_b):
with self.assertRaises(AssertionError):
asserts.assert_is_broadcastable(shape_a, shape_b)
class RankAssertTest(parameterized.TestCase):
def test_rank_should_fail_array_expectations(self):
with self.assertRaisesRegex( # pylint: disable=g-error-prone-assert-raises
ValueError,
'expected ranks should be .* but was an array'):
asserts.assert_rank(rank_array(2), np.array([2]))
def test_rank_should_fail_wrong_expectation_structure(self):
# pytype: disable=wrong-arg-types
with self.assertRaisesRegex( # pylint: disable=g-error-prone-assert-raises
ValueError, 'Expected ranks should be integers or sets of integers'):
asserts.assert_rank(rank_array(2), [[1, 2]])
with self.assertRaisesRegex( # pylint: disable=g-error-prone-assert-raises
ValueError, 'Expected ranks should be integers or sets of integers'):
asserts.assert_rank([rank_array(1), rank_array(2)], [[1], [2]])
# pytype: enable=wrong-arg-types
@parameterized.named_parameters(
('rank_1', rank_array(1), 2),
('rank_2', rank_array(2), 1),
('rank_3', rank_array(3), {2, 4}),
)
def test_rank_should_fail_single(self, array, rank):
array = np.asarray(array)
with self.assertRaisesRegex(
AssertionError, _get_err_regex('input .+ has rank .+ but expected .+')):
asserts.assert_rank(array, rank)
@parameterized.named_parameters(
('wrong_1', [rank_array(1), rank_array(2)], [2, 2]),
('wrong_2', [rank_array(1), rank_array(2)], [1, 3]),
('wrong_3', [rank_array(1), rank_array(2)], [{2, 3}, 2]),
('wrong_4', [rank_array(1), rank_array(2)], [1, {1, 3}]),
)
def test_assert_rank_should_fail_sequence(self, arrays, ranks):
arrays = as_arrays(arrays)
with self.assertRaisesRegex(
AssertionError, _get_err_regex('input .+ has rank .+ but expected .+')):
asserts.assert_rank(arrays, ranks)
@parameterized.named_parameters(
('not_enough_ranks', [1, 3, 4], [1, 1]),
('too_many_ranks', [1, 2], [1, 1, 1]),
)
def test_rank_should_fail_wrong_length(self, array, rank):
array = np.asarray(array)
with self.assertRaisesRegex(
AssertionError,
_get_err_regex('Length of inputs and expected_ranks must match.')):
asserts.assert_rank(array, rank)
@parameterized.named_parameters(
('rank_1', rank_array(1), 1),
('rank_2', rank_array(2), 2),
('rank_3', rank_array(3), {1, 2, 3}),
)
def test_rank_should_pass_single_input(self, array, rank):
array = np.asarray(array)
asserts.assert_rank(array, rank)
@parameterized.named_parameters(
('rank_1', rank_array(1), 1),
('rank_2', rank_array(2), 2),
('rank_3', rank_array(3), {1, 2, 3}),
)
def test_rank_should_pass_repeated_input(self, array, rank):
arrays = as_arrays([array] * 3)
asserts.assert_rank(arrays, rank)
@parameterized.named_parameters(
('single_option', [rank_array(1), rank_array(2)], {1, 2}),
('seq_options_1', [rank_array(1), rank_array(2)], [{1, 2}, 2]),
('seq_options_2', [rank_array(1), rank_array(2)], [1, {1, 2}]),
)
def test_rank_should_pass_multiple_options(self, arrays, ranks):
arrays = as_arrays(arrays)
asserts.assert_rank(arrays, ranks)
class TypeAssertTest(parameterized.TestCase):
@parameterized.named_parameters(
('one_float', 3., int),
('one_int', 3, float),
('many_floats', [1., 2., 3.], int),
('many_floats_verbose', [1., 2., 3.], [float, float, int]),
('one_bool_as_float', True, float),
('one_bool_as_int', True, int),
('one_float_as_bool', 3., bool),
('one_int_as_bool', 3, bool),
)
def test_type_should_fail_scalar(self, scalars, wrong_type):
with self.assertRaisesRegex(
AssertionError, _get_err_regex('input .+ has type .+ but expected .+')):
asserts.assert_type(scalars, wrong_type)
@variants.variants(with_device=True, without_device=True)
@parameterized.named_parameters(
('one_float_array', [1., 2.], int),
('one_int_array', [1, 2], float),
)
def test_type_should_fail_array(self, array, wrong_type):
array = self.variant(emplace)(array)
with self.assertRaisesRegex(
AssertionError, _get_err_regex('input .+ has type .+ but expected .+')):
asserts.assert_type(array, wrong_type)
@parameterized.named_parameters(
('one_float', 3., float),
('one_int', 3, int),
('one_bool', True, bool),
('many_floats', [1., 2., 3.], float),
('many_floats_verbose', [1., 2., 3.], [float, float, float]),
)
def test_type_should_pass_scalar(self, array, wrong_type):
asserts.assert_type(array, wrong_type)
@variants.variants(with_device=True, without_device=True)
@parameterized.named_parameters(
('one_float_array', [1., 2.], float),
('one_int_array', [1, 2], int),
)
def test_type_should_pass_array(self, array, wrong_type):
array = self.variant(emplace)(array)
asserts.assert_type(array, wrong_type)
def test_type_should_fail_mixed(self):
a_float = 1.
an_int = 2
a_np_float = np.asarray([3., 4.])
a_jax_int = jnp.asarray([5, 6])
with self.assertRaisesRegex(
AssertionError, _get_err_regex('input .+ has type .+ but expected .+')):
asserts.assert_type([a_float, an_int, a_np_float, a_jax_int],
[float, int, float, float])
def test_type_should_pass_mixed(self):
a_float = 1.
an_int = 2
a_np_float = np.asarray([3., 4.])
a_jax_int = jnp.asarray([5, 6])
asserts.assert_type([a_float, an_int, a_np_float, a_jax_int],
[float, int, float, int])
@parameterized.named_parameters(
('too_many_types', [1., 2], [float, int, float]),
('not_enough_types', [1., 2], [float]),
)
def test_type_should_fail_wrong_length(self, array, wrong_type):
with self.assertRaisesRegex(
AssertionError,
_get_err_regex('Length of `inputs` and `expected_types` must match')):
asserts.assert_type(array, wrong_type)
def test_type_should_fail_unsupported_dtype(self):
a_float = 1.
an_int = 2
a_np_float = np.asarray([3., 4.])
a_jax_int = jnp.asarray([5, 6])
with self.assertRaisesRegex(AssertionError,
_get_err_regex('unsupported dtype')):
asserts.assert_type([a_float, an_int, a_np_float, a_jax_int],
[complex, complex, float, int])
class AxisDimensionAssertionsTest(parameterized.TestCase):
def test_assert_axis_dimension_pass(self):
tensor = jnp.ones((3, 2, 7, 2))
for i in range(-tensor.ndim, tensor.ndim):
s = tensor.shape[i]
asserts.assert_axis_dimension(tensor, axis=i, expected=s)
def test_assert_axis_dimension_fail(self):
tensor = jnp.ones((3, 2, 7, 2))
for i in range(-tensor.ndim, tensor.ndim):
s = tensor.shape[i]
with self.assertRaisesRegex(
AssertionError, _get_err_regex('Expected tensor to have dimension')):
asserts.assert_axis_dimension(tensor, axis=i, expected=s + 1)
def test_assert_axis_dimension_axis_invalid(self):
tensor = jnp.ones((3, 2))
for i in (2, -3):
with self.assertRaisesRegex(AssertionError,
_get_err_regex('not available')):
asserts.assert_axis_dimension(tensor, axis=i, expected=1)
def test_assert_axis_dimension_gt_pass(self):
tensor = jnp.ones((3, 2, 7, 2))
for i in range(-tensor.ndim, tensor.ndim):
s = tensor.shape[i]
asserts.assert_axis_dimension_gt(tensor, axis=i, val=s - 1)
def test_assert_axis_dimension_gt_fail(self):
tensor = jnp.ones((3, 2, 7, 2))
for i in range(-tensor.ndim, tensor.ndim):
s = tensor.shape[i]
with self.assertRaisesRegex(
AssertionError,
_get_err_regex('Expected tensor to have dimension greater than')):
asserts.assert_axis_dimension_gt(tensor, axis=i, val=s)
def test_assert_axis_dimension_gt_axis_invalid(self):
tensor = jnp.ones((3, 2))
for i in (2, -3):
with self.assertRaisesRegex(AssertionError,
_get_err_regex('not available')):
asserts.assert_axis_dimension_gt(tensor, axis=i, val=0)
def test_assert_axis_dimension_gteq_pass(self):
tensor = jnp.ones((3, 2, 7, 2))
for i in range(-tensor.ndim, tensor.ndim):
s = tensor.shape[i]
asserts.assert_axis_dimension_gteq(tensor, axis=i, val=s)
def test_assert_axis_dimension_gteq_fail(self):
tensor = jnp.ones((3, 2, 7, 2))
for i in range(-tensor.ndim, tensor.ndim):
s = tensor.shape[i]
with self.assertRaisesRegex(
AssertionError,
_get_err_regex('Expected tensor to have dimension greater than or')):
asserts.assert_axis_dimension_gteq(tensor, axis=i, val=s + 1)
def test_assert_axis_dimension_gteq_axis_invalid(self):
tensor = jnp.ones((3, 2))
for i in (2, -3):
with self.assertRaisesRegex(AssertionError,
_get_err_regex('not available')):
asserts.assert_axis_dimension_gteq(tensor, axis=i, val=0)
def test_assert_axis_dimension_lt_pass(self):
tensor = jnp.ones((3, 2, 7, 2))
for i in range(-tensor.ndim, tensor.ndim):
s = tensor.shape[i]
asserts.assert_axis_dimension_lt(tensor, axis=i, val=s + 1)
def test_assert_axis_dimension_lt_fail(self):
tensor = jnp.ones((3, 2, 7, 2))
for i in range(-tensor.ndim, tensor.ndim):
s = tensor.shape[i]
with self.assertRaisesRegex(
AssertionError,
_get_err_regex('Expected tensor to have dimension less than')):
asserts.assert_axis_dimension_lt(tensor, axis=i, val=s)
def test_assert_axis_dimension_lt_axis_invalid(self):
tensor = jnp.ones((3, 2))
for i in (2, -3):
with self.assertRaisesRegex(AssertionError,
_get_err_regex('not available')):
asserts.assert_axis_dimension_lt(tensor, axis=i, val=0)
def test_assert_axis_dimension_lteq_pass(self):
tensor = jnp.ones((3, 2, 7, 2))
for i in range(-tensor.ndim, tensor.ndim):
s = tensor.shape[i]
asserts.assert_axis_dimension_lteq(tensor, axis=i, val=s)
def test_assert_axis_dimension_lteq_fail(self):
tensor = jnp.ones((3, 2, 7, 2))
for i in range(-tensor.ndim, tensor.ndim):
s = tensor.shape[i]
with self.assertRaisesRegex(
AssertionError,
_get_err_regex('Expected tensor to have dimension less than or')):
asserts.assert_axis_dimension_lteq(tensor, axis=i, val=s - 1)
def test_assert_axis_dimension_lteq_axis_invalid(self):
tensor = jnp.ones((3, 2))
for i in (2, -3):
with self.assertRaisesRegex(AssertionError,
_get_err_regex('not available')):
asserts.assert_axis_dimension_lteq(tensor, axis=i, val=0)
def test_assert_axis_dimension_string_tensor(self):
tensor = ['ab', 'cddd']
asserts.assert_axis_dimension(tensor, axis=0, expected=2)
asserts.assert_axis_dimension(np.array(tensor), axis=0, expected=2)
class TreeAssertionsTest(parameterized.TestCase):
def _assert_tree_structs_validation(self, assert_fn):
"""Checks that assert_fn correctly processes invalid args' structs."""
get_val = lambda: jnp.zeros([3])
tree1 = [[get_val(), get_val()], get_val()]
tree2 = [[get_val(), get_val()], get_val()]
tree3 = [get_val(), [get_val(), get_val()]]
tree4 = [get_val(), [get_val(), get_val()], get_val()]
tree5 = dict(x=1, y=2, z=3)
tree6 = dict(x=1, y=2, z=3, n=None)
with self.assertRaisesRegex(
AssertionError,
_get_err_regex('Error in tree structs equality check.*trees 0 and 1')):
assert_fn(tree1, tree5)
with self.assertRaisesRegex(
AssertionError,
_get_err_regex('Error in tree structs equality check.*trees 0 and 1')):
assert_fn(tree1, tree3)
with self.assertRaisesRegex(
AssertionError,
_get_err_regex('Error in tree structs equality check.*trees 0 and 2')):
assert_fn([], [], tree1)
with self.assertRaisesRegex(
AssertionError,
_get_err_regex('Error in tree structs equality check.*trees 0 and 3')):
assert_fn(tree2, tree1, tree2, tree3, tree1)
with self.assertRaisesRegex(
AssertionError,
_get_err_regex('Error in tree structs equality check.*trees 0 and 2')):
assert_fn(tree2, tree1, tree4)
# Test `None`s.
with self.assertRaisesRegex(
AssertionError,
_get_err_regex('Error in tree structs equality check.*trees 0 and 1')):
assert_fn(tree5, tree6)
def test_assert_tree_no_nones(self):
with self.subTest('tree_no_nones'):
tree_ok = {'a': [jnp.zeros((1,))], 'b': 1}
asserts.assert_tree_no_nones(tree_ok)
with self.subTest('tree_with_nones'):
tree_with_none = {'a': [jnp.zeros((1,))], 'b': None}
with self.assertRaisesRegex(
AssertionError, _get_err_regex('Tree contains `None`')
):
asserts.assert_tree_no_nones(tree_with_none)
# Check `None`.
with self.subTest('input_none'):
with self.assertRaisesRegex(
AssertionError, _get_err_regex('Tree contains `None`')
):
asserts.assert_tree_no_nones(None)
def test_tree_all_finite_passes_finite(self):
finite_tree = {'a': jnp.ones((3,)), 'b': jnp.array([0.0, 0.0])}
asserts.assert_tree_all_finite(finite_tree)
self.assertTrue(asserts._assert_tree_all_finite_jittable(finite_tree))
def test_tree_all_finite_should_fail_inf(self):
inf_tree = {
'finite_var': jnp.ones((3,)),
'inf_var': jnp.array([0.0, jnp.inf]),
}
err_msg = 'Tree contains non-finite value'
with self.assertRaisesRegex(AssertionError, _get_err_regex(err_msg)):
asserts.assert_tree_all_finite(inf_tree)
with self.assertRaisesRegex(ValueError, err_msg):
asserts._assert_tree_all_finite_jittable(inf_tree)
def test_assert_trees_all_equal_passes_same_tree(self):
tree = {
'a': [jnp.zeros((1,))],
'b': ([0], (0,), 0),
}
asserts.assert_trees_all_equal(tree, tree)
tree = jax.tree_map(jnp.asarray, tree)
self.assertTrue(asserts._assert_trees_all_equal_jittable(tree, tree))
def test_assert_trees_all_equal_passes_values_equal(self):
tree1 = (jnp.array([0.0, 0.0]),)
tree2 = (jnp.array([0.0, 0.0]),)
asserts.assert_trees_all_equal(tree1, tree2)
self.assertTrue(asserts._assert_trees_all_equal_jittable(tree1, tree2))
def test_assert_trees_all_equal_fail_values_close_but_not_equal(self):
tree1 = (jnp.array([1.0, 1.0]),)
tree2 = (jnp.array([1.0, 1.0 + 5e-7]),)
error_msg = 'Values not exactly equal'
with self.assertRaisesRegex(AssertionError, _get_err_regex(error_msg)):
asserts.assert_trees_all_equal(tree1, tree2)
with self.assertRaisesRegex(ValueError, error_msg):
asserts._assert_trees_all_equal_jittable(tree1, tree2)
def test_assert_trees_all_equal_strict_mode(self):
# See 'notes' section of
# https://numpy.org/doc/stable/reference/generated/numpy.testing.assert_array_equal.html
# for details about the 'strict' mode of `numpy.testing.assert_array_equal`.
# tl;dr: it has special handling for scalar values (by default).
tree1 = {'a': jnp.array([1.0], dtype=jnp.float32), 'b': 0.0}
tree2 = {'a': jnp.array(1.0, dtype=jnp.float32), 'b': 0.0}
asserts.assert_trees_all_equal(tree1, tree2)
asserts.assert_trees_all_equal(tree1, tree2, strict=False)
err_regex = _get_err_regex(r'Trees 0 and 1 differ in leaves \'a\'')
with self.assertRaisesRegex(AssertionError, err_regex):
asserts.assert_trees_all_equal(tree1, tree2, strict=True)
err_regex = r'Trees 0 and 1 differ in leaves'
with self.assertRaisesRegex(ValueError, err_regex):
asserts._assert_trees_all_equal_jittable(tree1, tree2, strict=True)
# We do not implement this special scalar handling in the jittable
# assertion (it's possible, but doesn't seem worth the effort).
err_regex = r'`strict=False` is not implemented'
with self.assertRaisesRegex(NotImplementedError, err_regex):
asserts._assert_trees_all_equal_jittable(tree1, tree2, strict=False)
def test_assert_trees_all_close_passes_same_tree(self):
tree = {
'a': [jnp.zeros((1,))],
'b': ([0], (0,), 0),
}
asserts.assert_trees_all_close(tree, tree)
tree = jax.tree_map(jnp.asarray, tree)
self.assertTrue(asserts._assert_trees_all_close_jittable(tree, tree))
def test_assert_trees_all_close_passes_values_equal(self):
tree1 = (jnp.array([0.0, 0.0]),)
tree2 = (jnp.array([0.0, 0.0]),)
asserts.assert_trees_all_close(tree1, tree2)
self.assertTrue(asserts._assert_trees_all_close_jittable(tree1, tree2))
def test_assert_trees_all_close_passes_values_close_but_not_equal(self):
tree1 = (jnp.array([1.0, 1.0]),)
tree2 = (jnp.array([1.0, 1.0 + 5e-7]),)
asserts.assert_trees_all_close(tree1, tree2, rtol=1e-6)
self.assertTrue(
asserts._assert_trees_all_close_jittable(tree1, tree2, rtol=1e-6))
def test_assert_trees_all_close_bfloat16(self):
tree1 = {'a': jnp.asarray([0.8, 1.6], dtype=jnp.bfloat16)}
tree2 = {
'a': jnp.asarray([0.8, 1.6], dtype=jnp.bfloat16).astype(jnp.float32)
}
tree3 = {'a': jnp.asarray([0.8, 1.7], dtype=jnp.bfloat16)}
asserts.assert_trees_all_close(tree1, tree1)
asserts.assert_trees_all_close(tree1, tree2)
self.assertTrue(asserts._assert_trees_all_close_jittable(tree1, tree2))
err_msg = 'Values not approximately equal'
err_regex = _get_err_regex(err_msg)
with self.assertRaisesRegex(AssertionError, err_regex):
asserts.assert_trees_all_close(tree1, tree3)
with self.assertRaisesRegex(ValueError, err_msg):
asserts._assert_trees_all_close_jittable(tree1, tree3)
with self.assertRaisesRegex(AssertionError, err_regex):
asserts.assert_trees_all_close(tree2, tree3)
with self.assertRaisesRegex(ValueError, err_msg):
asserts._assert_trees_all_close_jittable(tree2, tree3)
def test_assert_trees_all_close_ulp_jittable_raises_valueerror(self):
tree = (jnp.array([1.0]),)
err_msg = 'assert_trees_all_close_ulp is not supported within JIT contexts.'
err_regex = _get_err_regex(err_msg)
with self.assertRaisesRegex(RuntimeError, err_regex):
asserts._assert_trees_all_close_ulp_jittable(tree, tree)
def test_assert_trees_all_close_ulp_passes_same_tree(self):
tree = {
'a': [jnp.zeros((1,))],
'b': ([0], (0,), 0),
}
asserts.assert_trees_all_close_ulp(tree, tree)
def test_assert_trees_all_close_ulp_passes_values_equal(self):
tree1 = (jnp.array([0.0, 0.0]),)
tree2 = (jnp.array([0.0, 0.0]),)
try:
asserts.assert_trees_all_close_ulp(tree1, tree2)
except AssertionError:
self.fail('assert_trees_all_close_ulp raised AssertionError')
def test_assert_trees_all_close_ulp_passes_values_within_maxulp(self):
# np.spacing(np.float32(1 << 23)) == 1.0.
value_where_ulp_is_1 = np.float32(1 << 23)
tree1 = (jnp.array([value_where_ulp_is_1, value_where_ulp_is_1]),)
tree2 = (jnp.array([value_where_ulp_is_1, value_where_ulp_is_1 + 1.0]),)
assert tree2[0][0] != tree2[0][1]
try:
asserts.assert_trees_all_close_ulp(tree1, tree2, maxulp=2)
except AssertionError:
self.fail('assert_trees_all_close_ulp raised AssertionError')
def test_assert_trees_all_close_ulp_passes_values_maxulp_apart(self):
# np.spacing(np.float32(1 << 23)) == 1.0.
value_where_ulp_is_1 = np.float32(1 << 23)
tree1 = (jnp.array([value_where_ulp_is_1, value_where_ulp_is_1]),)
tree2 = (jnp.array([value_where_ulp_is_1, value_where_ulp_is_1 + 1.0]),)
assert tree2[0][0] != tree2[0][1]
try:
asserts.assert_trees_all_close_ulp(tree1, tree2, maxulp=1)
except AssertionError:
self.fail('assert_trees_all_close_ulp raised AssertionError')
def test_assert_trees_all_close_ulp_fails_values_gt_maxulp_apart(self):
# np.spacing(np.float32(1 << 23)) == 1.0.
value_where_ulp_is_1 = np.float32(1 << 23)
tree1 = (jnp.array([value_where_ulp_is_1, value_where_ulp_is_1]),)
tree2 = (jnp.array([value_where_ulp_is_1, value_where_ulp_is_1 + 2.0]),)
assert tree2[0][0] != tree2[0][1]
err_msg = re.escape(
'not almost equal up to 1 ULP (max difference is 2 ULP)'
)
err_regex = _get_err_regex(err_msg)
with self.assertRaisesRegex(AssertionError, err_regex):
asserts.assert_trees_all_close_ulp(tree1, tree2, maxulp=1)
def test_assert_trees_all_close_ulp_fails_bfloat16(self):
tree_f32 = (jnp.array([0.0]),)
tree_bf16 = (jnp.array([0.0], dtype=jnp.bfloat16),)
err_msg = 'ULP assertions are not currently supported for bfloat16.'
err_regex = _get_err_regex(err_msg)
with self.assertRaisesRegex(ValueError, err_regex): # pylint: disable=g-error-prone-assert-raises
asserts.assert_trees_all_close_ulp(tree_bf16, tree_bf16)
with self.assertRaisesRegex(ValueError, err_regex): # pylint: disable=g-error-prone-assert-raises
asserts.assert_trees_all_close_ulp(tree_bf16, tree_f32)
def test_assert_tree_has_only_ndarrays(self):
# Check correct inputs.
asserts.assert_tree_has_only_ndarrays({'a': jnp.zeros(1), 'b': np.ones(3)})
asserts.assert_tree_has_only_ndarrays(np.zeros(4))
asserts.assert_tree_has_only_ndarrays(())
# Check incorrect inputs.
with self.assertRaisesRegex(AssertionError,
_get_err_regex('\'b\' is not an ndarray')):
asserts.assert_tree_has_only_ndarrays({'a': jnp.zeros((1,)), 'b': 1})
with self.assertRaisesRegex(AssertionError,
_get_err_regex('\'b/1\' is not an ndarray')):
asserts.assert_tree_has_only_ndarrays({'a': jnp.zeros(101), 'b': [1, 2]})
def test_assert_tree_is_on_host(self):
cpu = jax.devices('cpu')[0]
# Check Numpy arrays.
for flag in (False, True):
asserts.assert_tree_is_on_host({'a': np.zeros(1), 'b': np.ones(3)},
allow_cpu_device=flag)
asserts.assert_tree_is_on_host(np.zeros(4), allow_cpu_device=flag)
asserts.assert_tree_is_on_host(
jax.device_get(jax.device_put(np.ones(3))), allow_cpu_device=flag)
asserts.assert_tree_is_on_host((), allow_cpu_device=flag)
# Check DeviceArray (for platforms other than CPU).
with self.assertRaisesRegex(AssertionError,
_get_err_regex('\'a\' resides on')):
asserts.assert_tree_is_on_host({'a': jnp.zeros(1)},
allow_cpu_device=False)
with self.assertRaisesRegex(AssertionError,
_get_err_regex('\'a\' resides on')):
asserts.assert_tree_is_on_host({'a': jax.device_put(np.zeros(1))},
allow_cpu_device=False)
# Check Jax arrays on CPU.
cpu_arr = jax.device_put(np.ones(5), cpu)
asserts.assert_tree_is_on_host({'a': cpu_arr})
asserts.assert_tree_is_on_host({'a': np.zeros(1), 'b': cpu_arr})
# Check sharded Jax arrays on CPUs.
asserts.assert_tree_is_on_host(
{'a': jax.device_put_replicated(np.zeros(1), (cpu,))},
allow_cpu_device=True,
allow_sharded_arrays=True,
)
# Disallow JAX arrays on CPU.
with self.assertRaisesRegex(AssertionError,
_get_err_regex('\'a\' resides on.*CPU')):
asserts.assert_tree_is_on_host({'a': cpu_arr},
allow_cpu_device=False)
with self.assertRaisesRegex(AssertionError,
_get_err_regex('\'b\' resides on.*CPU')):
asserts.assert_tree_is_on_host({'a': np.zeros(1), 'b': cpu_arr},
allow_cpu_device=False)
# Check incorrect inputs.
with self.assertRaisesRegex(AssertionError,
_get_err_regex('\'b\' is not an ndarray')):
asserts.assert_tree_is_on_host({'a': np.zeros(1), 'b': 1})
# ShardedArrays are disallowed.
with self.assertRaisesRegex(
AssertionError, _get_err_regex('sharded arrays are disallowed')
):
asserts.assert_tree_is_on_host(
{'a': jax.device_put_replicated(np.zeros(1), (cpu,))},
allow_cpu_device=False,
)
# ShardedArrays on CPUs, CPUs disallowed.
with self.assertRaisesRegex(
AssertionError, _get_err_regex("'a' is sharded and resides on.*CPU")
):
asserts.assert_tree_is_on_host(
{'a': jax.device_put_replicated(np.zeros(1), (cpu,))},
allow_cpu_device=False,
allow_sharded_arrays=True,
)
def test_assert_tree_is_on_device(self):
# Check CPU platform.
cpu = jax.devices('cpu')[0]
to_cpu = lambda x: jax.device_put(x, cpu)
cpu_tree = {'a': to_cpu(np.zeros(1)), 'b': to_cpu(np.ones(3))}
asserts.assert_tree_is_on_device(cpu_tree, device=cpu)
asserts.assert_tree_is_on_device(cpu_tree, platform='cpu')
asserts.assert_tree_is_on_device(cpu_tree, platform=['cpu'])
asserts.assert_tree_is_on_device(cpu_tree, device=cpu, platform='')
with self.assertRaisesRegex(AssertionError,
_get_err_regex('\'a\' resides on \'cpu\'')):
asserts.assert_tree_is_on_device(cpu_tree, platform='tpu')
with self.assertRaisesRegex(AssertionError,
_get_err_regex('\'b\' resides on \'cpu\'')):
asserts.assert_tree_is_on_device(cpu_tree, platform=('tpu', 'gpu'))
# Check TPU platform (if available).
if _num_devices_available('tpu') > 1:
tpu_1, tpu_2 = jax.devices('tpu')[:2]
to_tpu_1 = lambda x: jax.device_put(x, tpu_1)
to_tpu_2 = lambda x: jax.device_put(x, tpu_2)
tpu_1_tree = {'a': to_tpu_1(np.zeros(1)), 'b': to_tpu_1(np.ones(3))}
tpu_2_tree = {'a': to_tpu_2(np.zeros(1)), 'b': to_tpu_2(np.ones(3))}
tpu_1_2_tree = {'a': to_tpu_1(np.zeros(1)), 'b': to_tpu_2(np.ones(3))}
# Device asserts.
asserts.assert_tree_is_on_device(tpu_1_tree, device=tpu_1)
asserts.assert_tree_is_on_device(tpu_2_tree, device=tpu_2)
with self.assertRaisesRegex(AssertionError,
_get_err_regex('\'a\' resides on TPU_0')):
asserts.assert_tree_is_on_device(tpu_1_tree, device=tpu_2)
with self.assertRaisesRegex(AssertionError,
_get_err_regex('\'a\' resides on TPU_1')):
asserts.assert_tree_is_on_device(tpu_2_tree, device=tpu_1)
with self.assertRaisesRegex(AssertionError,
_get_err_regex('\'a\' resides on .*CPU')):
asserts.assert_tree_is_on_device(cpu_tree, device=tpu_2)
# Platform asserts.
asserts.assert_tree_is_on_device(tpu_1_tree, platform='tpu')
asserts.assert_tree_is_on_device(tpu_2_tree, platform='tpu')
with self.assertRaisesRegex(AssertionError,
_get_err_regex('\'a\' resides on \'tpu\'')):
asserts.assert_tree_is_on_device(tpu_1_tree, platform='cpu')
with self.assertRaisesRegex(AssertionError,
_get_err_regex('\'a\' resides on \'tpu\'')):
asserts.assert_tree_is_on_device(tpu_2_tree, platform='gpu')
# Mixed cases.
asserts.assert_tree_is_on_device(tpu_1_2_tree, platform='tpu')
asserts.assert_tree_is_on_device((tpu_1_2_tree, cpu_tree),
platform=('cpu', 'tpu'))
with self.assertRaisesRegex(AssertionError,
_get_err_regex('\'1/a\' resides on \'cpu\'')):
asserts.assert_tree_is_on_device((tpu_1_2_tree, cpu_tree),
platform='tpu')
with self.assertRaisesRegex(AssertionError,
_get_err_regex('\'0/a\' resides on \'tpu\'')):
asserts.assert_tree_is_on_device((tpu_1_2_tree, cpu_tree),
platform=('cpu', 'gpu'))
# Check incorrect inputs.
with self.assertRaisesRegex(AssertionError,
_get_err_regex('\'b\' is not an ndarray')):
asserts.assert_tree_is_on_device({'a': np.zeros(1), 'b': 1})
with self.assertRaisesRegex(AssertionError,
_get_err_regex('\'b\' has unexpected type')):
asserts.assert_tree_is_on_device({'a': jnp.zeros(1), 'b': np.ones(3)})
with self.assertRaisesRegex(AssertionError,
_get_err_regex('\'a\' is a ShardedDeviceArra')):
# ShardedArrays are disallowed.
asserts.assert_tree_is_on_device(
{'a': jax.device_put_replicated(np.zeros(1), (cpu,))}, device=cpu)
def test_assert_tree_is_sharded(self):
np_tree = {'a': np.zeros(1), 'b': np.ones(3)}
def _format(*devs):
return re.escape(f'{devs}')
# Check single-device case.
cpu = jax.devices('cpu')[0]
cpu_tree = jax.device_put_replicated(np_tree, (cpu,))
asserts.assert_tree_is_sharded(cpu_tree, devices=(cpu,))
asserts.assert_tree_is_sharded((), devices=(cpu,))
with self.assertRaisesRegex(
AssertionError, _get_err_regex(r'\'a\' is sharded.*expected \(\)')):
asserts.assert_tree_is_sharded(cpu_tree, devices=())
with self.assertRaisesRegex(
AssertionError,
_get_err_regex(f'\'a\' is sharded across {_format(cpu)}.*'
f'expected {_format(cpu, cpu)}')):
asserts.assert_tree_is_sharded(cpu_tree, devices=(cpu, cpu))
# Check multiple-devices case (if available).
if _num_devices_available('tpu') > 1:
tpu_1, tpu_2 = jax.devices('tpu')[:2]
tpu_1_tree = jax.device_put_replicated(np_tree, (tpu_1,))
tpu_2_tree = jax.device_put_replicated(np_tree, (tpu_2,))
tpu_1_2_tree = jax.device_put_replicated(np_tree, (tpu_1, tpu_2))
tpu_2_1_tree = jax.device_put_replicated(np_tree, (tpu_2, tpu_1))
asserts.assert_tree_is_sharded(tpu_1_2_tree, devices=(tpu_1, tpu_2))
asserts.assert_tree_is_sharded(tpu_2_1_tree, devices=(tpu_2, tpu_1))
# Wrong device.
with self.assertRaisesRegex(
AssertionError,
_get_err_regex(f'\'a\' is sharded across {_format(tpu_1)}.*'
f'expected {_format(tpu_2)}')):
asserts.assert_tree_is_sharded(tpu_1_tree, devices=(tpu_2,))
with self.assertRaisesRegex(
AssertionError,
_get_err_regex(f'\'a\' is sharded across {_format(cpu)}.*'
f'expected {_format(tpu_2)}')):
asserts.assert_tree_is_sharded(cpu_tree, devices=(tpu_2,))
# Too many devices.
with self.assertRaisesRegex(
AssertionError,
_get_err_regex(f'\'a\' is sharded across {_format(tpu_1)}.*'
f'expected {_format(tpu_1, tpu_2)}')):
asserts.assert_tree_is_sharded(tpu_1_tree, devices=(tpu_1, tpu_2))
with self.assertRaisesRegex(
AssertionError,
_get_err_regex(f'\'a\' is sharded across {_format(tpu_1, tpu_2)}.*'
f'expected {_format(tpu_1, tpu_2, cpu)}')):
asserts.assert_tree_is_sharded(
tpu_1_2_tree, devices=(tpu_1, tpu_2, cpu))
# Wrong order.
with self.assertRaisesRegex(
AssertionError,
_get_err_regex(f'\'a\' is sharded across {_format(tpu_2, tpu_1)}.*'
f'expected {_format(tpu_1, tpu_2)}')):
asserts.assert_tree_is_sharded(tpu_2_1_tree, devices=(tpu_1, tpu_2))
# Mixed cases.
mixed_tree = (tpu_1_tree, tpu_2_tree)
with self.assertRaisesRegex(
AssertionError,
_get_err_regex(f'\'0/a\' is sharded across {_format(tpu_1)}.*'
f'expected {_format(tpu_2)}')):
asserts.assert_tree_is_sharded(mixed_tree, devices=(tpu_2,))
with self.assertRaisesRegex(
AssertionError,
_get_err_regex(f'\'1/a\' is sharded across {_format(tpu_2)}.*'
f'expected {_format(tpu_1)}')):
asserts.assert_tree_is_sharded(mixed_tree, devices=(tpu_1,))
with self.assertRaisesRegex(
AssertionError,
_get_err_regex(f'\'0/a\' is sharded across {_format(tpu_1)}.*'
f'expected {_format(tpu_1, tpu_2)}')):
asserts.assert_tree_is_sharded(mixed_tree, devices=(tpu_1, tpu_2))
# Check incorrect inputs.
with self.assertRaisesRegex(AssertionError,
_get_err_regex('\'1\' is not an ndarray')):
asserts.assert_tree_is_sharded((cpu_tree, 1123), devices=(cpu,))
with self.assertRaisesRegex(
AssertionError, _get_err_regex('\'a\' is not a jax.Array')):
asserts.assert_tree_is_sharded({'a': np.zeros(1)}, devices=(cpu,))
with self.assertRaisesRegex(
AssertionError, _get_err_regex('\'a\' is not sharded')):
asserts.assert_tree_is_sharded({'a': jnp.zeros(1)}, devices=(cpu,))
with self.assertRaisesRegex(
AssertionError, _get_err_regex('\'a\' is not sharded.*CPU')):
asserts.assert_tree_is_sharded({'a': jax.device_put(np.zeros(1), cpu)},
devices=(cpu,))
def test_assert_trees_all_close_fails_different_structure(self):
self._assert_tree_structs_validation(asserts.assert_trees_all_close)
def test_assert_trees_all_close_fails_values_differ(self):
tree1 = jnp.array([0.0, 2.0])
tree2 = jnp.array([0.0, 2.1])
asserts.assert_trees_all_close(tree1, tree2, atol=0.1)
with self.assertRaisesRegex(
AssertionError, _get_err_regex('Values not approximately equal')):
asserts.assert_trees_all_close(tree1, tree2, atol=0.01)
asserts.assert_trees_all_close(tree1, tree2, rtol=0.1)
with self.assertRaisesRegex(
AssertionError, _get_err_regex('Values not approximately equal')):
asserts.assert_trees_all_close(tree1, tree2, rtol=0.01)
def test_assert_trees_all_equal_sizes(self):
get_val = lambda s1, s2: jnp.zeros([s1, s2])
tree1 = dict(a1=get_val(3, 1), d=dict(a2=get_val(4, 1), a3=get_val(5, 3)))
tree2 = dict(a1=get_val(3, 1), d=dict(a2=get_val(4, 1), a3=get_val(5, 3)))
tree3 = dict(a1=get_val(3, 1), d=dict(a2=get_val(4, 2), a3=get_val(5, 3)))
self._assert_tree_structs_validation(asserts.assert_trees_all_equal_sizes)
asserts.assert_trees_all_equal_sizes(tree1, tree1)
asserts.assert_trees_all_equal_sizes(tree2, tree1)
with self.assertRaisesRegex(
AssertionError,
_get_err_regex(
r'Trees 0 and 1 differ in leaves \'d/a2\': sizes: 4 != 8'
)):
asserts.assert_trees_all_equal_sizes(tree1, tree3)
with self.assertRaisesRegex(
AssertionError,
_get_err_regex(
r'Trees 0 and 3 differ in leaves \'d/a2\': sizes: 4 != 8'
)):
asserts.assert_trees_all_equal_sizes(tree1, tree2, tree2, tree3, tree1)
def test_assert_trees_all_equal_shapes(self):
get_val = lambda s: jnp.zeros([s])
tree1 = dict(a1=get_val(3), d=dict(a2=get_val(4), a3=get_val(5)))
tree2 = dict(a1=get_val(3), d=dict(a2=get_val(4), a3=get_val(5)))
tree3 = dict(a1=get_val(3), d=dict(a2=get_val(7), a3=get_val(5)))
self._assert_tree_structs_validation(asserts.assert_trees_all_equal_shapes)
asserts.assert_trees_all_equal_shapes(tree1, tree1)
asserts.assert_trees_all_equal_shapes(tree2, tree1)
with self.assertRaisesRegex(
AssertionError,
_get_err_regex(
r'Trees 0 and 1 differ in leaves \'d/a2\': shapes: \(4,\) != \(7,\)'
)):
asserts.assert_trees_all_equal_shapes(tree1, tree3)
with self.assertRaisesRegex(
AssertionError,
_get_err_regex(
r'Trees 0 and 3 differ in leaves \'d/a2\': shapes: \(4,\) != \(7,\)'
)):
asserts.assert_trees_all_equal_shapes(tree1, tree2, tree2, tree3, tree1)
def test_assert_trees_all_equal_structs(self):
get_val = lambda: jnp.zeros([3])
tree1 = [[get_val(), get_val()], get_val()]
tree2 = [[get_val(), get_val()], get_val()]
tree3 = [get_val(), [get_val(), get_val()]]
asserts.assert_trees_all_equal_structs(tree1, tree2, tree2, tree1)
asserts.assert_trees_all_equal_structs(tree3, tree3)
self._assert_tree_structs_validation(asserts.assert_trees_all_equal_structs)
@parameterized.named_parameters(
('scalars', ()),
('vectors', (3,)),
('matrices', (3, 2)),
)
def test_assert_tree_shape_prefix(self, shape):
tree = {'x': {'y': np.zeros([3, 2])}, 'z': np.zeros([3, 2, 1])}
with self.subTest('tuple'):
asserts.assert_tree_shape_prefix(tree, tuple(shape))
with self.subTest('list'):
asserts.assert_tree_shape_prefix(tree, list(shape))
def test_leaf_shape_should_fail_wrong_length(self):
tree = {'x': {'y': np.zeros([3, 2])}, 'z': np.zeros([3, 2, 1])}
with self.assertRaisesRegex(
AssertionError,
_get_err_regex(r'leaf \'x/y\' has a shape of length 2')):
asserts.assert_tree_shape_prefix(tree, (3, 2, 1))
with self.assertRaisesRegex(
AssertionError,
_get_err_regex(r'leaf \'x/y\' has a shape of length 2')):
asserts.assert_tree_shape_prefix(tree, [3, 2, 1])
@parameterized.named_parameters(
('scalars', ()),
('vectors', (1,)),
('matrices', (2, 1)),
)
def test_assert_tree_shape_suffix_matching(self, shape):
tree = {'x': {'y': np.zeros([4, 2, 1])}, 'z': np.zeros([2, 1])}
with self.subTest('tuple'):
asserts.assert_tree_shape_suffix(tree, tuple(shape))
with self.subTest('list'):
asserts.assert_tree_shape_suffix(tree, list(shape))
@parameterized.named_parameters(
('bad_suffix_leaf_1', 'z', (1, 1), (2, 1)),
('bad_suffix_leaf_2', 'x/y', (2, 1), (1, 1)),
)
def test_assert_tree_shape_suffix_mismatch(self, leaf, shape_true, shape):
tree = {'x': {'y': np.zeros([4, 2, 1])}, 'z': np.zeros([1, 1])}
error_msg = (
r'Tree leaf \'' + str(leaf) + '\'.*different from expected: '
+ re.escape(str(shape_true)) + ' != ' + re.escape(str(shape))
)
with self.subTest('tuple'):
with self.assertRaisesRegex(
AssertionError,
_get_err_regex(
error_msg)):
asserts.assert_tree_shape_suffix(tree, tuple(shape))
with self.subTest('list'):
with self.assertRaisesRegex(
AssertionError,
_get_err_regex(
error_msg)):
asserts.assert_tree_shape_suffix(tree, list(shape))
def test_assert_tree_shape_suffix_long_suffix(self):
tree = {'x': {'y': np.zeros([4, 2, 1])}, 'z': np.zeros([4, 2, 1])}
asserts.assert_tree_shape_suffix(tree, (4, 2, 1))
asserts.assert_tree_shape_suffix(tree, [4, 2, 1])
with self.assertRaisesRegex(
AssertionError, _get_err_regex('which is smaller than the expected')):
asserts.assert_tree_shape_suffix(tree, (3, 4, 2, 1))
with self.assertRaisesRegex(
AssertionError, _get_err_regex('which is smaller than the expected')):
asserts.assert_tree_shape_suffix(tree, [3, 4, 2, 1])
def test_assert_trees_all_equal_dtypes(self):
t_0 = {'x': np.zeros(3, dtype=np.int16), 'y': np.ones(2, dtype=np.float32)}
t_1 = {'x': np.zeros(5, dtype=np.uint16), 'y': np.ones(4, dtype=np.float32)}
with self.assertRaisesRegex(AssertionError,
_get_err_regex('Trees 0 and 1 differ')):
asserts.assert_trees_all_equal_dtypes(t_0, t_1)
t_2 = {'x': np.zeros(6, dtype=jnp.int16), 'y': np.ones(6, dtype=np.float32)}
asserts.assert_trees_all_equal_dtypes(t_0, t_2, t_0)
with self.assertRaisesRegex(AssertionError,
_get_err_regex('Trees 0 and 4 differ')):
asserts.assert_trees_all_equal_dtypes(t_0, t_0, t_2, t_0, t_1, t_2)
# np vs jnp
t_3 = {'x': np.zeros(1, dtype=np.int16), 'y': np.ones(2, dtype=jnp.float32)}
t_4 = {
'x': np.zeros(1, dtype=jnp.int16),
'y': np.ones(2, dtype=jnp.float32)
}
asserts.assert_trees_all_equal_dtypes(t_0, t_2, t_3, t_4)
# bfloat16
t_5 = {'y': np.ones(2, dtype=np.float16)}
t_6 = {'y': np.ones(2, dtype=jnp.bfloat16)}
asserts.assert_trees_all_equal_dtypes(t_6, t_6)
with self.assertRaisesRegex(AssertionError,
_get_err_regex('Trees 0 and 1 differ')):
asserts.assert_trees_all_equal_dtypes(t_5, t_6)
def test_assert_trees_all_equal_shapes_and_dtypes(self):
# Test dtypes
t_0 = {'x': np.zeros(3, dtype=np.int16), 'y': np.ones(2, dtype=np.float32)}
t_1 = {'x': np.zeros(3, dtype=np.uint16), 'y': np.ones(2, dtype=np.float32)}
with self.assertRaisesRegex(AssertionError,
_get_err_regex('Trees 0 and 1 differ')):
asserts.assert_trees_all_equal_shapes_and_dtypes(t_0, t_1)
t_2 = {'x': np.zeros(3, dtype=np.int16), 'y': np.ones(2, dtype=np.float32)}
asserts.assert_trees_all_equal_shapes_and_dtypes(t_0, t_2, t_0)
# Test shapes
t_0 = {'x': np.zeros(3, dtype=np.int16), 'y': np.ones(2, dtype=np.float32)}
t_1 = {'x': np.zeros(4, dtype=np.int16), 'y': np.ones(2, dtype=np.float32)}
with self.assertRaisesRegex(AssertionError,
_get_err_regex('Trees 0 and 1 differ')):
asserts.assert_trees_all_equal_shapes_and_dtypes(t_0, t_1)
t_2 = {'x': np.zeros(3, dtype=np.int16), 'y': np.ones(2, dtype=np.float32)}
asserts.assert_trees_all_equal_shapes_and_dtypes(t_0, t_2, t_0)
def test_assert_trees_all_equal_wrong_usage(self):
# not an array
with self.assertRaisesRegex(AssertionError,
_get_err_regex(r'is not a \(j-\)np array')):
asserts.assert_trees_all_equal_dtypes({'x': 1.}, {'x': np.array(1.)})
# 1 tree
with self.assertRaisesRegex( # pylint:disable=g-error-prone-assert-raises
ValueError, 'Assertions over only one tree does not make sense'):
asserts.assert_trees_all_equal_dtypes({'x': 1.})
def test_assert_trees_all_equal_none(self):
t_0 = {'x': None, 'y': np.array(2, dtype=np.int32)}
t_1 = {'x': None, 'y': np.array([23], dtype=np.int32)}
t_2 = {'x': None, 'y': np.array(3, dtype=np.float32)}
t_3 = {'y': np.array([23], dtype=np.int32)}
with self.assertRaisesRegex(AssertionError,
_get_err_regex('Trees 0 and 2 differ')):
asserts.assert_trees_all_equal_dtypes(t_0, t_1, t_2)
asserts.assert_trees_all_equal_dtypes(t_0, t_1)
with self.assertRaisesRegex(AssertionError,
_get_err_regex('trees 0 and 1 do not match')):
asserts.assert_trees_all_equal_dtypes(t_0, t_3)
class DevicesAssertTest(parameterized.TestCase):
def _device_count(self, backend):
try:
return jax.device_count(backend)
except RuntimeError:
return 0
@parameterized.parameters('cpu', 'gpu', 'tpu')
def test_not_less_than(self, devtype):
n = self._device_count(devtype)
if n > 0:
asserts.assert_devices_available(
n - 1, devtype, backend=devtype, not_less_than=True)
with self.assertRaisesRegex(AssertionError,
_get_err_regex(f'Only {n} < {n + 1}')):
asserts.assert_devices_available(
n + 1, devtype, backend=devtype, not_less_than=True)
else:
with self.assertRaisesRegex(RuntimeError, # pylint: disable=g-error-prone-assert-raises
'(failed to initialize)|(Unknown backend)'):
asserts.assert_devices_available(
n - 1, devtype, backend=devtype, not_less_than=True)
def test_unsupported_device(self):
with self.assertRaisesRegex(ValueError, 'Unknown device type'): # pylint: disable=g-error-prone-assert-raises
asserts.assert_devices_available(1, 'unsupported_devtype')
def test_gpu_assert(self):
n_gpu = self._device_count('gpu')
asserts.assert_devices_available(n_gpu, 'gpu')
if n_gpu:
asserts.assert_gpu_available()
else:
with self.assertRaisesRegex(AssertionError,
_get_err_regex('No 2 GPUs available')):
asserts.assert_devices_available(2, 'gpu')
with self.assertRaisesRegex(AssertionError,
_get_err_regex('No GPU devices available')):
asserts.assert_gpu_available()
with self.assertRaisesRegex(AssertionError,
_get_err_regex('No 2 GPUs available')):
asserts.assert_devices_available(2, 'gpu', backend='cpu')
def test_cpu_assert(self):
n_cpu = jax.device_count('cpu')
asserts.assert_devices_available(n_cpu, 'cpu', backend='cpu')
def test_tpu_assert(self):
n_tpu = self._device_count('tpu')
asserts.assert_devices_available(n_tpu, 'tpu')
if n_tpu:
asserts.assert_tpu_available()
else:
with self.assertRaisesRegex(AssertionError,
_get_err_regex('No 3 TPUs available')):
asserts.assert_devices_available(3, 'tpu')
with self.assertRaisesRegex(AssertionError,
_get_err_regex('No TPU devices available')):
asserts.assert_tpu_available()
with self.assertRaisesRegex(AssertionError,
_get_err_regex('No 3 TPUs available')):
asserts.assert_devices_available(3, 'tpu', backend='cpu')
class NumericalGradsAssertTest(parameterized.TestCase):
def _test_fn(self, fn, init_args, seed, n=10):
rng_key = jax.random.PRNGKey(seed)
for _ in range(n):
rng_key, *tree_keys = jax.random.split(rng_key, len(init_args) + 1)
x = jax.tree_util.tree_map(
lambda k, x: jax.random.uniform(k, shape=x.shape),
list(tree_keys), list(init_args))
asserts.assert_numerical_grads(fn, x, order=1)
@parameterized.parameters(([1], 24), ([5], 6), ([3, 5], 20))
def test_easy(self, x_shape, seed):
f_easy = lambda x: jnp.sum(x**2 - 2 * x + 10)
init_args = (jnp.zeros(x_shape),)
self._test_fn(f_easy, init_args, seed)
@parameterized.parameters(([1], 24), ([5], 6), ([3, 5], 20))
def test_easy_with_stop_gradient(self, x_shape, seed):
f_easy_sg = lambda x: jnp.sum(jax.lax.stop_gradient(x**2) - 2 * x + 10)
init_args = (jnp.zeros(x_shape),)
self._test_fn(f_easy_sg, init_args, seed)
@parameterized.parameters(([1], 24), ([5], 6), ([3, 5], 20))
def test_hard(self, x_shape, seed):
def f_hard_with_sg(lr, x):
inner_loss = lambda y: jnp.sum((y - 1.0)**2)
inner_loss_grad = jax.grad(inner_loss)
def fu(lr, x):
for _ in range(10):
x1 = x - lr * inner_loss_grad(x) + 100 * lr**2
x2 = x - lr * inner_loss_grad(x) - 100 * lr**2
x = jax.lax.select((x > 3.).any(), x1, x2 + lr)
return x
y = fu(lr, x)
return jnp.sum(inner_loss(y))
lr = jnp.zeros([1] * len(x_shape))
x = jnp.zeros(x_shape)
self._test_fn(f_hard_with_sg, (lr, x), seed)
@parameterized.parameters(([1], 24), ([5], 6), ([3, 5], 20))
def test_hard_with_stop_gradient(self, x_shape, seed):
def f_hard_with_sg(lr, x):
inner_loss = lambda y: jnp.sum((y - 1.0)**2)
inner_loss_grad = jax.grad(inner_loss)
def fu(lr, x):
for _ in range(10):
x1 = x - lr * inner_loss_grad(x) + 100 * jax.lax.stop_gradient(lr)**2
x2 = x - lr * inner_loss_grad(x) - 100 * lr**2
x = jax.lax.select((x > 3.).any(), x1, x2 + jax.lax.stop_gradient(lr))
return x
y = fu(lr, x)
return jnp.sum(inner_loss(y))
lr = jnp.zeros([1] * len(x_shape))
x = jnp.zeros(x_shape)
self._test_fn(f_hard_with_sg, (lr, x), seed)
class EqualAssertionsTest(parameterized.TestCase):
@parameterized.named_parameters(
('dtypes', jnp.int32, jnp.int32),
('lists', [1, 2], [1, 2]),
('dicts', dict(a=[7, jnp.int32]), dict(a=[7, jnp.int32])),
)
def test_assert_equal_pass(self, first, second):
asserts.assert_equal(first, second)
def test_assert_equal_pass_on_arrays(self):
# Not using named_parameters, because JAX cannot be used before app.run().
asserts.assert_equal(jnp.ones([]), np.ones([]))
asserts.assert_equal(
jnp.ones([], dtype=jnp.int32), np.ones([], dtype=np.float64))
@parameterized.named_parameters(
('dtypes', jnp.int32, jnp.float32),
('lists', [1, 2], [1, 7]),
('lists2', [1, 2], [1]),
('dicts1', dict(a=[7, jnp.int32]), dict(b=[7, jnp.int32])),
('dicts2', dict(a=[7, jnp.int32]), dict(b=[1, jnp.int32])),
('dicts3', dict(a=[7, jnp.int32]), dict(a=[1, jnp.int32], b=2)),
('dicts4', dict(a=[7, jnp.int32]), dict(a=[1, jnp.float32])),
('arrays', np.zeros([]), np.ones([])),
)
def test_assert_equal_fail(self, first, second):
with self.assertRaises(AssertionError):
asserts.assert_equal(first, second)
class IsDivisibleTest(parameterized.TestCase):
def test_assert_is_divisible(self):
asserts.assert_is_divisible(6, 3)
def test_assert_is_divisible_fail(self):
with self.assertRaises(AssertionError):
asserts.assert_is_divisible(7, 3)
if __name__ == '__main__':
jax.config.update('jax_numpy_rank_promotion', 'raise')
absltest.main()
|
chex-master
|
chex/_src/asserts_test.py
|
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `restrict_backends.py`."""
from absl.testing import absltest
from chex._src import restrict_backends
import jax
import jax.numpy as jnp
import numpy as np
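# Helper used throughout these tests: summing a (side, side) array of ones
# scaled by `side` yields side**3, which is why the expected values below are
# perfect cubes (e.g. compute_cube(3) == 27).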
def compute_cube(side):
return jnp.sum(jnp.ones((side, side)) * side)
class RestrictBackendsTest(absltest.TestCase):
# These tests need an accelerator of some sort, so that JAX can try to use it.
def setUp(self):
super().setUp()
try:
jax.devices('gpu')
gpu_backend_available = True
except RuntimeError:
gpu_backend_available = False
try:
jax.devices('tpu')
tpu_backend_available = True
except RuntimeError:
tpu_backend_available = False
if not (gpu_backend_available or tpu_backend_available):
self.skipTest('No known accelerator backends are available, so these '
'tests will not test anything useful.')
def test_detects_implicitly_forbidden_tpu_computation(self):
with self.assertRaisesRegex(restrict_backends.RestrictedBackendError,
r'forbidden by restrict_backends'):
with restrict_backends.restrict_backends(allowed=['cpu']):
compute_cube(3)
# Make sure the restriction is no longer in place.
np.testing.assert_array_equal(compute_cube(3), 27)
def test_detects_explicitly_forbidden_tpu_computation(self):
with self.assertRaisesRegex(restrict_backends.RestrictedBackendError,
r'forbidden by restrict_backends'):
with restrict_backends.restrict_backends(forbidden=['tpu', 'gpu']):
compute_cube(2)
# Make sure the restriction is no longer in place.
np.testing.assert_array_equal(compute_cube(2), 8)
def test_detects_implicitly_forbidden_cpu_computation(self):
with self.assertRaisesRegex(restrict_backends.RestrictedBackendError,
r'forbidden by restrict_backends'):
with restrict_backends.restrict_backends(allowed=['tpu', 'gpu']):
jax.jit(lambda: compute_cube(8), backend='cpu')()
# Make sure the restriction is no longer in place.
np.testing.assert_array_equal(compute_cube(8), 512)
def test_detects_explicitly_forbidden_cpu_computation(self):
with self.assertRaisesRegex(restrict_backends.RestrictedBackendError,
r'forbidden by restrict_backends'):
with restrict_backends.restrict_backends(forbidden=['cpu']):
jax.jit(lambda: compute_cube(9), backend='cpu')()
# Make sure the restriction is no longer in place.
np.testing.assert_array_equal(compute_cube(9), 729)
def test_ignores_explicitly_allowed_cpu_computation(self):
with restrict_backends.restrict_backends(allowed=['cpu']):
c = jax.jit(lambda: compute_cube(4), backend='cpu')()
np.testing.assert_array_equal(c, 64)
def test_ignores_implicitly_allowed_cpu_computation(self):
with restrict_backends.restrict_backends(forbidden=['tpu', 'gpu']):
c = jax.jit(lambda: compute_cube(5), backend='cpu')()
np.testing.assert_array_equal(c, 125)
def test_ignores_explicitly_allowed_tpu_computation(self):
with restrict_backends.restrict_backends(allowed=['tpu', 'gpu']):
c = jax.jit(lambda: compute_cube(6))()
np.testing.assert_array_equal(c, 216)
def test_ignores_implicitly_allowed_tpu_computation(self):
with restrict_backends.restrict_backends(forbidden=['cpu']):
c = jax.jit(lambda: compute_cube(7))()
np.testing.assert_array_equal(c, 343)
def test_raises_if_no_restrictions_specified(self):
with self.assertRaisesRegex(ValueError, r'No restrictions specified'):
with restrict_backends.restrict_backends():
pass
def test_raises_if_contradictory_restrictions_specified(self):
with self.assertRaisesRegex(ValueError, r"can't be both"):
with restrict_backends.restrict_backends(
allowed=['cpu'], forbidden=['cpu']):
pass
if __name__ == '__main__':
absltest.main()
|
chex-master
|
chex/_src/restrict_backends_test.py
|
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
|
chex-master
|
chex/_src/__init__.py
|
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `variants.py`.
To run tests in a multi-CPU regime, one needs to set the flag `--n_cpu_devices=N`.
"""
import inspect
import itertools
import unittest
from absl import flags
from absl.testing import absltest
from absl.testing import parameterized
from chex._src import asserts
from chex._src import fake
from chex._src import pytypes
from chex._src import variants
import jax
import jax.numpy as jnp
import numpy as np
FLAGS = flags.FLAGS
ArrayBatched = pytypes.ArrayBatched
DEFAULT_FN = lambda arg_0, arg_1: arg_1 - arg_0
DEFAULT_PARAMS = ((1, 2, 1), (4, 6, 2))
DEFAULT_NDARRAY_PARAMS_SHAPE = (5, 7)
DEFAULT_NAMED_PARAMS = (('case_0', 1, 2, 1), ('case_1', 4, 6, 2))
# Set `FLAGS.chex_n_cpu_devices` CPU devices for all tests.
def setUpModule():
fake.set_n_cpu_devices()
asserts.assert_devices_available(
FLAGS['chex_n_cpu_devices'].value, 'cpu', backend='cpu')
def _scalar_to_ndarray(x, shape=None):
return np.broadcast_to(x, shape or DEFAULT_NDARRAY_PARAMS_SHAPE)
def _variant_default_tests_generator(fn, is_jit_context, which_variants,
**var_kwargs):
"""Returns a generator with standard tests.
For internal usage. Allows common tests to be generated dynamically.
See the tests' names and comments for more information.
Args:
fn: a separate function to be tested (without `self` argument).
is_jit_context: whether the function is supposed to be JIT-ted.
which_variants: chex variants to use in tests generation.
**var_kwargs: kwargs for variants wrappers.
Returns:
A generator with tests.
"""
# All generated tests use default arguments (defined at the top of this file).
arg_0, arg_1, expected = DEFAULT_PARAMS[0]
varg_0, varg_1, vexpected = (
_scalar_to_ndarray(a) for a in (arg_0, arg_1, expected))
# We test whether the function has been jitted by introducing a counter
# variable as a side-effect. When the function is repeatedly called, jitted
# code will only execute the side-effect once.
python_execution_count = 0
def fn_with_counter(*args, **kwargs):
nonlocal python_execution_count
python_execution_count += 1
return fn(*args, **kwargs)
def exec_with_tracing_counter_checks(self, var_fn, arg_0, arg_1):
self.assertEqual(python_execution_count, 0)
_ = var_fn(arg_0, arg_1)
# In jit context, JAX can omit retracing a function from the previous
# test, hence `python_execution_count` will be equal to 0.
# In non-jit context, `python_execution_count` must always increase.
if not is_jit_context:
self.assertEqual(python_execution_count, 1)
actual = var_fn(arg_0, arg_1)
if is_jit_context:
# Either 1 (initial tracing) or 0 (function reuse).
self.assertLess(python_execution_count, 2)
else:
self.assertEqual(python_execution_count, 2)
return actual
# Here, various tests follow. Tests' names are intended to be self-descriptive.
@variants.variants(**which_variants)
def test_with_scalar_args(self):
nonlocal python_execution_count
python_execution_count = 0
var_fn = self.variant(fn_with_counter, **var_kwargs)
actual = exec_with_tracing_counter_checks(self, var_fn, arg_0, arg_1)
self.assertEqual(actual, expected)
@variants.variants(**which_variants)
def test_called_variant(self):
nonlocal python_execution_count
python_execution_count = 0
var_fn = self.variant(**var_kwargs)(fn_with_counter)
actual = exec_with_tracing_counter_checks(self, var_fn, arg_0, arg_1)
self.assertEqual(actual, expected)
@variants.variants(**which_variants)
def test_with_kwargs(self):
nonlocal python_execution_count
python_execution_count = 0
var_fn = self.variant(fn_with_counter, **var_kwargs)
actual = exec_with_tracing_counter_checks(
self, var_fn, arg_1=arg_1, arg_0=arg_0)
self.assertEqual(actual, expected)
@variants.variants(**which_variants)
@parameterized.parameters(*DEFAULT_PARAMS)
def test_scalar_parameters(self, arg_0, arg_1, expected):
nonlocal python_execution_count
python_execution_count = 0
var_fn = self.variant(fn_with_counter, **var_kwargs)
actual = exec_with_tracing_counter_checks(self, var_fn, arg_0, arg_1)
self.assertEqual(actual, expected)
@variants.variants(**which_variants)
@parameterized.named_parameters(*DEFAULT_NAMED_PARAMS)
def test_named_scalar_parameters(self, arg_0, arg_1, expected):
nonlocal python_execution_count
python_execution_count = 0
var_fn = self.variant(fn_with_counter, **var_kwargs)
actual = exec_with_tracing_counter_checks(self, var_fn, arg_0, arg_1)
self.assertEqual(actual, expected)
@variants.variants(**which_variants)
def test_with_ndarray_args(self):
nonlocal python_execution_count
python_execution_count = 0
var_fn = self.variant(fn_with_counter, **var_kwargs)
actual = exec_with_tracing_counter_checks(self, var_fn, varg_0, varg_1)
vexpected_ = vexpected
# pmap variant case.
if len(actual.shape) == len(DEFAULT_NDARRAY_PARAMS_SHAPE) + 1:
vexpected_ = jnp.broadcast_to(vexpected_, actual.shape)
np.testing.assert_array_equal(actual, vexpected_)
@variants.variants(**which_variants)
@parameterized.parameters(*DEFAULT_PARAMS)
def test_ndarray_parameters(self, arg_0, arg_1, expected):
nonlocal python_execution_count
python_execution_count = 0
varg_0, varg_1, vexpected = (
_scalar_to_ndarray(a) for a in (arg_0, arg_1, expected))
var_fn = self.variant(fn_with_counter, **var_kwargs)
actual = exec_with_tracing_counter_checks(self, var_fn, varg_0, varg_1)
# pmap variant case.
if len(actual.shape) == len(DEFAULT_NDARRAY_PARAMS_SHAPE) + 1:
vexpected = jnp.broadcast_to(vexpected, actual.shape)
np.testing.assert_array_equal(actual, vexpected)
@variants.variants(**which_variants)
@parameterized.named_parameters(*DEFAULT_NAMED_PARAMS)
def test_ndarray_named_parameters(self, arg_0, arg_1, expected):
nonlocal python_execution_count
python_execution_count = 0
varg_0, varg_1, vexpected = (
_scalar_to_ndarray(a) for a in (arg_0, arg_1, expected))
var_fn = self.variant(fn_with_counter, **var_kwargs)
actual = exec_with_tracing_counter_checks(self, var_fn, varg_0, varg_1)
# pmap variant case.
if len(actual.shape) == len(DEFAULT_NDARRAY_PARAMS_SHAPE) + 1:
vexpected = jnp.broadcast_to(vexpected, actual.shape)
np.testing.assert_array_equal(actual, vexpected)
all_tests = (test_with_scalar_args, test_called_variant, test_with_kwargs,
test_scalar_parameters, test_named_scalar_parameters,
test_with_ndarray_args, test_ndarray_parameters,
test_ndarray_named_parameters)
# Each test is a generator itself, hence we use chaining from itertools.
return itertools.chain(*all_tests)
class ParamsProductTest(absltest.TestCase):
def test_product(self):
l1 = (
('x1', 1, 10),
('x2', 2, 20),
)
l2 = (
('y1', 3),
('y2', 4),
)
l3 = (
('z1', 5, 50),
('z2', 6, 60),
)
l4 = (('aux', 'AUX'),)
expected = [('x1', 1, 10, 'y1', 3, 'z1', 5, 50, 'aux', 'AUX'),
('x1', 1, 10, 'y1', 3, 'z2', 6, 60, 'aux', 'AUX'),
('x1', 1, 10, 'y2', 4, 'z1', 5, 50, 'aux', 'AUX'),
('x1', 1, 10, 'y2', 4, 'z2', 6, 60, 'aux', 'AUX'),
('x2', 2, 20, 'y1', 3, 'z1', 5, 50, 'aux', 'AUX'),
('x2', 2, 20, 'y1', 3, 'z2', 6, 60, 'aux', 'AUX'),
('x2', 2, 20, 'y2', 4, 'z1', 5, 50, 'aux', 'AUX'),
('x2', 2, 20, 'y2', 4, 'z2', 6, 60, 'aux', 'AUX')]
product = list(variants.params_product(l1, l2, l3, l4, named=False))
self.assertEqual(product, expected)
named_expected = [('x1_y1_z1_aux', 1, 10, 3, 5, 50, 'AUX'),
('x1_y1_z2_aux', 1, 10, 3, 6, 60, 'AUX'),
('x1_y2_z1_aux', 1, 10, 4, 5, 50, 'AUX'),
('x1_y2_z2_aux', 1, 10, 4, 6, 60, 'AUX'),
('x2_y1_z1_aux', 2, 20, 3, 5, 50, 'AUX'),
('x2_y1_z2_aux', 2, 20, 3, 6, 60, 'AUX'),
('x2_y2_z1_aux', 2, 20, 4, 5, 50, 'AUX'),
('x2_y2_z2_aux', 2, 20, 4, 6, 60, 'AUX')]
named_product = list(variants.params_product(l1, l2, l3, l4, named=True))
self.assertEqual(named_product, named_expected)
class FailedTestsTest(absltest.TestCase):
# Inner class prevents FailedTest being run by `absltest.main()`.
class FailedTest(variants.TestCase):
@variants.variants(without_jit=True)
def test_failure(self):
self.assertEqual('meaning of life', 1337)
@variants.variants(without_jit=True)
def test_error(self):
raise ValueError('this message does not specify the Chex variant')
def setUp(self):
super().setUp()
self.chex_info = str(variants.ChexVariantType.WITHOUT_JIT)
self.res = unittest.TestResult()
ts = unittest.makeSuite(self.FailedTest) # pytype: disable=module-attr
ts.run(self.res)
def test_useful_failures(self):
self.assertIsNotNone(self.res.failures)
for test_method, _ in self.res.failures:
self.assertIn(self.chex_info, test_method._testMethodName)
def test_useful_errors(self):
self.assertIsNotNone(self.res.errors)
for test_method, msg in self.res.errors:
self.assertIn(self.chex_info, test_method._testMethodName)
self.assertIn('this message does not specify the Chex variant', msg)
class OneFailedVariantTest(variants.TestCase):
# Inner class prevents MaybeFailedTest being run by `absltest.main()`.
class MaybeFailedTest(variants.TestCase):
@variants.variants(with_device=True, without_device=True)
def test_failure(self):
@self.variant
def fails_for_without_device_variant(x):
self.assertIsInstance(x, jax.Array)
fails_for_without_device_variant(42)
def test_useful_failure(self):
expected_info = str(variants.ChexVariantType.WITHOUT_DEVICE)
unexpected_info = str(variants.ChexVariantType.WITH_DEVICE)
res = unittest.TestResult()
ts = unittest.makeSuite(self.MaybeFailedTest) # pytype: disable=module-attr
ts.run(res)
self.assertLen(res.failures, 1)
for test_method, _ in res.failures:
self.assertIn(expected_info, test_method._testMethodName)
self.assertNotIn(unexpected_info, test_method._testMethodName)
class WrongBaseClassTest(variants.TestCase):
# Inner class prevents InnerTest being run by `absltest.main()`.
class InnerTest(absltest.TestCase):
@variants.all_variants
def test_failure(self):
pass
def test_wrong_base_class(self):
res = unittest.TestResult()
ts = unittest.makeSuite(self.InnerTest) # pytype: disable=module-attr
ts.run(res)
self.assertLen(res.errors, 1)
for _, msg in res.errors:
self.assertRegex(msg,
'RuntimeError.+make sure.+inherit from `chex.TestCase`')
class BaseClassesTest(parameterized.TestCase):
"""Tests different combinations of base classes for a variants test."""
def generate_test_class(self, base_1, base_2):
"""Returns a test class derived from the specified bases."""
class InnerBaseClassTest(base_1, base_2):
@variants.all_variants(with_pmap=False)
@parameterized.parameters(*DEFAULT_PARAMS)
def test_should_pass(self, arg_0, arg_1, expected):
actual = self.variant(DEFAULT_FN)(arg_0, arg_1)
self.assertEqual(actual, expected)
return InnerBaseClassTest
@parameterized.named_parameters(
('parameterized', (parameterized.TestCase, object)),
('variants', (variants.TestCase, object)),
('variants_and_parameterized',
(variants.TestCase, parameterized.TestCase)),
)
def test_inheritance(self, base_classes):
res = unittest.TestResult()
test_class = self.generate_test_class(*base_classes)
for base_class in base_classes:
self.assertTrue(issubclass(test_class, base_class))
ts = unittest.makeSuite(test_class) # pytype: disable=module-attr
ts.run(res)
self.assertEqual(res.testsRun, 8)
self.assertEmpty(res.errors or res.failures)
class VariantsTestCaseWithParameterizedTest(absltest.TestCase):
# Inner class prevents InnerTest being run by `absltest.main()`.
class InnerTest(variants.TestCase):
@variants.all_variants(with_pmap=False)
@parameterized.parameters(*DEFAULT_PARAMS)
def test_should_pass(self, arg_0, arg_1, expected):
actual = self.variant(DEFAULT_FN)(arg_0, arg_1)
self.assertEqual(actual, expected)
def test_should_pass(self):
res = unittest.TestResult()
ts = unittest.makeSuite(self.InnerTest) # pytype: disable=module-attr
ts.run(res)
self.assertEqual(res.testsRun, 8)
self.assertEmpty(res.errors or res.failures)
class WrongWrappersOrderTest(parameterized.TestCase):
def setUp(self):
super().setUp()
self._error_msg = ('A test wrapper attempts to access __name__ of '
'VariantsTestCaseGenerator')
def test_incorrect_wrapping_order_named_all_variants(self):
with self.assertRaisesRegex(RuntimeError, self._error_msg):
@parameterized.named_parameters(*DEFAULT_NAMED_PARAMS)
@variants.all_variants()
def _(*unused_args):
pass
def test_incorrect_wrapping_order_named_some_variants(self):
with self.assertRaisesRegex(RuntimeError, self._error_msg):
@parameterized.named_parameters(*DEFAULT_NAMED_PARAMS)
@variants.variants(with_jit=True, with_device=True)
def _(*unused_args):
pass
def test_incorrect_wrapping_order_all_variants(self):
with self.assertRaisesRegex(RuntimeError, self._error_msg):
@parameterized.parameters(*DEFAULT_PARAMS)
@variants.all_variants()
def _(*unused_args):
pass
def test_incorrect_wrapping_order_some_variants(self):
with self.assertRaisesRegex(RuntimeError, self._error_msg):
@parameterized.parameters(*DEFAULT_PARAMS)
@variants.variants(without_jit=True, without_device=True)
def _(*unused_args):
pass
class UnusedVariantTest(absltest.TestCase):
# Inner class prevents InnerTest being run by `absltest.main()`.
class InnerTest(variants.TestCase):
@variants.all_variants(with_pmap=False)
def test_noop(self):
pass
def test_unused_variant(self):
res = unittest.TestResult()
ts = unittest.makeSuite(self.InnerTest) # pytype: disable=module-attr
ts.run(res)
self.assertLen(res.errors, 4)
for _, msg in res.errors:
self.assertRegex(
msg, 'RuntimeError: Test is wrapped .+ but never calls self.variant')
class NoVariantsTest(absltest.TestCase):
"""Checks that Chex raises ValueError when no variants are selected."""
def test_no_variants(self):
with self.assertRaisesRegex(ValueError, 'No variants selected'):
class InnerTest(variants.TestCase): # pylint:disable=unused-variable
@variants.variants()
def test_noop(self):
pass
class UnknownVariantArgumentsTest(absltest.TestCase):
# Inner class prevents InnerTest being run by `absltest.main()`.
class InnerTest(variants.TestCase):
@variants.all_variants(with_pmap=False)
def test_arg(self):
self.variant(lambda: None, some_unknown_arg=16)
def test_unknown_argument(self):
res = unittest.TestResult()
ts = unittest.makeSuite(self.InnerTest) # pytype: disable=module-attr
ts.run(res)
self.assertLen(res.errors, 4)
for _, msg in res.errors:
self.assertRegex(msg, 'Unknown arguments in .+some_unknown_arg')
class VariantTypesTest(absltest.TestCase):
# Inner class prevents InnerTest being run by `absltest.main()`.
class InnerTest(variants.TestCase):
var_types = set()
@variants.all_variants()
def test_var_type(self):
self.variant(lambda: None)
self.var_types.add(self.variant.type)
def test_var_type_fetch(self):
ts = unittest.makeSuite(self.InnerTest) # pytype: disable=module-attr
ts.run(unittest.TestResult())
expected_types = set(variants.ChexVariantType)
if jax.device_count() == 1:
expected_types.remove(variants.ChexVariantType.WITH_PMAP)
self.assertSetEqual(self.InnerTest.var_types, expected_types)
def test_consistency(self):
self.assertLen(variants._variant_decorators, len(variants.ChexVariantType))
for arg in inspect.getfullargspec(variants.variants).args:
if arg == 'test_method':
continue
self.assertTrue(hasattr(variants.ChexVariantType, arg.upper()))
class CountVariantsTest(absltest.TestCase):
# Inner class prevents InnerTest being run by `absltest.main()`.
class InnerTest(variants.TestCase):
test_1_count = 0
test_2_count = 0
test_3_count = 0
test_4_count = 0
@variants.all_variants
def test_1(self):
type(self).test_1_count += 1
@variants.all_variants(with_pmap=False)
def test_2(self):
type(self).test_2_count += 1
@variants.variants(with_jit=True)
def test_3(self):
type(self).test_3_count += 1
@variants.variants(with_jit=True)
@variants.variants(without_jit=False)
@variants.variants(with_device=True)
@variants.variants(without_device=False)
def test_4(self):
type(self).test_4_count += 1
def test_counters(self):
res = unittest.TestResult()
ts = unittest.makeSuite(self.InnerTest) # pytype: disable=module-attr
ts.run(res)
active_pmap = int(jax.device_count() > 1)
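# Expected counts: test_1 uses all variants (4, plus pmap when >1 device),
# test_2 uses all variants except pmap (4), test_3 uses with_jit only (1),
# and test_4 enables only with_jit and with_device (2).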
self.assertEqual(self.InnerTest.test_1_count, 4 + active_pmap)
self.assertEqual(self.InnerTest.test_2_count, 4)
self.assertEqual(self.InnerTest.test_3_count, 1)
self.assertEqual(self.InnerTest.test_4_count, 2)
# Test methods do not use `self.variant`.
self.assertLen(res.errors, 1 + 2 + 4 + 4 + active_pmap)
for _, msg in res.errors:
self.assertRegex(
msg, 'RuntimeError: Test is wrapped .+ but never calls self.variant')
class MultipleVariantsTest(parameterized.TestCase):
@variants.all_variants()
def test_all_variants(self):
# self.variant must be used at least once.
self.variant(lambda x: x)(0)
self.assertNotEqual('meaning of life', 1337)
@variants.all_variants
def test_all_variants_no_parens(self):
# self.variant must be used at least once.
self.variant(lambda x: x)(0)
self.assertNotEqual('meaning of life', 1337)
@variants.variants(
with_jit=True, without_jit=True, with_device=True, without_device=True)
@parameterized.named_parameters(*DEFAULT_NAMED_PARAMS)
def test_many_variants(self, arg_0, arg_1, expected):
@self.variant
def fn(arg_0, arg_1):
return arg_1 - arg_0
actual = fn(arg_0, arg_1)
self.assertEqual(actual, expected)
class VmappedFunctionTest(parameterized.TestCase):
@variants.all_variants(with_pmap=True)
@parameterized.named_parameters(*DEFAULT_NAMED_PARAMS)
def test_vmapped_fn_named_params(self, arg_0, arg_1, expected):
varg_0, varg_1, vexpected = (
_scalar_to_ndarray(x) for x in (arg_0, arg_1, expected))
vmapped_fn = jax.vmap(DEFAULT_FN)
actual = self.variant(vmapped_fn)(varg_0, varg_1)
# pmap variant.
if len(actual.shape) == len(DEFAULT_NDARRAY_PARAMS_SHAPE) + 1:
vexpected = jnp.broadcast_to(vexpected, actual.shape)
np.testing.assert_array_equal(actual, vexpected)
class WithoutJitTest(parameterized.TestCase):
tests = _variant_default_tests_generator(
fn=DEFAULT_FN,
is_jit_context=False,
which_variants=dict(without_jit=True))
class WithJitTest(parameterized.TestCase):
tests = _variant_default_tests_generator(
fn=DEFAULT_FN, is_jit_context=True, which_variants=dict(with_jit=True))
@variants.variants(with_jit=True)
@parameterized.parameters(*DEFAULT_PARAMS)
def test_different_jit_kwargs(self, arg_0, arg_1, expected):
kwarg_0 = arg_0
kwarg_1 = arg_1
arg_0_type = type(arg_0)
arg_1_type = type(arg_1)
kwarg_0_type = type(kwarg_0)
kwarg_1_type = type(kwarg_1)
@self.variant(static_argnums=(0,), static_argnames=('kwarg_1',))
def fn_0(arg_0, arg_1, kwarg_0, kwarg_1):
self.assertIsInstance(arg_0, arg_0_type)
self.assertNotIsInstance(arg_1, arg_1_type)
self.assertNotIsInstance(kwarg_0, kwarg_0_type)
self.assertIsInstance(kwarg_1, kwarg_1_type)
return DEFAULT_FN(arg_0 + kwarg_0, arg_1 + kwarg_1)
actual_0 = fn_0(arg_0, arg_1, kwarg_0=kwarg_0, kwarg_1=kwarg_1)
self.assertEqual(actual_0, 2 * expected)
@self.variant(static_argnums=(1, 3), static_argnames=('kwarg_1',))
def fn_1(arg_0, arg_1, kwarg_0, kwarg_1):
self.assertNotIsInstance(arg_0, arg_0_type)
self.assertIsInstance(arg_1, arg_1_type)
self.assertNotIsInstance(kwarg_0, kwarg_0_type)
self.assertIsInstance(kwarg_1, kwarg_1_type)
return DEFAULT_FN(arg_0 + kwarg_0, arg_1 + kwarg_1)
actual_1 = fn_1(arg_0, arg_1, kwarg_0=kwarg_0, kwarg_1=kwarg_1)
self.assertEqual(actual_1, 2 * expected)
@self.variant(static_argnums=(), static_argnames=('kwarg_0',))
def fn_2(arg_0, arg_1, kwarg_0, kwarg_1):
self.assertNotIsInstance(arg_0, arg_0_type)
self.assertNotIsInstance(arg_1, arg_1_type)
self.assertIsInstance(kwarg_0, kwarg_0_type)
self.assertNotIsInstance(kwarg_1, kwarg_1_type)
return DEFAULT_FN(arg_0 + kwarg_0, arg_1 + kwarg_1)
actual_2 = fn_2(arg_0, arg_1, kwarg_0=kwarg_0, kwarg_1=kwarg_1)
self.assertEqual(actual_2, 2 * expected)
def fn_3(arg_0, arg_1):
self.assertIsInstance(arg_0, arg_0_type)
self.assertNotIsInstance(arg_1, arg_1_type)
return DEFAULT_FN(arg_0, arg_1)
fn_3_v0 = self.variant(static_argnums=0, static_argnames='arg_0')(fn_3)
fn_3_v1 = self.variant(static_argnums=0)(fn_3)
fn_3_v2 = self.variant(static_argnums=(), static_argnames='arg_0')(fn_3)
self.assertEqual(fn_3_v0(arg_0, arg_1), expected)
self.assertEqual(fn_3_v1(arg_0=arg_0, arg_1=arg_1), expected)
self.assertEqual(fn_3_v1(arg_0, arg_1=arg_1), expected)
self.assertEqual(fn_3_v2(arg_0=arg_0, arg_1=arg_1), expected)
def _test_fn_without_device(arg_0, arg_1):
tc = unittest.TestCase()
tc.assertNotIsInstance(arg_0, jax.Array)
tc.assertNotIsInstance(arg_1, jax.Array)
return DEFAULT_FN(arg_0, arg_1)
class WithoutDeviceTest(parameterized.TestCase):
tests = _variant_default_tests_generator(
fn=_test_fn_without_device,
is_jit_context=False,
which_variants=dict(without_device=True))
@variants.variants(without_device=True)
@parameterized.named_parameters(*DEFAULT_NAMED_PARAMS)
def test_emplace(self, arg_0, arg_1, expected):
(arg_0, arg_1) = self.variant(lambda x: x)((arg_0, arg_1))
actual = _test_fn_without_device(arg_0, arg_1)
self.assertEqual(actual, expected)
def _test_fn_with_device(arg_0, arg_1):
tc = unittest.TestCase()
tc.assertIsInstance(arg_0, jax.Array)
tc.assertIsInstance(arg_1, jax.Array)
return DEFAULT_FN(arg_0, arg_1)
class WithDeviceTest(parameterized.TestCase):
tests = _variant_default_tests_generator(
fn=_test_fn_with_device,
is_jit_context=False,
which_variants=dict(with_device=True))
@variants.variants(with_device=True)
@parameterized.named_parameters(*DEFAULT_NAMED_PARAMS)
def test_emplace(self, arg_0, arg_1, expected):
(arg_0, arg_1) = self.variant(lambda x: x)((arg_0, arg_1))
actual = _test_fn_with_device(arg_0, arg_1)
self.assertEqual(actual, expected)
@variants.variants(with_device=True)
@parameterized.named_parameters(*DEFAULT_NAMED_PARAMS)
def test_ignore_argnums(self, arg_0, arg_1, expected):
static_type = type(arg_0)
@self.variant(ignore_argnums=(0, 2))
def fn(arg_0, arg_1, float_arg):
self.assertIsInstance(arg_0, static_type)
self.assertIsInstance(arg_1, jax.Array)
self.assertIsInstance(float_arg, float)
return DEFAULT_FN(arg_0, arg_1)
actual = fn(arg_0, arg_1, 5.3)
self.assertEqual(actual, expected)
def _test_fn_single_device(arg_0, arg_1):
tc = unittest.TestCase()
tc.assertIn(np.shape(arg_0), {(), DEFAULT_NDARRAY_PARAMS_SHAPE})
tc.assertIn(np.shape(arg_1), {(), DEFAULT_NDARRAY_PARAMS_SHAPE})
res = DEFAULT_FN(arg_0, arg_1)
psum_res = jax.lax.psum(res, axis_name='i')
return psum_res
class WithPmapSingleDeviceTest(parameterized.TestCase):
tests_single_device = _variant_default_tests_generator(
fn=_test_fn_single_device,
is_jit_context=True,
which_variants=dict(with_pmap=True),
n_devices=1)
class WithPmapAllAvailableDeviceTest(parameterized.TestCase):
def setUp(self):
super().setUp()
# Choose devices and a backend.
n_tpu = asserts._ai.num_devices_available('tpu')
n_gpu = asserts._ai.num_devices_available('gpu')
if n_tpu > 1:
self.n_devices, self.backend = n_tpu, 'tpu'
elif n_gpu > 1:
self.n_devices, self.backend = n_gpu, 'gpu'
else:
self.n_devices, self.backend = FLAGS['chex_n_cpu_devices'].value, 'cpu'
@variants.variants(with_pmap=True)
@parameterized.parameters(*DEFAULT_PARAMS)
def test_pmap(self, arg_0, arg_1, expected):
n_devices, backend = self.n_devices, self.backend
n_copies = 3
arg_0_type = type(arg_0)
arg_1_type = type(arg_1)
@self.variant(reduce_fn=None, n_devices=n_devices, backend=backend)
def fn(arg_0, arg_1):
self.assertNotIsInstance(arg_0, arg_0_type)
self.assertNotIsInstance(arg_1, arg_1_type)
asserts.assert_shape(arg_0, [n_copies])
asserts.assert_shape(arg_1, [n_copies])
res = arg_1 - arg_0
psum_res = jax.lax.psum(res, axis_name='i')
return psum_res
arg_0 = jnp.zeros((n_copies,)) + arg_0
arg_1 = jnp.zeros((n_copies,)) + arg_1
actual = fn(arg_0, arg_1)
self.assertEqual(actual.shape, (n_devices, n_copies))
# Exponents of `n_devices`:
# +1: psum() inside fn()
# +1: jnp.sum() to aggregate results
self.assertEqual(jnp.sum(actual), n_copies * n_devices**2 * expected)
@variants.variants(with_pmap=True)
@parameterized.parameters(*DEFAULT_PARAMS)
def test_pmap_vmapped_fn(self, arg_0, arg_1, expected):
n_devices, backend = self.n_devices, self.backend
n_copies = 7
actual_shape = (n_copies,) + DEFAULT_NDARRAY_PARAMS_SHAPE
varg_0 = _scalar_to_ndarray(arg_0, actual_shape)
varg_1 = _scalar_to_ndarray(arg_1, actual_shape)
vexpected = _scalar_to_ndarray(expected)
arg_0_type = type(varg_0)
arg_1_type = type(varg_1)
@self.variant(reduce_fn=None, n_devices=n_devices, backend=backend)
def fn(arg_0, arg_1):
self.assertNotIsInstance(arg_0, arg_0_type)
self.assertNotIsInstance(arg_1, arg_1_type)
@jax.vmap
def vmapped_fn(arg_0, arg_1):
self.assertIsInstance(arg_0, ArrayBatched)
self.assertIsInstance(arg_1, ArrayBatched)
asserts.assert_shape(arg_0, actual_shape[1:])
asserts.assert_shape(arg_1, actual_shape[1:])
return arg_1 - arg_0
res = vmapped_fn(arg_0, arg_1)
psum_res = jax.lax.psum(res, axis_name='i')
return psum_res
actual = fn(varg_0, varg_1)
self.assertEqual(actual.shape, (n_devices,) + actual_shape)
# Sum over `n_devices` and `n_copies` axes.
actual = actual.sum(axis=0).sum(axis=0)
# Exponents of `n_devices`:
# +1: psum() inside fn()
# +1: jnp.sum() to aggregate results
np.testing.assert_array_equal(actual, n_copies * n_devices**2 * vexpected)
@variants.variants(with_pmap=True)
@parameterized.parameters(*DEFAULT_PARAMS)
def test_pmap_static_argnums(self, arg_0, arg_1, expected):
n_devices, backend = self.n_devices, self.backend
n_copies = 5
actual_shape = (n_copies,)
varg_0 = _scalar_to_ndarray(arg_0, actual_shape)
arg_0_type = type(varg_0)
arg_1_type = type(arg_1)
@self.variant(
reduce_fn=None,
n_devices=n_devices,
backend=backend,
static_argnums=(1,),
axis_name='j',
)
def fn_static(arg_0, arg_1):
self.assertNotIsInstance(arg_0, arg_0_type)
self.assertIsInstance(arg_1, arg_1_type)
asserts.assert_shape(arg_0, [n_copies])
arg_1 = _scalar_to_ndarray(arg_1, actual_shape)
asserts.assert_shape(arg_1, [n_copies])
arg_1 = np.array(arg_1) # don't stage out operations on arg_1
psum_arg_1 = np.sum(jax.lax.psum(arg_1, axis_name='j'))
self.assertEqual(psum_arg_1, arg_1[0] * (n_copies * n_devices))
res = arg_1 - arg_0
psum_res = jax.lax.psum(res, axis_name='j')
return psum_res
actual = fn_static(varg_0, arg_1)
self.assertEqual(actual.shape, (n_devices, n_copies))
# Exponents of `n_devices`:
# +1: psum() inside fn()
# +1: jnp.sum() to aggregate results
self.assertEqual(jnp.sum(actual), n_copies * n_devices**2 * expected)
@variants.variants(with_pmap=True)
def test_pmap_static_argnums_zero(self):
n_devices, backend = self.n_devices, self.backend
n_copies = 5
varg_0 = 10
varg_1 = jnp.zeros(n_copies) + 20
arg_0_type = type(varg_0)
arg_1_type = type(varg_1)
@self.variant(
reduce_fn=None,
n_devices=n_devices,
backend=backend,
static_argnums=0,
)
def fn_static(arg_0, arg_1):
self.assertIsInstance(arg_0, arg_0_type)
self.assertNotIsInstance(arg_1, arg_1_type)
arg_0 = jnp.zeros(n_copies) + arg_0
asserts.assert_shape(arg_0, [n_copies])
asserts.assert_shape(arg_1, [n_copies])
res = arg_1 - arg_0
return jax.lax.psum(res, axis_name='i')
actual = fn_static(varg_0, varg_1)
self.assertEqual(actual.shape, (n_devices, n_copies))
# Exponents of `n_devices`:
# +1: psum() inside fn()
# +1: jnp.sum() to aggregate results
self.assertEqual(jnp.sum(actual), n_copies * n_devices**2 * 10)
@variants.variants(with_pmap=True)
def test_pmap_in_axes(self):
n_devices, backend = self.n_devices, self.backend
n_copies = 7
varg_0 = jnp.zeros((n_devices, n_copies)) + 1
varg_1 = jnp.zeros((n_devices, n_copies)) + 2
arg_0_type = type(varg_0)
arg_1_type = type(varg_1)
@self.variant(
broadcast_args_to_devices=False,
reduce_fn=None,
n_devices=n_devices,
backend=backend,
# Only 0 or None are supported (06/2020).
in_axes=(0, None),
)
def fn(arg_0, arg_1):
self.assertNotIsInstance(arg_0, arg_0_type)
self.assertNotIsInstance(arg_1, arg_1_type)
asserts.assert_shape(arg_0, [n_copies])
asserts.assert_shape(arg_1, [n_devices, n_copies])
res = arg_1 - arg_0
psum_res = jax.lax.psum(res, axis_name='i')
return psum_res
actual = fn(varg_0, varg_1)
self.assertEqual(actual.shape, (n_devices, n_devices, n_copies))
self.assertEqual(jnp.sum(actual), n_copies * n_devices**3)
@variants.variants(with_pmap=True)
def test_pmap_wrong_axis_size(self):
n_devices, backend = self.n_devices, self.backend
@self.variant(
broadcast_args_to_devices=False,
n_devices=n_devices,
backend=backend,
# Only 0 or None are supported (06/2020).
in_axes=(None, 0),
)
def fn(arg_0, arg_1):
raise RuntimeError('This line should not be executed.')
varg_0 = jnp.zeros(n_devices + 1)
varg_1 = jnp.zeros(n_devices + 2)
with self.assertRaisesRegex(
ValueError, 'Pmappable.* axes size must be equal to number of devices.*'
f'expected the first dim to be {n_devices}'):
fn(varg_0, varg_1)
if __name__ == '__main__':
absltest.main()
|
chex-master
|
chex/_src/variants_test.py
|
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `fake.py`."""
import dataclasses
import functools
from absl.testing import absltest
from absl.testing import parameterized
from chex._src import asserts
from chex._src import fake
from chex._src import pytypes
import jax
import jax.numpy as jnp
ArrayBatched = pytypes.ArrayBatched
ArraySharded = pytypes.ArraySharded
# Set `FLAGS.chex_n_cpu_devices` CPU devices for all tests.
def setUpModule():
fake.set_n_cpu_devices()
def _assert_jitted(fn, fn_input, is_jitted):
"""Asserts that a function can be jitted or not.
Args:
fn: The function to be tested
fn_input: Input to pass to the function
is_jitted: Assert that the function can be jitted with jax.jit (True) or
cannot be jitted (False), i.e. the fake jit is working correctly.
"""
asserts.clear_trace_counter()
max_traces = 1 if is_jitted else 0
wrapped_fn = jax.jit(asserts.assert_max_traces(fn, max_traces))
wrapped_fn(fn_input)
def _assert_pmapped(fn, fn_input, is_pmapped, should_jit=False):
"""Asserts whether a function can be pmapped or not.
Args:
fn: The function to be tested
fn_input: Input to pass to the function
is_pmapped: Assert that the function can be pmapped with jax.pmap (True) or
cannot be pmapped (False), i.e. the fake pmap is working correctly.
should_jit: if True, asserts that the function is jitted, regardless of it
being pmapped or not.
"""
num_devices = len(jax.devices())
if should_jit:
asserts.clear_trace_counter()
fn = asserts.assert_max_traces(fn, n=1)
wrapped_fn = jax.pmap(fn, axis_size=num_devices)
fn_input = jnp.broadcast_to(fn_input, (num_devices,) + fn_input.shape)
output = wrapped_fn(fn_input)
# We test whether the function has been pmapped by inspecting the type of
# the function output: if it is a sharded array type, then the function has
# been pmapped.
if is_pmapped:
expected_type = jax.Array
assert_message = f'Output is type {type(output)}, expected {expected_type}'
assert isinstance(output, expected_type), assert_message
else:
expected_type = 'DeviceArray'
assert_message = f'Output is type {type(output)}, expected {expected_type}'
# ShardedDeviceArray is a subclass of DeviceArray. So, to enforce we have
# a DeviceArray, we also check it's not a sharded one.
assert (isinstance(output, jax.Array) and
len(output.sharding.device_set) == 1), assert_message
class PmapFakeTest(parameterized.TestCase):
def test_assert_pmapped(self):
def foo(x):
return x * 2
fn_input = jnp.ones((4,))
_assert_pmapped(foo, fn_input, True)
# Since this test runs only on 1 device, checking whether the output is
# sharded or not would not be meaningful. With jax.Array, one can inspect
# `len(output.sharding.device_set)` to see if it's sharded, but with a
# single device that check fails here.
def test_assert_jitted(self):
fn_input = jnp.ones((4,))
def foo(x):
return x * 2
_assert_jitted(foo, fn_input, True)
with self.assertRaises(AssertionError):
_assert_jitted(foo, fn_input, False)
@parameterized.named_parameters([
('plain_jit', {'enable_patching': True}, False),
('faked_jit', {'enable_patching': False}, True),
])
def test_fake_jit(self, fake_kwargs, is_jitted):
fn_input = jnp.ones((4,))
def foo(x):
return x * 2
# Call with context manager
with fake.fake_jit(**fake_kwargs):
_assert_jitted(foo, fn_input, is_jitted)
# Call with start/stop
ctx = fake.fake_jit(**fake_kwargs)
ctx.start()
_assert_jitted(foo, fn_input, is_jitted)
ctx.stop()
@parameterized.named_parameters([
('plain_pmap_but_jit', True, True),
('plain_pmap', True, False),
('faked_pmap_but_jit', False, True),
('faked_pmap', False, False),
])
def test_fake_pmap_(self, is_pmapped, jit_result):
enable_patching = not is_pmapped
fn_input = jnp.ones((4,))
def foo(x):
return x * 2
# Call with context manager
with fake.fake_pmap(enable_patching=enable_patching, jit_result=jit_result):
_assert_pmapped(foo, fn_input, is_pmapped, jit_result)
# Call with start/stop
ctx = fake.fake_pmap(enable_patching=enable_patching, jit_result=jit_result)
ctx.start()
_assert_pmapped(foo, fn_input, is_pmapped, jit_result)
ctx.stop()
def test_fake_pmap_axis_name(self):
with fake.fake_pmap():
@functools.partial(jax.pmap, axis_name='i')
@functools.partial(jax.pmap, axis_name='j')
def f(_):
return jax.lax.axis_index('i'), jax.lax.axis_index('j')
x, y = f(jnp.zeros((4, 2)))
self.assertEqual(x.tolist(), [[0, 0], [1, 1], [2, 2], [3, 3]])
self.assertEqual(y.tolist(), [[0, 1], [0, 1], [0, 1], [0, 1]])
@parameterized.named_parameters([
('fake_nothing', {
'enable_pmap_patching': False,
'enable_jit_patching': False
}, True, True),
('fake_pmap', {
'enable_pmap_patching': True,
'enable_jit_patching': False
}, False, True),
# Default pmap will implicitly compile the function
('fake_jit', {
'enable_pmap_patching': False,
'enable_jit_patching': True
}, True, False),
('fake_both', {
'enable_pmap_patching': True,
'enable_jit_patching': True
}, False, False),
])
def test_pmap_and_jit(self, fake_kwargs, is_pmapped, is_jitted):
fn_input = jnp.ones((4,))
def foo(x):
return x * 2
# Call with context manager
with fake.fake_pmap_and_jit(**fake_kwargs):
_assert_pmapped(foo, fn_input, is_pmapped)
_assert_jitted(foo, fn_input, is_jitted)
# Call with start/stop
ctx = fake.fake_pmap_and_jit(**fake_kwargs)
ctx.start()
_assert_pmapped(foo, fn_input, is_pmapped)
_assert_jitted(foo, fn_input, is_jitted)
ctx.stop()
@parameterized.named_parameters([
('fake_nothing', False, False),
('fake_pmap', True, False),
('fake_jit', False, True),
('fake_both', True, True),
])
def test_with_kwargs(self, fake_pmap, fake_jit):
with fake.fake_pmap_and_jit(fake_pmap, fake_jit):
num_devices = len(jax.devices())
@functools.partial(jax.pmap, axis_size=num_devices)
@jax.jit
def foo(x, y):
return (x * 2) + y
# pmap over all available devices
inputs = jnp.array([1, 2])
inputs = jnp.broadcast_to(inputs, (num_devices,) + inputs.shape)
expected = jnp.broadcast_to(jnp.array([3, 6]), (num_devices, 2))
asserts.assert_trees_all_close(foo(x=inputs, y=inputs), expected)
@parameterized.named_parameters([
('fake_nothing', False, 1),
('fake_pmap', True, 1),
('fake_nothing_no_static_args', False, ()),
('fake_pmap_no_static_args', True, ()),
])
def test_with_static_broadcasted_argnums(self, fake_pmap, static_argnums):
with fake.fake_pmap_and_jit(fake_pmap, enable_jit_patching=False):
num_devices = len(jax.devices())
# Note: mode='bar' is intended to test that we correctly handle kwargs
# with defaults for which we don't pass a value at call time.
@functools.partial(
jax.pmap,
axis_size=num_devices,
static_broadcasted_argnums=static_argnums,
)
@functools.partial(
jax.jit,
static_argnums=static_argnums,
)
def foo(x, multiplier, y, mode='bar'):
if static_argnums == 1 or 1 in static_argnums:
# Verify that the static arguments are not replaced with tracers.
self.assertIsInstance(multiplier, int)
if mode == 'bar':
return (x * multiplier) + y
else:
return x
# pmap over all available devices
inputs = jnp.array([1, 2])
inputs = jnp.broadcast_to(inputs, (num_devices,) + inputs.shape)
func = lambda: foo(inputs, 100, inputs) # Pass multiplier=100.
if static_argnums == 1: # Should work.
expected = jnp.broadcast_to(jnp.array([101, 202]), (num_devices, 2))
result = func()
asserts.assert_trees_all_close(result, expected)
else: # Should error.
with self.assertRaises(ValueError):
result = func()
@parameterized.parameters(1, [1])
def test_pmap_with_complex_static_broadcasted_object(self, static_argnums):
@dataclasses.dataclass
class Multiplier:
x: int
y: int
def foo(x, multiplier, y):
if static_argnums == 1 or 1 in static_argnums:
# Verify that the static arguments are not replaced with tracers.
self.assertIsInstance(multiplier, Multiplier)
return x * multiplier.x + y * multiplier.y
with fake.fake_pmap_and_jit():
num_devices = jax.device_count()
# pmap over all available devices
transformed_foo = jax.pmap(
foo,
axis_size=num_devices,
static_broadcasted_argnums=static_argnums,
)
x, y = jax.random.randint(
jax.random.PRNGKey(27), (2, num_devices, 3, 5), 0, 10
)
# Test 1.
mult = Multiplier(x=2, y=7)
asserts.assert_trees_all_equal(
transformed_foo(x, mult, y),
foo(x, mult, y),
x * mult.x + y * mult.y,
)
# Test 2.
mult = Multiplier(x=72, y=21)
asserts.assert_trees_all_equal(
transformed_foo(x, mult, y),
foo(x, mult, y),
x * mult.x + y * mult.y,
)
@parameterized.named_parameters([
('fake_nothing', False, False),
('fake_pmap', True, False),
('fake_jit', False, True),
('fake_both', True, True),
])
def test_with_partial(self, fake_pmap, fake_jit):
with fake.fake_pmap_and_jit(fake_pmap, fake_jit):
num_devices = len(jax.devices())
# Testing a common use-case where non-parallel arguments are partially
# applied before pmapping
def foo(x, y, flag):
return (x * 2) + y if flag else (x + y)
foo = functools.partial(foo, flag=True)
foo = jax.pmap(foo, axis_size=num_devices)
foo = jax.jit(foo)
# pmap over all available devices
inputs = jnp.array([1, 2])
inputs = jnp.broadcast_to(inputs, (num_devices,) + inputs.shape)
expected = jnp.broadcast_to(jnp.array([3, 6]), (num_devices, 2))
asserts.assert_trees_all_close(foo(inputs, inputs), expected)
asserts.assert_trees_all_close(foo(x=inputs, y=inputs), expected)
@parameterized.named_parameters([
('fake_nothing', False, False),
('fake_pmap', True, False),
('fake_jit', False, True),
('fake_both', True, True),
])
def test_with_default_params(self, fake_pmap, fake_jit):
with fake.fake_pmap_and_jit(fake_pmap, fake_jit):
num_devices = len(jax.devices())
# Default flag specified at definition time
def foo(x, y, flag=True):
return (x * 2) + y if flag else (x + y)
default_foo = jax.pmap(foo, axis_size=num_devices)
default_foo = jax.jit(default_foo)
inputs = jnp.array([1, 2])
inputs = jnp.broadcast_to(inputs, (num_devices,) + inputs.shape)
expected = jnp.broadcast_to(jnp.array([3, 6]), (num_devices, 2))
asserts.assert_trees_all_close(default_foo(inputs, inputs), expected)
asserts.assert_trees_all_close(default_foo(x=inputs, y=inputs), expected)
# Default overridden by partial to execute the other branch
overidden_foo = functools.partial(foo, flag=False)
overidden_foo = jax.pmap(overidden_foo, axis_size=num_devices)
overidden_foo = jax.jit(overidden_foo)
expected = jnp.broadcast_to(jnp.array([2, 4]), (num_devices, 2))
asserts.assert_trees_all_close(overidden_foo(inputs, inputs), expected)
asserts.assert_trees_all_close(
overidden_foo(x=inputs, y=inputs), expected)
def test_parallel_ops_equivalence(self):
"""Test equivalence between parallel operations using pmap and vmap."""
num_devices = len(jax.devices())
inputs = jax.random.uniform(shape=(num_devices, num_devices, 2),
key=jax.random.PRNGKey(1))
def test_equivalence(fn):
with fake.fake_pmap(enable_patching=False):
outputs1 = jax.pmap(fn, axis_name='i', axis_size=num_devices)(inputs)
with fake.fake_pmap(enable_patching=True):
outputs2 = jax.pmap(fn, axis_name='i', axis_size=num_devices)(inputs)
with fake.fake_pmap(enable_patching=True, jit_result=True):
outputs3 = jax.pmap(fn, axis_name='i', axis_size=num_devices)(inputs)
asserts.assert_trees_all_close(outputs1, outputs2, outputs3)
parallel_ops_and_kwargs = [
(jax.lax.psum, {}),
(jax.lax.pmax, {}),
(jax.lax.pmin, {}),
(jax.lax.pmean, {}),
(jax.lax.all_gather, {}),
(jax.lax.all_to_all, {
'split_axis': 0,
'concat_axis': 1
}),
(jax.lax.ppermute, {
'perm': [(x, (x + 1) % num_devices) for x in range(num_devices)]
}),
]
def fn(op, kwargs, x, y=2.0):
return op(x * y, axis_name='i', **kwargs)
partial_fn = functools.partial(fn, y=4.0)
lambda_fn = lambda op, kwargs, x: fn(op, kwargs, x, y=5.0)
for op, kwargs in parallel_ops_and_kwargs:
test_equivalence(functools.partial(fn, op, kwargs))
test_equivalence(functools.partial(fn, op, kwargs, y=3.0))
test_equivalence(functools.partial(partial_fn, op, kwargs))
test_equivalence(functools.partial(lambda_fn, op, kwargs))
def test_fake_parallel_axis(self):
inputs = jnp.ones(shape=(2, 2))
with fake.fake_pmap(fake_parallel_axis=False):
@jax.pmap
def no_fake_parallel_axis_fn(x):
asserts.assert_shape(x, (2,))
return 2.0 * x
outputs = no_fake_parallel_axis_fn(inputs)
asserts.assert_trees_all_close(outputs, 2.0)
with fake.fake_pmap(fake_parallel_axis=True):
@jax.pmap
def fake_parallel_axis_fn(x):
asserts.assert_shape(x, (2, 2,))
return 2.0 * x
outputs = fake_parallel_axis_fn(inputs)
asserts.assert_trees_all_close(outputs, 2.0)
class _Counter():
"""Counts how often an instance is called."""
def __init__(self):
self.count = 0
def __call__(self, *unused_args, **unused_kwargs):
self.count += 1
class OnCallOfTransformedFunctionTest(parameterized.TestCase):
def test_on_call_of_transformed_function(self):
counter = _Counter()
with fake.OnCallOfTransformedFunction('jax.jit', counter):
jax.jit(jnp.sum)(jnp.zeros((10,)))
jax.jit(jnp.max)(jnp.zeros((10,)))
self.assertEqual(counter.count, 2)
if __name__ == '__main__':
absltest.main()
|
chex-master
|
chex/_src/fake_test.py
|
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A context manager that objects to JAX compilation for specified backends.
This is useful, for example, when certain JAX code needs to run in an
environment where an accelerator is present but reserved for other purposes.
Typically one would use `jax.jit(..., backend='cpu')` to keep the code away
from the accelerator, but it is hard to check by hand that this has been done
without exception throughout an entire subsystem. Then, `restrict_backends()`
can be used to detect any overlooked case and report it by raising an exception.
Similarly, it can be useful for a system such as a learner to make sure that
all required JAX programs have been assigned to their respective backends by
the end of its first iteration; this helps to show that it will not later run
into memory fragmentation problems. By entering a `restrict_backends()` context
at the end of the first iteration, the system can detect any overlooked cases.
"""
import contextlib
import functools
from typing import Optional, Sequence
# pylint: disable=g-import-not-at-top
try:
from jax._src import compiler
except ImportError:
# TODO(phawkins): remove this path after jax>=0.4.15 is the minimum version
# required by chex.
from jax._src import dispatch as compiler # type: ignore
# pylint: enable=g-import-not-at-top
class RestrictedBackendError(RuntimeError):
pass
@contextlib.contextmanager
def restrict_backends(
*,
allowed: Optional[Sequence[str]] = None,
forbidden: Optional[Sequence[str]] = None):
"""Disallows JAX compilation for certain backends.
Args:
allowed: Names of backend platforms (e.g. 'cpu' or 'tpu') for which
compilation is still to be permitted.
forbidden: Names of backend platforms for which compilation is to be
forbidden.
Yields:
None, in a context where compilation for forbidden platforms will raise
a `RestrictedBackendError`.
Raises:
ValueError: if neither `allowed` nor `forbidden` is specified (i.e. they
are both `None`), or if anything is both allowed and forbidden.
"""
allowed = tuple(allowed) if allowed is not None else None
forbidden = tuple(forbidden) if forbidden is not None else None
if allowed is None and forbidden is None:
raise ValueError('No restrictions specified.')
contradictions = set(allowed or ()) & set(forbidden or ())
if contradictions:
raise ValueError(
f"Backends {contradictions} can't be both allowed and forbidden.")
def is_allowed(backend_platform):
return ((backend_platform in allowed) if allowed is not None else
(backend_platform not in forbidden))
inner_backend_compile = compiler.backend_compile
@functools.wraps(inner_backend_compile)
def wrapper(backend, *args, **kwargs):
if not is_allowed(backend.platform):
raise RestrictedBackendError(
f'Compiling a JAX program for {backend.platform} is forbidden by '
f'restrict_backends().')
return inner_backend_compile(backend, *args, **kwargs)
try:
compiler.backend_compile = wrapper
yield
finally:
backend_compile = compiler.backend_compile
assert backend_compile is wrapper, backend_compile
compiler.backend_compile = inner_backend_compile
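# Hedged usage sketch (illustrative only; not part of the original chex file).
# Assumes a JAX installation with a CPU backend; it exercises only the
# `restrict_backends` context manager defined above.
if __name__ == "__main__":
  import jax
  import jax.numpy as jnp
  with restrict_backends(allowed=["cpu"]):
    # Compiling for the CPU backend is still permitted inside the context.
    print(jax.jit(lambda x: x + 1, backend="cpu")(jnp.zeros(())))
  # On a machine whose default backend is an accelerator, compiling without
  # `backend="cpu"` inside the context would raise RestrictedBackendError.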
|
chex-master
|
chex/_src/restrict_backends.py
|
# Copyright 2022 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for ``dimensions`` module."""
import doctest
from absl.testing import absltest
from absl.testing import parameterized
from chex._src import asserts
from chex._src import dimensions
import jax
import numpy as np
class _ChexModule:
"""Mock module for providing minimal context to docstring tests."""
assert_shape = asserts.assert_shape
assert_rank = asserts.assert_rank
Dimensions = dimensions.Dimensions # pylint: disable=invalid-name
class DimensionsTest(parameterized.TestCase):
def test_docstring_examples(self):
doctest.run_docstring_examples(
dimensions.Dimensions,
globs={'chex': _ChexModule, 'jax': jax, 'jnp': jax.numpy})
@parameterized.named_parameters([
('scalar', '', (), ()),
('vector', 'a', (7,), (7,)),
('list', 'ab', [7, 11], (7, 11)),
('numpy_array', 'abc', np.array([7, 11, 13]), (7, 11, 13)),
('case_sensitive', 'aA', (7, 11), (7, 11)),
])
def test_set_ok(self, k, v, shape):
dims = dimensions.Dimensions(x=23, y=29)
dims[k] = v
asserts.assert_shape(np.empty((23, *shape, 29)), dims['x' + k + 'y'])
def test_set_wildcard(self):
dims = dimensions.Dimensions(x=23, y=29)
dims['a_b__'] = (7, 11, 13, 17, 19)
self.assertEqual(dims['xayb'], (23, 7, 29, 13))
with self.assertRaisesRegex(KeyError, r'\*'):
dims['ab*'] = (7, 11, 13)
def test_get_wildcard(self):
dims = dimensions.Dimensions(x=23, y=29)
self.assertEqual(dims['x*y**'], (23, None, 29, None, None))
asserts.assert_shape(np.empty((23, 1, 29, 2, 3)), dims['x*y**'])
with self.assertRaisesRegex(KeyError, r'\_'):
dims['xy_'] # pylint: disable=pointless-statement
def test_get_literals(self):
dims = dimensions.Dimensions(x=23, y=29)
self.assertEqual(dims['x1y23'], (23, 1, 29, 2, 3))
@parameterized.named_parameters([
('scalar', 'a', 7, TypeError, r'value must be sized'),
('iterator', 'a', (x for x in [7]), TypeError, r'value must be sized'),
('len_mismatch', 'ab', (7, 11, 13), ValueError, r'different length'),
('non_integer_size', 'a', (7.001,),
TypeError, r'cannot be interpreted as a python int'),
('bad_key_type', 13, (7,), TypeError, r'key must be a string'),
('bad_key_string', '@%^#', (7, 11, 13, 17), KeyError, r'\@'),
])
def test_set_exception(self, k, v, e, m):
dims = dimensions.Dimensions(x=23, y=29)
with self.assertRaisesRegex(e, m):
dims[k] = v
@parameterized.named_parameters([
('bad_key_type', 13, TypeError, r'key must be a string'),
('bad_key_string', '@%^#', KeyError, r'\@'),
])
def test_get_exception(self, k, e, m):
dims = dimensions.Dimensions(x=23, y=29)
with self.assertRaisesRegex(e, m):
dims[k] # pylint: disable=pointless-statement
if __name__ == '__main__':
absltest.main()
|
chex-master
|
chex/_src/dimensions_test.py
|
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Chex assertion utilities."""
import collections
import collections.abc
import functools
import inspect
import traceback
from typing import Any, Callable, List, Optional, Sequence, Set, Type, Union, cast
import unittest
from unittest import mock
from chex._src import asserts_internal as _ai
from chex._src import pytypes
import jax
from jax.experimental import checkify
import jax.numpy as jnp
import jax.test_util as jax_test
import numpy as np
Scalar = pytypes.Scalar
Array = pytypes.Array
ArrayTree = pytypes.ArrayTree
_value_assertion = _ai.chex_assertion
_static_assertion = functools.partial(
_ai.chex_assertion, jittable_assert_fn=None)
def disable_asserts() -> None:
"""Disables all Chex assertions.
Use wisely.
"""
_ai.DISABLE_ASSERTIONS = True
def enable_asserts() -> None:
"""Enables Chex assertions."""
_ai.DISABLE_ASSERTIONS = False
def if_args_not_none(fn, *args, **kwargs):
"""Wrap chex assertion to only be evaluated if positional args not `None`."""
found_none = False
for x in args:
found_none = found_none or (x is None)
if not found_none:
fn(*args, **kwargs)
def clear_trace_counter() -> None:
"""Clears Chex traces' counter for ``assert_max_traces`` checks.
Use it to isolate unit tests that rely on ``assert_max_traces``,
by calling it at the start of the test case.
"""
_ai.TRACE_COUNTER.clear()
def assert_max_traces(fn: Optional[Union[Callable[..., Any], int]] = None,
n: Optional[Union[Callable[..., Any], int]] = None):
"""Checks that a function is traced at most `n` times (inclusively).
JAX re-traces jitted functions every time the structure of passed arguments
changes. Often this behaviour is inadvertent and leads to a significant
performance drop which is hard to debug. This wrapper checks that
the function is re-traced at most `n` times during program execution.
Examples:
.. code-block:: python
@jax.jit
@chex.assert_max_traces(n=1)
def fn_sum_jitted(x, y):
return x + y
def fn_sub(x, y):
return x - y
    fn_sub_pmapped = jax.pmap(chex.assert_max_traces(fn_sub, n=10))
More about tracing:
https://jax.readthedocs.io/en/latest/notebooks/How_JAX_primitives_work.html
Args:
fn: A pure python function to wrap (i.e. it must not be a jitted function).
n: The maximum allowed number of retraces (non-negative).
Returns:
    Decorated function that raises an exception when it is re-traced for the `(n+1)`-st time.
Raises:
ValueError: If ``fn`` has already been jitted.
"""
if not callable(fn) and n is None:
# Passed n as a first argument.
n, fn = fn, n
# Currying.
if fn is None:
return lambda fn_: assert_max_traces(fn_, n)
# Args are expected to be in the right order from here onwards.
fn = cast(Callable[..., Any], fn)
n = cast(int, n)
assert_scalar_non_negative(n)
# Check wrappers ordering.
if _ai.is_traceable(fn):
raise ValueError(
"@assert_max_traces must not wrap JAX-transformed function "
"(@jit, @vmap, @pmap etc.); change wrappers ordering.")
  # Footprint is defined as a stacktrace of modules' names at the function's
  # definition place + its name and source code. This allows catching retracing
  # events both in loops and in sequential calls, and makes this wrapper work
  # with Colab envs.
fn_footprint = (
tuple(frame.name for frame in traceback.extract_stack()[:-1]) +
(inspect.getsource(fn), fn.__name__))
fn_hash = hash(fn_footprint)
@functools.wraps(fn)
def fn_wrapped(*args, **kwargs):
    # We assume that a function without arguments is not being traced.
    # That is, the case of n=0 for a no-arguments function won't raise an error.
has_tracers_in_args = _ai.has_tracers((args, kwargs))
nonlocal fn_hash
_ai.TRACE_COUNTER[fn_hash] += int(has_tracers_in_args)
if not _ai.DISABLE_ASSERTIONS and _ai.TRACE_COUNTER[fn_hash] > n:
raise AssertionError(
f"{_ai.ERR_PREFIX}Function '{fn.__name__}' is traced > {n} times!\n"
"It often happens when a jitted function is defined inside another "
"function that is called multiple times (i.e. the jitted f-n is a "
"new object every time). Make sure that your code does not exploit "
"this pattern (move the nested functions to the top level to fix it)."
" See `chex.clear_trace_counter()` if `@chex.assert_max_traces` is "
"used in any unit tests (especially @parameterized tests).")
return fn(*args, **kwargs)
return fn_wrapped
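def _assert_max_traces_sketch():
  """Hedged, illustrative sketch (not part of the original chex file).
  Shows how a retrace caused by a new input shape trips the assertion above.
  Assumes the module-level `jax`/`jnp` imports and default-enabled assertions.
  """
  @jax.jit
  @assert_max_traces(n=1)
  def fn(x):
    return x * 2
  fn(jnp.zeros((2,)))      # First call: one trace, within the n=1 budget.
  fn(jnp.ones((2,)))       # Same shape/dtype: cache hit, no retrace.
  try:
    fn(jnp.zeros((3, 3)))  # New shape forces a retrace -> AssertionError.
  except AssertionError:
    pass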
@_static_assertion
def assert_devices_available(n: int,
devtype: str,
backend: Optional[str] = None,
not_less_than: bool = False) -> None:
"""Checks that `n` devices of a given type are available.
Args:
n: A required number of devices of the given type.
devtype: A type of devices, one of ``{'cpu', 'gpu', 'tpu'}``.
backend: A type of backend to use (uses Jax default if not provided).
not_less_than: Whether to check if the number of devices is not less than
`n`, instead of precise comparison.
Raises:
AssertionError: If number of available device of a given type is not equal
or less than `n`.
"""
n_available = _ai.num_devices_available(devtype, backend=backend)
devs = jax.devices(backend)
if not_less_than and n_available < n:
raise AssertionError(
f"Only {n_available} < {n} {devtype.upper()}s available in {devs}.")
elif not not_less_than and n_available != n:
raise AssertionError(f"No {n} {devtype.upper()}s available in {devs}.")
@_static_assertion
def assert_tpu_available(backend: Optional[str] = None) -> None:
"""Checks that at least one TPU device is available.
Args:
backend: A type of backend to use (uses JAX default if not provided).
Raises:
AssertionError: If no TPU device available.
"""
if not _ai.num_devices_available("tpu", backend=backend):
raise AssertionError(f"No TPU devices available in {jax.devices(backend)}.")
@_static_assertion
def assert_gpu_available(backend: Optional[str] = None) -> None:
"""Checks that at least one GPU device is available.
Args:
backend: A type of backend to use (uses JAX default if not provided).
Raises:
AssertionError: If no GPU device available.
"""
if not _ai.num_devices_available("gpu", backend=backend):
raise AssertionError(f"No GPU devices available in {jax.devices(backend)}.")
@_static_assertion
def assert_equal(first: Any, second: Any) -> None:
"""Checks that the two objects are equal as determined by the `==` operator.
Arrays with more than one element cannot be compared.
Use ``assert_trees_all_close`` to compare arrays.
Args:
first: A first object.
second: A second object.
Raises:
AssertionError: If not ``(first == second)``.
"""
unittest.TestCase().assertEqual(first, second)
@_static_assertion
def assert_not_both_none(first: Any, second: Any) -> None:
"""Checks that at least one of the arguments is not `None`.
Args:
first: A first object.
second: A second object.
Raises:
AssertionError: If ``(first is None) and (second is None)``.
"""
if first is None and second is None:
raise AssertionError(
"At least one of the arguments must be different from `None`.")
@_static_assertion
def assert_exactly_one_is_none(first: Any, second: Any) -> None:
"""Checks that one and only one of the arguments is `None`.
Args:
first: A first object.
second: A second object.
Raises:
AssertionError: If ``(first is None) xor (second is None)`` is `False`.
"""
if (first is None) == (second is None):
raise AssertionError(f"One and exactly one of inputs should be `None`, "
f"got {first} and {second}.")
@_static_assertion
def assert_is_divisible(numerator: int, denominator: int) -> None:
"""Checks that ``numerator`` is divisible by ``denominator``.
Args:
numerator: A numerator.
denominator: A denominator.
Raises:
AssertionError: If ``numerator`` is not divisible by ``denominator``.
"""
if numerator % denominator != 0:
raise AssertionError(f"{numerator} is not divisible by {denominator}.")
@_static_assertion
def assert_scalar(x: Scalar) -> None:
"""Checks that ``x`` is a scalar, as defined in `pytypes.py` (int or float).
Args:
x: An object to check.
Raises:
AssertionError: If ``x`` is not a scalar as per definition in pytypes.py.
"""
if not isinstance(x, (int, float)):
raise AssertionError(f"The argument {x} must be a scalar, got {type(x)}.")
@_static_assertion
def assert_scalar_in(x: Any,
min_: Scalar,
max_: Scalar,
included: bool = True) -> None:
"""Checks that argument is a scalar within segment (by default).
Args:
x: An object to check.
min_: A left border of the segment.
max_: A right border of the segment.
included: Whether to include the borders of the segment in the set of
allowed values.
Raises:
AssertionError: If ``x`` is not a scalar; if ``x`` falls out of the segment.
"""
assert_scalar(x)
if included:
if not min_ <= x <= max_:
raise AssertionError(
f"The argument must be in [{min_}, {max_}], got {x}.")
else:
if not min_ < x < max_:
raise AssertionError(
f"The argument must be in ({min_}, {max_}), got {x}.")
@_static_assertion
def assert_scalar_positive(x: Scalar) -> None:
"""Checks that a scalar is positive.
Args:
x: A value to check.
Raises:
AssertionError: If ``x`` is not a scalar or strictly positive.
"""
assert_scalar(x)
if x <= 0:
raise AssertionError(f"The argument must be positive, got {x}.")
@_static_assertion
def assert_scalar_non_negative(x: Scalar) -> None:
"""Checks that a scalar is non-negative.
Args:
x: A value to check.
Raises:
AssertionError: If ``x`` is not a scalar or negative.
"""
assert_scalar(x)
if x < 0:
raise AssertionError(f"The argument must be non-negative, was {x}.")
@_static_assertion
def assert_scalar_negative(x: Scalar) -> None:
"""Checks that a scalar is negative.
Args:
x: A value to check.
Raises:
AssertionError: If ``x`` is not a scalar or strictly negative.
"""
assert_scalar(x)
if x >= 0:
raise AssertionError(f"The argument must be negative, was {x}.")
@_static_assertion
def assert_equal_size(inputs: Sequence[Array]) -> None:
"""Checks that all arrays have the same size.
Args:
inputs: A collection of arrays.
Raises:
AssertionError: If the size of all arrays do not match.
"""
_ai.assert_collection_of_arrays(inputs)
size = inputs[0].size
expected_sizes = [size] * len(inputs)
sizes = [x.size for x in inputs]
if sizes != expected_sizes:
raise AssertionError(f"Arrays have different sizes: {sizes}")
@_static_assertion
def assert_size(
inputs: Union[Scalar, Union[Array, Sequence[Array]]],
expected_sizes: Union[_ai.TShapeMatcher,
Sequence[_ai.TShapeMatcher]]) -> None:
"""Checks that the size of all inputs matches specified ``expected_sizes``.
Valid usages include:
.. code-block:: python
assert_size(x, 1) # x is scalar (size 1)
assert_size([x, y], (2, {1, 3})) # x has size 2, y has size 1 or 3
assert_size([x, y], (2, ...)) # x has size 2, y has any size
assert_size([x, y], 1) # x and y are scalar (size 1)
assert_size((x, y), (5, 2)) # x has size 5, y has size 2
Args:
inputs: An array or a sequence of arrays.
    expected_sizes: A sequence of expected sizes associated with each input,
where the expected size is a sequence of integer and `None` dimensions;
if all inputs have same size, a single size may be passed as
``expected_sizes``.
Raises:
AssertionError: If the lengths of ``inputs`` and ``expected_sizes`` do not
match; if ``expected_sizes`` has wrong type; if size of ``input`` does
not match ``expected_sizes``.
"""
# Ensure inputs and expected sizes are sequences.
if not isinstance(inputs, collections.abc.Sequence):
inputs = [inputs]
if isinstance(expected_sizes, int):
expected_sizes = [expected_sizes] * len(inputs)
if not isinstance(expected_sizes, (list, tuple)):
raise AssertionError(
"Error in size compatibility check: expected sizes should be an int, "
f"list, or tuple of ints, got {expected_sizes}.")
if len(inputs) != len(expected_sizes):
raise AssertionError(
"Length of `inputs` and `expected_sizes` must match: "
f"{len(inputs)} is not equal to {len(expected_sizes)}.")
errors = []
for idx, (x, expected) in enumerate(zip(inputs, expected_sizes)):
size = getattr(x, "size", 1) # scalars have size 1 by definition.
# Allow any size for the ellipsis case and allow handling of integer
# expected sizes or collection of acceptable expected sizes.
int_condition = expected in {Ellipsis, None} or size == expected
set_condition = (isinstance(expected, collections.abc.Collection) and
size in expected)
if not (int_condition or set_condition):
errors.append((idx, size, expected))
if errors:
msg = "; ".join(
f"input {e[0]} has size {e[1]} but expected {e[2]}" for e in errors)
raise AssertionError(f"Error in size compatibility check: {msg}.")
@_static_assertion
def assert_equal_shape(
inputs: Sequence[Array],
*,
dims: Optional[Union[int, Sequence[int]]] = None) -> None:
"""Checks that all arrays have the same shape.
Args:
inputs: A collection of arrays.
dims: An optional integer or sequence of integers. If not provided, every
dimension of every shape must match. If provided, equality of shape will
only be asserted for the specified dim(s), i.e. to ensure all of a group
of arrays have the same size in the first two dimensions, call
``assert_equal_shape(tensors_list, dims=(0, 1))``.
Raises:
AssertionError: If the shapes of all arrays at specified dims do not match.
ValueError: If the provided ``dims`` are invalid indices into any of arrays;
or if ``inputs`` is not a collection of arrays.
"""
_ai.assert_collection_of_arrays(inputs)
# NB: Need explicit dims argument, closing over it triggers linter bug.
def extract_relevant_dims(shape, dims):
try:
if dims is None:
return shape
elif isinstance(dims, int):
return shape[dims]
else:
return [shape[d] for d in dims]
except IndexError as err:
raise ValueError(
f"Indexing error when trying to extra dim(s) {dims} from array shape "
f"{shape}") from err
shape = extract_relevant_dims(inputs[0].shape, dims)
expected_shapes = [shape] * len(inputs)
shapes = [extract_relevant_dims(x.shape, dims) for x in inputs]
if shapes != expected_shapes:
if dims is not None:
msg = f"Arrays have different shapes at dims {dims}: {shapes}"
else:
msg = f"Arrays have different shapes: {shapes}."
raise AssertionError(msg)
@_static_assertion
def assert_equal_shape_prefix(inputs: Sequence[Array], prefix_len: int) -> None:
"""Checks that the leading ``prefix_dims`` dims of all inputs have same shape.
Args:
inputs: A collection of input arrays.
prefix_len: A number of leading dimensions to compare; each input's shape
will be sliced to ``shape[:prefix_len]``. Negative values are accepted and
have the conventional Python indexing semantics.
Raises:
AssertionError: If the shapes of all arrays do not match.
    ValueError: If ``inputs`` is not a collection of arrays.
"""
_ai.assert_collection_of_arrays(inputs)
shapes = [array.shape[:prefix_len] for array in inputs]
if shapes != [shapes[0]] * len(shapes):
raise AssertionError(f"Arrays have different shape prefixes: {shapes}")
@_static_assertion
def assert_equal_shape_suffix(inputs: Sequence[Array], suffix_len: int) -> None:
"""Checks that the final ``suffix_len`` dims of all inputs have same shape.
Args:
inputs: A collection of input arrays.
suffix_len: A number of trailing dimensions to compare; each input's shape
will be sliced to ``shape[-suffix_len:]``. Negative values are accepted
and have the conventional Python indexing semantics.
Raises:
AssertionError: If the shapes of all arrays do not match.
    ValueError: If ``inputs`` is not a collection of arrays.
"""
_ai.assert_collection_of_arrays(inputs)
shapes = [array.shape[-suffix_len:] for array in inputs]
if shapes != [shapes[0]] * len(shapes):
raise AssertionError(f"Arrays have different shape suffixes: {shapes}")
def _unelided_shape_matches(
actual_shape: Sequence[int],
expected_shape: Sequence[Optional[Union[int, Set[int]]]]) -> bool:
"""Returns True if `actual_shape` is compatible with `expected_shape`."""
if len(actual_shape) != len(expected_shape):
return False
for actual, expected in zip(actual_shape, expected_shape):
if expected is None:
continue
if isinstance(expected, set):
if actual not in expected:
return False
elif actual != expected:
return False
return True
def _shape_matches(actual_shape: Sequence[int],
expected_shape: _ai.TShapeMatcher) -> bool:
"""Returns True if `actual_shape` is compatible with `expected_shape`."""
# Splits `expected_shape` based on the position of the ellipsis, if present.
expected_prefix: List[_ai.TDimMatcher] = []
expected_suffix: Optional[List[_ai.TDimMatcher]] = None
for dim in expected_shape:
if dim is Ellipsis:
if expected_suffix is not None:
raise ValueError(
"`expected_shape` may not contain more than one ellipsis, "
f"but got {_ai.format_shape_matcher(expected_shape)}")
expected_suffix = []
elif expected_suffix is None:
expected_prefix.append(dim)
else:
expected_suffix.append(dim)
# If there is no ellipsis, just compare to the full `actual_shape`.
if expected_suffix is None:
assert len(expected_prefix) == len(expected_shape)
return _unelided_shape_matches(actual_shape, expected_prefix)
  # Checks that the actual rank is at least the number of non-elided dimensions.
if len(actual_shape) < len(expected_prefix) + len(expected_suffix):
return False
if expected_prefix:
actual_prefix = actual_shape[:len(expected_prefix)]
if not _unelided_shape_matches(actual_prefix, expected_prefix):
return False
if expected_suffix:
actual_suffix = actual_shape[-len(expected_suffix):]
if not _unelided_shape_matches(actual_suffix, expected_suffix):
return False
return True
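def _shape_matcher_sketch():
  """Hedged, illustrative sketch (not part of the original chex file).
  Demonstrates the matcher semantics implemented by `_shape_matches` above:
  integers match exactly, sets offer alternatives, `None` matches any size,
  and a single `...` elides an arbitrary run of dimensions.
  """
  assert _shape_matches((2, 3), (2, 3))             # exact match
  assert _shape_matches((2, 3), (2, {1, 3}))        # dim 1 may be 1 or 3
  assert _shape_matches((2, 3), (2, None))          # None matches any size
  assert _shape_matches((2, 3, 5, 7), (2, ..., 7))  # '...' elides middle dims
  assert not _shape_matches((3, 3), (2, ...))       # dim 0 mismatch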
@_static_assertion
def assert_shape(
inputs: Union[Scalar, Union[Array, Sequence[Array]]],
expected_shapes: Union[_ai.TShapeMatcher,
Sequence[_ai.TShapeMatcher]]) -> None:
"""Checks that the shape of all inputs matches specified ``expected_shapes``.
Valid usages include:
.. code-block:: python
assert_shape(x, ()) # x is scalar
assert_shape(x, (2, 3)) # x has shape (2, 3)
assert_shape(x, (2, {1, 3})) # x has shape (2, 1) or (2, 3)
assert_shape(x, (2, None)) # x has rank 2 and `x.shape[0] == 2`
assert_shape(x, (2, ...)) # x has rank >= 1 and `x.shape[0] == 2`
assert_shape([x, y], ()) # x and y are scalar
assert_shape([x, y], [(), (2,3)]) # x is scalar and y has shape (2, 3)
Args:
inputs: An array or a sequence of arrays.
expected_shapes: A sequence of expected shapes associated with each input,
where the expected shape is a sequence of integer and `None` dimensions;
if all inputs have same shape, a single shape may be passed as
``expected_shapes``.
Raises:
AssertionError: If the lengths of ``inputs`` and ``expected_shapes`` do not
match; if ``expected_shapes`` has wrong type; if shape of ``input`` does
not match ``expected_shapes``.
"""
if not isinstance(expected_shapes, (list, tuple)):
raise AssertionError(
"Error in shape compatibility check: expected shapes should be a list "
f"or tuple of ints, got {expected_shapes}.")
# Ensure inputs and expected shapes are sequences.
if not isinstance(inputs, collections.abc.Sequence):
inputs = [inputs]
# Shapes are always lists or tuples, not scalars.
if (not expected_shapes or not isinstance(expected_shapes[0], (list, tuple))):
expected_shapes = [expected_shapes] * len(inputs)
if len(inputs) != len(expected_shapes):
raise AssertionError(
"Length of `inputs` and `expected_shapes` must match: "
f"{len(inputs)} is not equal to {len(expected_shapes)}.")
errors = []
for idx, (x, expected) in enumerate(zip(inputs, expected_shapes)):
shape = getattr(x, "shape", ()) # scalars have shape () by definition.
if not _shape_matches(shape, expected):
errors.append((idx, shape, _ai.format_shape_matcher(expected)))
if errors:
msg = "; ".join(
f"input {e[0]} has shape {e[1]} but expected {e[2]}" for e in errors)
raise AssertionError(f"Error in shape compatibility check: {msg}.")
@_static_assertion
def assert_is_broadcastable(shape_a: Sequence[int],
shape_b: Sequence[int]) -> None:
"""Checks that an array of ``shape_a`` is broadcastable to one of ``shape_b``.
Args:
shape_a: A shape of the array to check.
shape_b: A target shape after broadcasting.
Raises:
AssertionError: If ``shape_a`` is not broadcastable to ``shape_b``.
"""
error = AssertionError(
f"Shape {shape_a} is not broadcastable to shape {shape_b}.")
ndim_a = len(shape_a)
ndim_b = len(shape_b)
if ndim_a > ndim_b:
raise error
else:
for i in range(1, ndim_a + 1):
if shape_a[-i] != 1 and shape_a[-i] != shape_b[-i]:
raise error
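def _is_broadcastable_sketch():
  """Hedged, illustrative sketch (not part of the original chex file).
  Illustrates the trailing-dimension rule checked by `assert_is_broadcastable`
  above: aligned from the right, each dim of `shape_a` must be 1 or equal.
  """
  assert_is_broadcastable((3,), (2, 3))    # trailing dims match
  assert_is_broadcastable((1, 3), (5, 3))  # size-1 dims broadcast
  try:
    assert_is_broadcastable((2, 3), (3,))  # rank of `shape_a` exceeds target
  except AssertionError:
    pass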
@_static_assertion
def assert_equal_rank(inputs: Sequence[Array]) -> None:
"""Checks that all arrays have the same rank.
Args:
inputs: A collection of arrays.
Raises:
AssertionError: If the ranks of all arrays do not match.
ValueError: If ``inputs`` is not a collection of arrays.
"""
_ai.assert_collection_of_arrays(inputs)
rank = len(inputs[0].shape)
expected_ranks = [rank] * len(inputs)
ranks = [len(x.shape) for x in inputs]
if ranks != expected_ranks:
raise AssertionError(f"Arrays have different rank: {ranks}.")
@_static_assertion
def assert_rank(
inputs: Union[Scalar, Union[Array, Sequence[Array]]],
expected_ranks: Union[int, Set[int], Sequence[Union[int,
Set[int]]]]) -> None:
"""Checks that the rank of all inputs matches specified ``expected_ranks``.
Valid usages include:
.. code-block:: python
assert_rank(x, 0) # x is scalar
assert_rank(x, 2) # x is a rank-2 array
assert_rank(x, {0, 2}) # x is scalar or rank-2 array
assert_rank([x, y], 2) # x and y are rank-2 arrays
assert_rank([x, y], [0, 2]) # x is scalar and y is a rank-2 array
assert_rank([x, y], {0, 2}) # x and y are scalar or rank-2 arrays
Args:
inputs: An array or a sequence of arrays.
expected_ranks: A sequence of expected ranks associated with each input,
where the expected rank is either an integer or set of integer options; if
all inputs have same rank, a single scalar or set of scalars may be passed
as ``expected_ranks``.
Raises:
AssertionError: If lengths of ``inputs`` and ``expected_ranks`` don't match;
if ``expected_ranks`` has wrong type;
if the ranks of ``inputs`` do not match ``expected_ranks``.
ValueError: If ``expected_ranks`` is not an integer and not a sequence of
      integers.
"""
if not isinstance(expected_ranks, (collections.abc.Collection, int)):
raise ValueError(
f"Error in rank compatibility check: expected ranks should be a single "
f"integer or a collection of integers, got {expected_ranks}.")
if isinstance(expected_ranks, np.ndarray): # ndarray is abc.Collection
raise ValueError(
f"Error in rank compatibility check: expected ranks should be a single "
f"integer or a collection of integers, but was an array: "
f"{expected_ranks}.")
# Ensure inputs and expected ranks are sequences.
if not isinstance(inputs, collections.abc.Sequence):
inputs = [inputs]
if (not isinstance(expected_ranks, collections.abc.Sequence) or
isinstance(expected_ranks, collections.abc.Set)):
expected_ranks = [expected_ranks] * len(inputs)
if len(inputs) != len(expected_ranks):
raise AssertionError(
"Length of inputs and expected_ranks must match: inputs has length "
f"{len(inputs)}, expected_ranks has length {len(expected_ranks)}.")
errors = []
for idx, (x, expected) in enumerate(zip(inputs, expected_ranks)):
if hasattr(x, "shape"):
shape = x.shape
else:
shape = () # scalars have shape () by definition.
rank = len(shape)
# Multiple expected options can be specified.
# Check against old usage where options could be any sequence
if (isinstance(expected, collections.abc.Sequence) and
not isinstance(expected, collections.abc.Set)):
raise ValueError("Error in rank compatibility check: "
"Expected ranks should be integers or sets of integers.")
options = (
expected if isinstance(expected, collections.abc.Set) else {expected})
if rank not in options:
errors.append((idx, rank, shape, expected))
if errors:
msg = "; ".join(
f"input {e[0]} has rank {e[1]} (shape {e[2]}) but expected {e[3]}"
for e in errors)
raise AssertionError(f"Error in rank compatibility check: {msg}.")
@_static_assertion
def assert_type(
inputs: Union[Scalar, Union[Array, Sequence[Array]]],
expected_types: Union[Type[Scalar], Sequence[Type[Scalar]]]) -> None:
"""Checks that the type of all inputs matches specified ``expected_types``.
Valid usages include:
.. code-block:: python
assert_type(7, int)
assert_type(7.1, float)
assert_type(False, bool)
assert_type([7, 8], int)
assert_type([7, 7.1], [int, float])
assert_type(np.array(7), int)
assert_type(np.array(7.1), float)
assert_type(jnp.array(7), int)
assert_type([jnp.array([7, 8]), np.array(7.1)], [int, float])
Args:
inputs: An array or a sequence of arrays or scalars.
expected_types: A sequence of expected types associated with each input; if
all inputs have same type, a single type may be passed as
``expected_types``.
Raises:
AssertionError: If lengths of ``inputs`` and ``expected_types`` don't match;
if ``expected_types`` contains unsupported pytype;
if the types of inputs do not match the expected types.
"""
if not isinstance(inputs, (list, tuple)):
inputs = [inputs]
if not isinstance(expected_types, (list, tuple)):
expected_types = [expected_types] * len(inputs)
errors = []
if len(inputs) != len(expected_types):
raise AssertionError(f"Length of `inputs` and `expected_types` must match, "
f"got {len(inputs)} != {len(expected_types)}.")
for idx, (x, expected) in enumerate(zip(inputs, expected_types)):
if jnp.issubdtype(expected, jnp.floating):
parent = jnp.floating
elif jnp.issubdtype(expected, jnp.integer):
parent = jnp.integer
elif jnp.issubdtype(expected, jnp.bool_):
parent = jnp.bool_
else:
raise AssertionError(
f"Error in type compatibility check, unsupported dtype '{expected}'.")
if not jnp.issubdtype(jnp.result_type(x), parent):
errors.append((idx, jnp.result_type(x), expected))
if errors:
msg = "; ".join(
f"input {e[0]} has type {e[1]} but expected {e[2]}" for e in errors)
raise AssertionError(f"Error in type compatibility check: {msg}.")
@_static_assertion
def assert_axis_dimension_comparator(tensor: Array, axis: int,
pass_fn: Callable[[int], bool],
error_string: str):
"""Asserts that `pass_fn(tensor.shape[axis])` passes.
Used to implement ==, >, >=, <, <= checks.
Args:
tensor: A JAX array.
axis: An integer specifying which axis to assert.
    pass_fn: A callable which takes the size of the given dimension and returns
      `False` when the assertion should fail.
error_string: string which is inserted in assertion failure messages -
'expected tensor to have dimension {error_string} on axis ...'.
Raises:
    AssertionError: if `pass_fn(tensor.shape[axis])` does not return `True`.
"""
if not isinstance(tensor, (jax.Array, np.ndarray)):
tensor = np.asarray(tensor) # np is broader than jnp (it supports strings)
if axis >= len(tensor.shape) or axis < -len(tensor.shape):
raise AssertionError(
f"Expected tensor to have dim {error_string} on axis "
f"'{axis}' but axis '{axis}' not available: tensor rank is "
f"'{len(tensor.shape)}'.")
if not pass_fn(tensor.shape[axis]):
raise AssertionError(
f"Expected tensor to have dimension {error_string} on axis"
f" '{axis}' but got '{tensor.shape[axis]}' instead.")
@_static_assertion
def assert_axis_dimension(tensor: Array, axis: int, expected: int) -> None:
"""Checks that ``tensor.shape[axis] == expected``.
Args:
tensor: A JAX array.
axis: An integer specifying which axis to assert.
expected: An expected value of ``tensor.shape[axis]``.
Raises:
AssertionError:
The dimension of the specified axis does not match the prescribed value.
"""
assert_axis_dimension_comparator(
tensor,
axis,
pass_fn=lambda tensor_dim: tensor_dim == expected,
error_string=f"equal to '{expected}'")
@_static_assertion
def assert_axis_dimension_gt(tensor: Array, axis: int, val: int) -> None:
"""Checks that ``tensor.shape[axis] > val``.
Args:
tensor: A JAX array.
axis: An integer specifying which axis to assert.
val: A value ``tensor.shape[axis]`` must be greater than.
Raises:
AssertionError: if the dimension of ``axis`` is <= ``val``.
"""
assert_axis_dimension_comparator(
tensor,
axis,
pass_fn=lambda tensor_dim: tensor_dim > val,
error_string=f"greater than '{val}'")
@_static_assertion
def assert_axis_dimension_gteq(tensor: Array, axis: int, val: int) -> None:
"""Checks that ``tensor.shape[axis] >= val``.
Args:
tensor: A JAX array.
axis: An integer specifying which axis to assert.
val: A value ``tensor.shape[axis]`` must be greater than or equal to.
Raises:
AssertionError: if the dimension of ``axis`` is < ``val``.
"""
assert_axis_dimension_comparator(
tensor,
axis,
pass_fn=lambda tensor_dim: tensor_dim >= val,
error_string=f"greater than or equal to '{val}'")
@_static_assertion
def assert_axis_dimension_lt(tensor: Array, axis: int, val: int) -> None:
"""Checks that ``tensor.shape[axis] < val``.
Args:
    tensor: A JAX array.
    axis: An integer specifying which axis to assert.
val: A value ``tensor.shape[axis]`` must be less than.
Raises:
AssertionError: if the dimension of ``axis`` is >= ``val``.
"""
assert_axis_dimension_comparator(
tensor,
axis,
pass_fn=lambda tensor_dim: tensor_dim < val,
error_string=f"less than '{val}'")
@_static_assertion
def assert_axis_dimension_lteq(tensor: Array, axis: int, val: int) -> None:
"""Checks that ``tensor.shape[axis] <= val``.
Args:
tensor: A JAX array.
axis: An integer specifying which axis to assert.
val: A value ``tensor.shape[axis]`` must be less than or equal to.
Raises:
AssertionError: if the dimension of ``axis`` is > ``val``.
"""
assert_axis_dimension_comparator(
tensor,
axis,
pass_fn=lambda tensor_dim: tensor_dim <= val,
error_string=f"less than or equal to '{val}'")
@_static_assertion
def assert_numerical_grads(f: Callable[..., Array],
f_args: Sequence[Array],
order: int,
atol: float = 0.01,
**check_kwargs) -> None:
"""Checks that autodiff and numerical gradients of a function match.
Args:
f: A function to check.
f_args: Arguments of the function.
order: An order of gradients.
atol: An absolute tolerance.
**check_kwargs: Kwargs for ``jax_test.check_grads``.
Raises:
AssertionError: If automatic differentiation gradients deviate from finite
difference gradients.
"""
# Correct scaling.
# Remove after https://github.com/google/jax/issues/3130 is fixed.
atol *= f_args[0].size
# Mock `jax.lax.stop_gradient` because finite diff. method does not honour it.
mock_sg = lambda t: jax.tree_util.tree_map(jnp.ones_like, t)
with mock.patch("jax.lax.stop_gradient", mock_sg):
jax_test.check_grads(f, f_args, order=order, atol=atol, **check_kwargs)
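def _assert_numerical_grads_sketch():
  """Hedged, illustrative sketch (not part of the original chex file).
  Compares autodiff and finite-difference gradients of a smooth function,
  using the module-level `jnp` import and default tolerances.
  """
  f = lambda x: jnp.sum(jnp.tanh(x) ** 2)
  x = jnp.linspace(-1.0, 1.0, 8)
  assert_numerical_grads(f, (x,), order=1)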
# "static" because tracers can be compared with `None`.
@_static_assertion
def assert_tree_no_nones(tree: ArrayTree) -> None:
"""Checks that a tree does not contain `None`.
Args:
tree: A tree to assert.
Raises:
AssertionError: If the tree contains at least one `None`.
"""
has_nones = False
def _is_leaf(value):
if value is None:
nonlocal has_nones
has_nones = True
return False
treedef = jax.tree_util.tree_structure(tree, is_leaf=_is_leaf)
if has_nones:
raise AssertionError(f"Tree contains `None`(s): {treedef}.")
@_static_assertion
def assert_tree_has_only_ndarrays(tree: ArrayTree) -> None:
"""Checks that all `tree`'s leaves are n-dimensional arrays (tensors).
Args:
tree: A tree to assert.
Raises:
AssertionError: If the tree contains an object which is not an ndarray.
"""
errors = []
def _assert_fn(path, leaf):
if leaf is not None:
if not isinstance(leaf, (np.ndarray, jnp.ndarray)):
nonlocal errors
errors.append((f"Tree leaf '{_ai.format_tree_path(path)}' is not an "
f"ndarray (type={type(leaf)})."))
for path, leaf in jax.tree_util.tree_flatten_with_path(tree)[0]:
_assert_fn(_ai.convert_jax_path_to_dm_path(path), leaf)
if errors:
raise AssertionError("\n".join(errors))
# Only look the sharding attribute after jax version >= 0.3.22 i.e. remove this
# function and use `isinstance(x.sharding, jax.sharding.PmapSharding)` after
# jax version >= 0.3.22.
# This is for backwards compatibility.
def _check_sharding(x):
if hasattr(jax, "Array") and isinstance(x, jax.Array):
if isinstance(x.sharding, jax.sharding.PmapSharding):
return True
else:
return len(x.sharding.device_set) > 1
# pytype: disable=attribute-error
return (
hasattr(jax, "pxla")
and hasattr(jax.pxla, "ShardedDeviceArray")
and isinstance(x, jax.pxla.ShardedDeviceArray)
)
# pytype: enable=attribute-error
@_static_assertion
def assert_tree_is_on_host(
tree: ArrayTree,
*,
allow_cpu_device: bool = True,
allow_sharded_arrays: bool = False,
) -> None:
"""Checks that all leaves are ndarrays residing in the host memory (on CPU).
This assertion only accepts trees consisting of ndarrays.
Args:
tree: A tree to assert.
allow_cpu_device: Whether to allow JAX arrays that reside on a CPU device.
allow_sharded_arrays: Whether to allow sharded JAX arrays. Sharded arrays
are considered "on host" only if they are sharded across CPU devices and
`allow_cpu_device` is `True`.
Raises:
AssertionError: If the tree contains a leaf that is not an ndarray or does
not reside on host.
"""
assert_tree_has_only_ndarrays(tree)
errors = []
def _assert_fn(path, leaf):
if leaf is not None:
if not isinstance(leaf, np.ndarray):
nonlocal errors
if isinstance(leaf, jax.Array):
if _check_sharding(leaf):
# Sharded array.
if not allow_sharded_arrays:
errors.append(
f"Tree leaf '{_ai.format_tree_path(path)}' is sharded and"
f" resides on {leaf.devices()} (sharded arrays are"
" disallowed)."
)
elif allow_cpu_device:
if any(d.platform != "cpu" for d in leaf.devices()):
errors.append(
f"Tree leaf '{_ai.format_tree_path(path)}' is sharded and"
f" resides on {leaf.devices()}."
)
else:
errors.append(
f"Tree leaf '{_ai.format_tree_path(path)}' is sharded and"
f" resides on {leaf.devices()} (CPU devices are disallowed)."
)
elif allow_cpu_device:
# Device array.
if leaf.device().platform != "cpu":
errors.append(
f"Tree leaf '{_ai.format_tree_path(path)}' resides"
f" on {leaf.device()}."
)
else:
errors.append((f"Tree leaf '{_ai.format_tree_path(path)}' resides "
f"on {leaf.device()} (CPU devices are disallowed)."))
else:
# Not a jax.Array.
errors.append((f"Tree leaf '{_ai.format_tree_path(path)}' has "
f"unexpected type: {type(leaf)}."))
for path, leaf in jax.tree_util.tree_flatten_with_path(tree)[0]:
_assert_fn(_ai.convert_jax_path_to_dm_path(path), leaf)
if errors:
raise AssertionError("\n".join(errors))
@_static_assertion
def assert_tree_is_on_device(tree: ArrayTree,
*,
platform: Union[Sequence[str],
str] = ("gpu", "tpu"),
device: Optional[pytypes.Device] = None) -> None:
"""Checks that all leaves are ndarrays residing in device memory (in HBM).
Sharded DeviceArrays are disallowed.
Args:
tree: A tree to assert.
platform: A platform or a list of platforms where the leaves are expected to
reside. Ignored if `device` is specified.
device: An optional device where the tree's arrays are expected to reside.
Any device (except CPU) is accepted if not specified.
Raises:
AssertionError: If the tree contains a leaf that is not an ndarray or does
not reside on the specified device or platform.
"""
assert_tree_has_only_ndarrays(tree)
# If device is specified, require its platform.
if device is not None:
platform = (device.platform,)
elif not isinstance(platform, collections.abc.Sequence):
platform = (platform,)
errors = []
def _assert_fn(path, leaf):
if leaf is not None:
nonlocal errors
# Check that the leaf is a DeviceArray.
if isinstance(leaf, jax.Array):
if _check_sharding(leaf):
errors.append((f"Tree leaf '{_ai.format_tree_path(path)}' is a "
f"ShardedDeviceArray which are disallowed. "
f" (type={type(leaf)})."))
else: # DeviceArray and not ShardedDeviceArray
# Check the platform.
if leaf.device().platform not in platform:
errors.append(
(f"Tree leaf '{_ai.format_tree_path(path)}' resides on "
f"'{leaf.device().platform}', expected '{platform}'."))
# Check the device.
if device is not None and leaf.device() != device:
errors.append(
(f"Tree leaf '{_ai.format_tree_path(path)}' resides on "
f"{leaf.device()}, expected {device}."))
else:
errors.append((f"Tree leaf '{_ai.format_tree_path(path)}' has "
f"unexpected type: {type(leaf)}."))
for path, leaf in jax.tree_util.tree_flatten_with_path(tree)[0]:
_assert_fn(_ai.convert_jax_path_to_dm_path(path), leaf)
if errors:
raise AssertionError("\n".join(errors))
@_static_assertion
def assert_tree_is_sharded(tree: ArrayTree,
*,
devices: Sequence[pytypes.Device]) -> None:
"""Checks that all leaves are ndarrays sharded across the specified devices.
Args:
tree: A tree to assert.
devices: A list of devices which the tree's leaves are expected to be
sharded across. This list is order-sensitive.
Raises:
AssertionError: If the tree contains a leaf that is not a device array
sharded across the specified devices.
"""
assert_tree_has_only_ndarrays(tree)
errors = []
devices = tuple(devices)
def _assert_fn(path, leaf):
if leaf is not None:
nonlocal errors
# Check that the leaf is a ShardedArray.
if isinstance(leaf, jax.Array):
if _check_sharding(leaf):
shards = tuple(buf.device() for buf in leaf.device_buffers)
if shards != devices:
errors.append(
f"Tree leaf '{_ai.format_tree_path(path)}' is sharded "
f"across {shards} devices, expected {devices}."
)
else:
errors.append(
f"Tree leaf '{_ai.format_tree_path(path)}' is not sharded"
f" (device={leaf.device()})."
)
else:
errors.append(
f"Tree leaf '{_ai.format_tree_path(path)}' is not a "
f"jax.Array (type={type(leaf)})."
)
for path, leaf in jax.tree_util.tree_flatten_with_path(tree)[0]:
_assert_fn(_ai.convert_jax_path_to_dm_path(path), leaf)
if errors:
raise AssertionError("\n".join(errors))
@_static_assertion
def assert_tree_shape_prefix(tree: ArrayTree,
shape_prefix: Sequence[int]) -> None:
"""Checks that all ``tree`` leaves' shapes have the same prefix.
Args:
tree: A tree to check.
shape_prefix: An expected shape prefix.
Raises:
AssertionError: If some leaf's shape doesn't start with ``shape_prefix``.
"""
# To compare with the leaf's `shape`, convert int sequence to tuple.
shape_prefix = tuple(shape_prefix)
if not shape_prefix:
return # No prefix, this is trivially true.
errors = []
def _assert_fn(path, leaf):
nonlocal errors
if len(shape_prefix) > len(leaf.shape):
errors.append(
(f"Tree leaf '{_ai.format_tree_path(path)}' has a shape "
f"of length {leaf.ndim} (shape={leaf.shape}) which is smaller "
f"than the expected prefix of length {len(shape_prefix)} "
f"(prefix={shape_prefix})."))
return
    prefix = leaf.shape[:len(shape_prefix)]
    if prefix != shape_prefix:
      errors.append(
          (f"Tree leaf '{_ai.format_tree_path(path)}' has a shape prefix "
           f"different from expected: {prefix} != {shape_prefix}."))
for path, leaf in jax.tree_util.tree_flatten_with_path(tree)[0]:
_assert_fn(_ai.convert_jax_path_to_dm_path(path), leaf)
if errors:
raise AssertionError("\n".join(errors))
@_static_assertion
def assert_tree_shape_suffix(
tree: ArrayTree, shape_suffix: Sequence[int]
) -> None:
"""Checks that all ``tree`` leaves' shapes have the same suffix.
Args:
tree: A tree to check.
shape_suffix: An expected shape suffix.
Raises:
AssertionError: If some leaf's shape doesn't start with ``shape_suffix``.
"""
# To compare with the leaf's `shape`, convert int sequence to tuple.
shape_suffix = tuple(shape_suffix)
if not shape_suffix:
return # No suffix, this is trivially true.
errors = []
def _assert_fn(path, leaf):
nonlocal errors
if len(shape_suffix) > len(leaf.shape):
errors.append(
(f"Tree leaf '{_ai.format_tree_path(path)}' has a shape "
f"of length {len(leaf.shape)} (shape={leaf.shape}) which is smaller "
f"than the expected suffix of length {len(shape_suffix)} "
f"(suffix={shape_suffix})."))
return
suffix = leaf.shape[-len(shape_suffix):]
if suffix != shape_suffix:
errors.append(
(f"Tree leaf '{_ai.format_tree_path(path)}' has a shape suffix "
f"different from expected: {suffix} != {shape_suffix}."))
for path, leaf in jax.tree_util.tree_flatten_with_path(tree)[0]:
_assert_fn(_ai.convert_jax_path_to_dm_path(path), leaf)
if errors:
raise AssertionError("\n".join(errors))
@_static_assertion
def assert_trees_all_equal_structs(*trees: ArrayTree) -> None:
"""Checks that trees have the same structure.
Args:
*trees: A sequence of (at least 2) trees to assert equal structure between.
Raises:
ValueError: If ``trees`` does not contain at least 2 elements.
AssertionError: If structures of any two trees are different.
"""
if len(trees) < 2:
raise ValueError(
"assert_trees_all_equal_structs on a single tree does not make sense. "
"Maybe you wrote `assert_trees_all_equal_structs([a, b])` instead of "
"`assert_trees_all_equal_structs(a, b)` ?")
first_treedef = jax.tree_util.tree_structure(trees[0])
other_treedefs = (jax.tree_util.tree_structure(t) for t in trees[1:])
for i, treedef in enumerate(other_treedefs, start=1):
if first_treedef != treedef:
raise AssertionError(
f"Error in tree structs equality check: trees 0 and {i} do not match,"
f"\n tree 0: {first_treedef}"
f"\n tree {i}: {treedef}")
@_static_assertion
def assert_trees_all_equal_comparator(equality_comparator: _ai.TLeavesEqCmpFn,
error_msg_fn: _ai.TLeavesEqCmpErrorFn,
*trees: ArrayTree) -> None:
"""Checks that all trees are equal as per the custom comparator for leaves.
Args:
equality_comparator: A custom function that accepts two leaves and checks
whether they are equal. Expected to be transitive.
    error_msg_fn: A function accepting two leaves that are unequal as per
      ``equality_comparator`` and returning an error message.
*trees: A sequence of (at least 2) trees to check on equality as per
``equality_comparator``.
Raises:
ValueError: If ``trees`` does not contain at least 2 elements.
AssertionError: if ``equality_comparator`` returns `False` for any pair of
trees from ``trees``.
"""
if len(trees) < 2:
raise ValueError(
"Assertions over only one tree does not make sense. Maybe you wrote "
"`assert_trees_xxx([a, b])` instead of `assert_trees_xxx(a, b)`, or "
"forgot the `error_msg_fn` arg to `assert_trees_all_equal_comparator`?")
assert_trees_all_equal_structs(*trees)
def tree_error_msg_fn(l_1: _ai.TLeaf, l_2: _ai.TLeaf, path: str, i_1: int,
i_2: int):
msg = error_msg_fn(l_1, l_2)
if path:
return f"Trees {i_1} and {i_2} differ in leaves '{path}': {msg}."
else:
return f"Trees (arrays) {i_1} and {i_2} differ: {msg}."
cmp_fn = functools.partial(_ai.assert_leaves_all_eq_comparator,
equality_comparator, tree_error_msg_fn)
# Trees are guaranteed to have the same structure.
paths = [
_ai.convert_jax_path_to_dm_path(path)
for path, _ in jax.tree_util.tree_flatten_with_path(trees[0])[0]]
trees_leaves = [jax.tree_util.tree_leaves(tree) for tree in trees]
for leaf_i, path in enumerate(paths):
cmp_fn(path, *[leaves[leaf_i] for leaves in trees_leaves])
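def _trees_all_equal_comparator_sketch():
  """Hedged, illustrative sketch (not part of the original chex file).
  Shows a custom per-leaf comparator (equal ranks) plugged into
  `assert_trees_all_equal_comparator` above; uses the module-level `jnp`.
  """
  same_rank = lambda a, b: jnp.ndim(a) == jnp.ndim(b)
  err_msg = lambda a, b: f"ranks: {jnp.ndim(a)} != {jnp.ndim(b)}"
  tree_a = {"w": jnp.zeros((2, 3)), "b": jnp.zeros((3,))}
  tree_b = {"w": jnp.ones((4, 5)), "b": jnp.ones((7,))}
  # Passes: identical structures and matching per-leaf ranks.
  assert_trees_all_equal_comparator(same_rank, err_msg, tree_a, tree_b)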
@_static_assertion
def assert_trees_all_equal_dtypes(*trees: ArrayTree) -> None:
"""Checks that trees' leaves have the same dtype.
Args:
*trees: A sequence of (at least 2) trees to check.
Raises:
AssertionError: If leaves' dtypes for any two trees differ.
"""
def cmp_fn(arr_1, arr_2):
return (hasattr(arr_1, "dtype") and hasattr(arr_2, "dtype") and
arr_1.dtype == arr_2.dtype)
def err_msg_fn(arr_1, arr_2):
if not hasattr(arr_1, "dtype"):
return f"{type(arr_1)} is not a (j-)np array (has no `dtype` property)"
if not hasattr(arr_2, "dtype"):
return f"{type(arr_2)} is not a (j-)np array (has no `dtype` property)"
return f"types: {arr_1.dtype} != {arr_2.dtype}"
assert_trees_all_equal_comparator(cmp_fn, err_msg_fn, *trees)
@_static_assertion
def assert_trees_all_equal_sizes(*trees: ArrayTree) -> None:
"""Checks that trees have the same structure and leaves' sizes.
Args:
*trees: A sequence of (at least 2) trees with array leaves.
Raises:
AssertionError: If trees' structures or leaves' sizes are different.
"""
cmp_fn = lambda arr_1, arr_2: arr_1.size == arr_2.size
err_msg_fn = lambda arr_1, arr_2: f"sizes: {arr_1.size} != {arr_2.size}"
assert_trees_all_equal_comparator(cmp_fn, err_msg_fn, *trees)
@_static_assertion
def assert_trees_all_equal_shapes(*trees: ArrayTree) -> None:
"""Checks that trees have the same structure and leaves' shapes.
Args:
*trees: A sequence of (at least 2) trees with array leaves.
Raises:
AssertionError: If trees' structures or leaves' shapes are different.
"""
cmp_fn = lambda arr_1, arr_2: arr_1.shape == arr_2.shape
err_msg_fn = lambda arr_1, arr_2: f"shapes: {arr_1.shape} != {arr_2.shape}"
assert_trees_all_equal_comparator(cmp_fn, err_msg_fn, *trees)
@_static_assertion
def assert_trees_all_equal_shapes_and_dtypes(*trees: ArrayTree) -> None:
"""Checks that trees' leaves have the same shape and dtype.
Args:
*trees: A sequence of (at least 2) trees to check.
Raises:
AssertionError: If leaves' shapes or dtypes for any two trees differ.
"""
assert_trees_all_equal_shapes(*trees)
assert_trees_all_equal_dtypes(*trees)
############# Value assertions. #############
def _assert_tree_all_finite_static(tree_like: ArrayTree) -> None:
"""Checks that all leaves in a tree are finite.
Args:
tree_like: A pytree with array leaves.
Raises:
AssertionError: If any leaf in ``tree_like`` is non-finite.
"""
all_finite = jax.tree_util.tree_all(
jax.tree_util.tree_map(lambda x: np.all(np.isfinite(x)), tree_like))
if not all_finite:
is_finite = lambda x: "Finite" if np.all(np.isfinite(x)) else "Nonfinite"
error_msg = jax.tree_map(is_finite, tree_like)
raise AssertionError(f"Tree contains non-finite value: {error_msg}.")
def _assert_tree_all_finite_jittable(tree_like: ArrayTree) -> Array:
"""A jittable version of `_assert_tree_all_finite_static`."""
labeled_tree = jax.tree_map(
lambda x: jax.lax.select(jnp.isfinite(x).all(), .0, jnp.nan), tree_like
)
predicate = jnp.all(
jnp.isfinite(jnp.asarray(jax.tree_util.tree_leaves(labeled_tree)))
)
checkify.check(
pred=predicate,
msg="Tree contains non-finite value: {tree}.",
tree=labeled_tree,
)
return predicate
assert_tree_all_finite = _value_assertion(
assert_fn=_assert_tree_all_finite_static,
jittable_assert_fn=_assert_tree_all_finite_jittable,
name="assert_tree_all_finite")
@_static_assertion
def _assert_trees_all_equal_static(
*trees: ArrayTree, strict: bool = False
) -> None:
"""Checks that all trees have leaves with *exactly* equal values.
If you are comparing floating point numbers, an exact equality check may not
be appropriate; consider using ``assert_trees_all_close``.
Args:
*trees: A sequence of (at least 2) trees with array leaves.
    strict: If True, disable special scalar handling as described in the notes
      section of `np.testing.assert_array_equal`.
Raises:
AssertionError: If the leaf values actual and desired are not exactly equal.
"""
def assert_fn(arr_1, arr_2):
np.testing.assert_array_equal(
_ai.jnp_to_np_array(arr_1),
_ai.jnp_to_np_array(arr_2),
err_msg="Error in value equality check: Values not exactly equal",
strict=strict)
def cmp_fn(arr_1, arr_2) -> bool:
try:
# Raises an AssertionError if values are not equal.
assert_fn(arr_1, arr_2)
except AssertionError:
return False
return True
def err_msg_fn(arr_1, arr_2) -> str:
try:
assert_fn(arr_1, arr_2)
except AssertionError as e:
return (f"{str(e)} \nOriginal dtypes: "
f"{np.asarray(arr_1).dtype}, {np.asarray(arr_2).dtype}")
return ""
assert_trees_all_equal_comparator(cmp_fn, err_msg_fn, *trees)
def _assert_trees_all_equal_jittable(
*trees: ArrayTree, strict: bool = True,
) -> Array:
"""A jittable version of `_assert_trees_all_equal_static`."""
if not strict:
raise NotImplementedError(
"`strict=False` is not implemented by"
" `_assert_trees_all_equal_jittable`. This is a feature of"
" `np.testing.assert_array_equal` used in the static implementation of"
" `assert_trees_all_equal` that we do not implement in the jittable"
" version."
)
err_msg_template = "Values not exactly equal: {arr_1} != {arr_2}."
cmp_fn = lambda x, y: jnp.array_equal(x, y, equal_nan=True)
return _ai.assert_trees_all_eq_comparator_jittable(
cmp_fn, err_msg_template, *trees
)
assert_trees_all_equal = _value_assertion(
assert_fn=_assert_trees_all_equal_static,
jittable_assert_fn=_assert_trees_all_equal_jittable,
name="assert_trees_all_equal",
)
def _assert_trees_all_close_static(*trees: ArrayTree,
rtol: float = 1e-06,
atol: float = .0) -> None:
"""Checks that all trees have leaves with approximately equal values.
This compares the difference between values of actual and desired up to
``atol + rtol * abs(desired)``.
Args:
*trees: A sequence of (at least 2) trees with array leaves.
rtol: A relative tolerance.
atol: An absolute tolerance.
Raises:
AssertionError: If actual and desired values are not equal up to
specified tolerance.
"""
def assert_fn(arr_1, arr_2):
np.testing.assert_allclose(
_ai.jnp_to_np_array(arr_1),
_ai.jnp_to_np_array(arr_2),
rtol=rtol,
atol=atol,
err_msg="Error in value equality check: Values not approximately equal")
def cmp_fn(arr_1, arr_2) -> bool:
try:
# Raises an AssertionError if values are not close.
assert_fn(arr_1, arr_2)
except AssertionError:
return False
return True
def err_msg_fn(arr_1, arr_2) -> str:
try:
assert_fn(arr_1, arr_2)
except AssertionError as e:
return (f"{str(e)} \nOriginal dtypes: "
f"{np.asarray(arr_1).dtype}, {np.asarray(arr_2).dtype}")
return ""
assert_trees_all_equal_comparator(cmp_fn, err_msg_fn, *trees)
def _assert_trees_all_close_jittable(*trees: ArrayTree,
rtol: float = 1e-06,
atol: float = .0) -> Array:
"""A jittable version of `_assert_trees_all_close_static`."""
err_msg_template = (
f"Values not approximately equal ({rtol=}, {atol=}): "
+ "{arr_1} != {arr_2}."
)
cmp_fn = lambda x, y: jnp.isclose(x, y, rtol=rtol, atol=atol).all()
return _ai.assert_trees_all_eq_comparator_jittable(
cmp_fn, err_msg_template, *trees
)
assert_trees_all_close = _value_assertion(
assert_fn=_assert_trees_all_close_static,
jittable_assert_fn=_assert_trees_all_close_jittable,
name="assert_trees_all_close")
def _assert_trees_all_close_ulp_static(
*trees: ArrayTree,
maxulp: int = 1,
) -> None:
"""Checks that tree leaves differ by at most `maxulp` Units in the Last Place.
This is the Chex version of np.testing.assert_array_max_ulp.
Assertions on floating point values are tricky because the precision varies
depending on the value. For example, with float32, the precision at 1 is
np.spacing(np.float32(1.0)) ≈ 1e-7, but the precision at 5,000,000 is only
np.spacing(np.float32(5e6)) = 0.5. This makes it hard to predict ahead of time
what tolerance to use when checking whether two numbers are equal: a
difference of only a couple of bits can equate to an arbitrarily large
absolute difference.
Assertions based on _relative_ differences are one solution to this problem,
but have the disadvantage that it's hard to choose the tolerance. If you want
to verify that two calculations produce _exactly_ the same result
modulo the inherent non-determinism of floating point operations, do you set
the tolerance to...0.01? 0.001? It's hard to be sure you've set it low enough
that you won't miss one of your computations being slightly wrong.
Assertions based on 'units in the last place' (ULP) instead solve this
problem by letting you specify tolerances in terms of the precision actually
available at the current scale of your values. The ULP at some value x is
essentially the spacing between the floating point numbers actually
representable in the vicinity of x - equivalent to the 'precision' we
  discussed above. With a tolerance of, say, `maxulp=5`, you're saying
that two values are within 5 actually-representable-numbers of each other -
a strong guarantee that two computations are as close as possible to
identical, while still allowing reasonable wiggle room for small differences
due to e.g. different operator orderings.
Note that this function is not currently supported within JIT contexts,
and does not currently support bfloat16 dtypes.
Args:
*trees: A sequence of (at least 2) trees with array leaves.
maxulp: The maximum number of ULPs by which leaves may differ.
Raises:
AssertionError: If actual and desired values are not equal up to
specified tolerance.
"""
def assert_fn(arr_1, arr_2):
if (
getattr(arr_1, "dtype", None) == jnp.bfloat16
or getattr(arr_2, "dtype", None) == jnp.bfloat16
):
# jnp_to_np_array currently converts bfloat16 to float32, which will cause
# assert_array_max_ulp to give incorrect results -
# and assert_array_max_ulp itself does not currently support bfloat16:
# https://github.com/jax-ml/ml_dtypes/issues/56
raise ValueError(
f"{_ai.ERR_PREFIX}ULP assertions are not currently supported for "
"bfloat16."
)
np.testing.assert_array_max_ulp(
_ai.jnp_to_np_array(arr_1),
_ai.jnp_to_np_array(arr_2),
maxulp=maxulp,
)
def cmp_fn(arr_1, arr_2) -> bool:
try:
# Raises an AssertionError if values are not close.
assert_fn(arr_1, arr_2)
except AssertionError:
return False
return True
def err_msg_fn(arr_1, arr_2) -> str:
try:
assert_fn(arr_1, arr_2)
except AssertionError as e:
return (
f"{str(e)} \nOriginal dtypes: "
f"{np.asarray(arr_1).dtype}, {np.asarray(arr_2).dtype}"
)
return ""
assert_trees_all_equal_comparator(cmp_fn, err_msg_fn, *trees)
# The return should be typing.NoReturn, but that would significantly complicate
# the signature of _value_assertion, so we pretend the return is jax.Array.
def _assert_trees_all_close_ulp_jittable(
*trees: ArrayTree,
maxulp: int = 1,
) -> jax.Array:
"""A dummy jittable version of `_assert_trees_all_close_ulp_static`.
JAX does not yet have a native version of assert_array_max_ulp, so at the
moment making ULP assertions on tracer objects simply isn't supported.
This function exists only to make sure a sensible error is given.
Args:
*trees: Ignored.
maxulp: Ignored.
Raises:
NotImplementedError: unconditionally.
Returns:
Never returns. (We pretend jax.Array to satisfy the type checker.)
"""
del trees, maxulp
raise NotImplementedError(
f"{_ai.ERR_PREFIX}assert_trees_all_close_ulp is not supported within JIT "
"contexts."
)
assert_trees_all_close_ulp = _value_assertion(
assert_fn=_assert_trees_all_close_ulp_static,
jittable_assert_fn=_assert_trees_all_close_ulp_jittable,
name="assert_trees_all_close_ulp",
)
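# A hedged illustration of why ULP-based tolerances adapt to scale; the helper
# name is hypothetical and the snippet relies only on numpy plus the
# `assert_trees_all_close_ulp` assertion defined above.
def _example_ulp_scale_awareness():
  tiny_ulp = np.spacing(np.float32(1.0))   # ~1.19e-07: one ULP near 1.0.
  large_ulp = np.spacing(np.float32(5e6))  # 0.5: one ULP near 5e6.
  # Values one representable float32 step apart pass with maxulp=1 at either
  # scale, even though the absolute differences differ by ~7 orders of
  # magnitude.
  np.testing.assert_array_max_ulp(
      np.float32(1.0), np.float32(1.0) + tiny_ulp, maxulp=1)
  np.testing.assert_array_max_ulp(
      np.float32(5e6), np.float32(5e6) + large_ulp, maxulp=1)
  # The tree-level wrapper follows the same semantics for pytrees of arrays.
  assert_trees_all_close_ulp(
      {"x": np.float32([1.0, 5e6])},
      {"x": np.float32([1.0 + tiny_ulp, 5e6 + large_ulp])},
      maxulp=1)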
|
chex-master
|
chex/_src/asserts.py
|
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests dynamic_batching.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import datetime
from multiprocessing import pool
import time
import dynamic_batching
import tensorflow as tf
from six.moves import range
_SLEEP_TIME = 1.0
class DynamicBatchingTest(tf.test.TestCase):
def test_one(self):
with self.test_session() as session:
@dynamic_batching.batch_fn
def f(a, b):
batch_size = tf.shape(a)[0]
return a + b, tf.tile([batch_size], [batch_size])
output = f(tf.constant([[1, 3]]), tf.constant([2]))
tf.train.start_queue_runners()
result, batch_size = session.run(output)
self.assertAllEqual([[3, 5]], result)
self.assertAllEqual([1], batch_size)
def test_two(self):
with self.test_session() as session:
@dynamic_batching.batch_fn
def f(a, b):
batch_size = tf.shape(a)[0]
return a + b, tf.tile([batch_size], [batch_size])
output0 = f(tf.constant([1]), tf.constant([2]))
output1 = f(tf.constant([2]), tf.constant([3]))
tp = pool.ThreadPool(2)
f0 = tp.apply_async(session.run, [output0])
f1 = tp.apply_async(session.run, [output1])
# Make sure both inputs are in the batcher before starting it.
time.sleep(_SLEEP_TIME)
tf.train.start_queue_runners()
result0, batch_size0 = f0.get()
result1, batch_size1 = f1.get()
self.assertAllEqual([3], result0)
self.assertAllEqual([2], batch_size0)
self.assertAllEqual([5], result1)
self.assertAllEqual([2], batch_size1)
def test_many_small(self):
with self.test_session() as session:
@dynamic_batching.batch_fn
def f(a, b):
return a + b
outputs = []
for i in range(200):
outputs.append(f(tf.fill([1, 5], i), tf.fill([1, 5], i)))
tf.train.start_queue_runners()
tp = pool.ThreadPool(10)
futures = []
for output in outputs:
futures.append(tp.apply_async(session.run, [output]))
for i, future in enumerate(futures):
result = future.get()
self.assertAllEqual([[i * 2] * 5], result)
def test_input_batch_size_should_be_one(self):
with self.test_session() as session:
@dynamic_batching.batch_fn
def f(a):
return a
output = f(tf.constant([1, 2]))
coord = tf.train.Coordinator()
tf.train.start_queue_runners(coord=coord)
with self.assertRaises(tf.errors.CancelledError):
session.run(output)
with self.assertRaisesRegexp(tf.errors.InvalidArgumentError,
'requires batch size 1'):
coord.join()
def test_run_after_error_should_be_cancelled(self):
with self.test_session() as session:
@dynamic_batching.batch_fn
def f(a):
return a
output = f(tf.constant([1, 2]))
coord = tf.train.Coordinator()
tf.train.start_queue_runners(coord=coord)
with self.assertRaises(tf.errors.CancelledError):
session.run(output)
with self.assertRaises(tf.errors.CancelledError):
session.run(output)
def test_input_shapes_should_be_equal(self):
with self.test_session() as session:
@dynamic_batching.batch_fn
def f(a, b):
return a + b
output0 = f(tf.constant([1]), tf.constant([2]))
output1 = f(tf.constant([[2]]), tf.constant([3]))
tp = pool.ThreadPool(2)
f0 = tp.apply_async(session.run, [output0])
f1 = tp.apply_async(session.run, [output1])
time.sleep(_SLEEP_TIME)
coord = tf.train.Coordinator()
tf.train.start_queue_runners(coord=coord)
with self.assertRaises(tf.errors.CancelledError):
f0.get()
f1.get()
with self.assertRaisesRegexp(tf.errors.InvalidArgumentError,
'Shapes of inputs much be equal'):
coord.join()
def test_output_must_have_batch_dimension(self):
with self.test_session() as session:
@dynamic_batching.batch_fn
def f(_):
return tf.constant(1)
output = f(tf.constant([1]))
coord = tf.train.Coordinator()
tf.train.start_queue_runners(coord=coord)
with self.assertRaises(tf.errors.CancelledError):
session.run(output)
with self.assertRaisesRegexp(tf.errors.InvalidArgumentError,
'Output shape must have a batch dimension'):
coord.join()
def test_output_must_have_same_batch_dimension_size_as_input(self):
with self.test_session() as session:
@dynamic_batching.batch_fn
def f(_):
return tf.constant([1, 2, 3, 4])
output = f(tf.constant([1]))
coord = tf.train.Coordinator()
tf.train.start_queue_runners(coord=coord)
with self.assertRaises(tf.errors.CancelledError):
session.run(output)
with self.assertRaisesRegexp(
tf.errors.InvalidArgumentError,
'Output shape must have the same batch dimension as the input batch '
'size. Expected: 1 Observed: 4'):
coord.join()
def test_get_inputs_cancelled(self):
with tf.Graph().as_default():
@dynamic_batching.batch_fn
def f(a):
return a
f(tf.constant([1]))
# Intentionally using tf.Session() instead of self.test_session() to have
# control over closing the session. test_session() is a cached session.
with tf.Session():
coord = tf.train.Coordinator()
tf.train.start_queue_runners(coord=coord)
# Sleep to make sure the queue runner has started the first run call.
time.sleep(_SLEEP_TIME)
# Session closed.
with self.assertRaisesRegexp(tf.errors.CancelledError,
'GetInputs operation was cancelled'):
coord.join()
def test_batcher_closed(self):
with tf.Graph().as_default():
@dynamic_batching.batch_fn
def f(a):
return a
f(tf.constant([1]))
# Intentionally using tf.Session() instead of self.test_session() to have
# control over closing the session. test_session() is a cached session.
with tf.Session():
coord = tf.train.Coordinator()
tf.train.start_queue_runners(coord=coord)
time.sleep(_SLEEP_TIME)
coord.request_stop() # Calls close operation.
coord.join()
# Session closed.
def test_minimum_batch_size(self):
with self.test_session() as session:
@dynamic_batching.batch_fn_with_options(
minimum_batch_size=2, timeout_ms=1000)
def f(a, b):
batch_size = tf.shape(a)[0]
return a + b, tf.tile([batch_size], [batch_size])
output = f(tf.constant([[1, 3]]), tf.constant([2]))
tf.train.start_queue_runners()
start = datetime.datetime.now()
session.run(output)
duration = datetime.datetime.now() - start
# There should have been a timeout here because only one sample was added
# and the minimum batch size is 2.
self.assertLessEqual(.9, duration.total_seconds())
self.assertGreaterEqual(1.5, duration.total_seconds())
outputs = [
f(tf.constant([[1, 3]]), tf.constant([2])),
f(tf.constant([[1, 3]]), tf.constant([2]))
]
start = datetime.datetime.now()
(_, batch_size), _ = session.run(outputs)
duration = datetime.datetime.now() - start
# The outputs should be executed immediately because two samples are
# added.
self.assertGreaterEqual(.5, duration.total_seconds())
self.assertEqual(2, batch_size)
def test_maximum_batch_size(self):
with self.test_session() as session:
@dynamic_batching.batch_fn_with_options(maximum_batch_size=2)
def f(a, b):
batch_size = tf.shape(a)[0]
return a + b, tf.tile([batch_size], [batch_size])
outputs = [
f(tf.constant([1]), tf.constant([2])),
f(tf.constant([1]), tf.constant([2])),
f(tf.constant([1]), tf.constant([2])),
f(tf.constant([1]), tf.constant([2])),
f(tf.constant([1]), tf.constant([2])),
]
tf.train.start_queue_runners()
results = session.run(outputs)
for value, batch_size in results:
self.assertEqual(3, value)
self.assertGreaterEqual(2, batch_size)
def test_static_shape(self):
assertions_triggered = [0]
@dynamic_batching.batch_fn_with_options(minimum_batch_size=1,
maximum_batch_size=2)
def f0(a):
self.assertEqual(None, a.shape[0].value)
assertions_triggered[0] += 1
return a
@dynamic_batching.batch_fn_with_options(minimum_batch_size=2,
maximum_batch_size=2)
def f1(a):
# Even though minimum_batch_size and maximum_batch_size are equal, the
      # timeout can cause a batch with less than minimum_batch_size.
self.assertEqual(None, a.shape[0].value)
assertions_triggered[0] += 1
return a
@dynamic_batching.batch_fn_with_options(minimum_batch_size=2,
maximum_batch_size=2,
timeout_ms=None)
def f2(a):
# When timeout is disabled and minimum/maximum batch size are equal, the
# shape is statically known.
self.assertEqual(2, a.shape[0].value)
assertions_triggered[0] += 1
return a
f0(tf.constant([1]))
f1(tf.constant([1]))
f2(tf.constant([1]))
self.assertEqual(3, assertions_triggered[0])
def test_out_of_order_execution1(self):
with self.test_session() as session:
batcher = dynamic_batching._Batcher(minimum_batch_size=1,
maximum_batch_size=1,
timeout_ms=None)
tp = pool.ThreadPool(10)
r0 = tp.apply_async(session.run, batcher.compute([[1]], [tf.int32]))
(input0,), computation_id0 = session.run(batcher.get_inputs([tf.int32]))
r1 = tp.apply_async(session.run, batcher.compute([[2]], [tf.int32]))
(input1,), computation_id1 = session.run(batcher.get_inputs([tf.int32]))
self.assertAllEqual([1], input0)
self.assertAllEqual([2], input1)
session.run(batcher.set_outputs([input0 + 42], computation_id0))
session.run(batcher.set_outputs([input1 + 42], computation_id1))
self.assertAllEqual([43], r0.get())
self.assertAllEqual([44], r1.get())
def test_out_of_order_execution2(self):
with self.test_session() as session:
batcher = dynamic_batching._Batcher(minimum_batch_size=1,
maximum_batch_size=1,
timeout_ms=None)
tp = pool.ThreadPool(10)
r0 = tp.apply_async(session.run, batcher.compute([[1]], [tf.int32]))
(input0,), computation_id0 = session.run(batcher.get_inputs([tf.int32]))
r1 = tp.apply_async(session.run, batcher.compute([[2]], [tf.int32]))
(input1,), computation_id1 = session.run(batcher.get_inputs([tf.int32]))
self.assertAllEqual([1], input0)
self.assertAllEqual([2], input1)
      # These two runs are switched compared to test_out_of_order_execution1.
session.run(batcher.set_outputs([input1 + 42], computation_id1))
session.run(batcher.set_outputs([input0 + 42], computation_id0))
self.assertAllEqual([43], r0.get())
self.assertAllEqual([44], r1.get())
def test_invalid_computation_id(self):
with self.test_session() as session:
batcher = dynamic_batching._Batcher(minimum_batch_size=1,
maximum_batch_size=1,
timeout_ms=None)
tp = pool.ThreadPool(10)
tp.apply_async(session.run, batcher.compute([[1]], [tf.int32]))
(input0,), _ = session.run(batcher.get_inputs([tf.int32]))
self.assertAllEqual([1], input0)
with self.assertRaisesRegexp(tf.errors.InvalidArgumentError,
'Invalid computation id'):
session.run(batcher.set_outputs([input0], 42))
def test_op_shape(self):
with self.test_session():
batcher = dynamic_batching._Batcher(minimum_batch_size=1,
maximum_batch_size=1,
timeout_ms=None)
_, computation_id = batcher.get_inputs([tf.int32])
self.assertEqual([], computation_id.shape)
class DynamicBatchingBenchmarks(tf.test.Benchmark):
def benchmark_batching_small(self):
with tf.Session() as session:
@dynamic_batching.batch_fn
def f(a, b):
return a + b
outputs = []
for _ in range(1000):
outputs.append(f(tf.ones([1, 10]), tf.ones([1, 10])))
op_to_benchmark = tf.group(*outputs)
tf.train.start_queue_runners()
self.run_op_benchmark(
name='batching_many_small',
sess=session,
op_or_tensor=op_to_benchmark,
burn_iters=10,
min_iters=50)
def benchmark_batching_large(self):
with tf.Session() as session:
@dynamic_batching.batch_fn
def f(a, b):
return a + b
outputs = []
for _ in range(1000):
outputs.append(f(tf.ones([1, 100000]), tf.ones([1, 100000])))
op_to_benchmark = tf.group(*outputs)
tf.train.start_queue_runners()
self.run_op_benchmark(
name='batching_many_large',
sess=session,
op_or_tensor=op_to_benchmark,
burn_iters=10,
min_iters=50)
if __name__ == '__main__':
tf.test.main()
|
scalable_agent-master
|
dynamic_batching_test.py
|
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for V-trace.
For details and theory see:
"IMPALA: Scalable Distributed Deep-RL with
Importance Weighted Actor-Learner Architectures"
by Espeholt, Soyer, Munos et al.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import numpy as np
import tensorflow as tf
import vtrace
def _shaped_arange(*shape):
"""Runs np.arange, converts to float and reshapes."""
return np.arange(np.prod(shape), dtype=np.float32).reshape(*shape)
def _softmax(logits):
"""Applies softmax non-linearity on inputs."""
return np.exp(logits) / np.sum(np.exp(logits), axis=-1, keepdims=True)
def _ground_truth_calculation(discounts, log_rhos, rewards, values,
bootstrap_value, clip_rho_threshold,
clip_pg_rho_threshold):
"""Calculates the ground truth for V-trace in Python/Numpy."""
vs = []
seq_len = len(discounts)
rhos = np.exp(log_rhos)
cs = np.minimum(rhos, 1.0)
clipped_rhos = rhos
if clip_rho_threshold:
clipped_rhos = np.minimum(rhos, clip_rho_threshold)
clipped_pg_rhos = rhos
if clip_pg_rho_threshold:
clipped_pg_rhos = np.minimum(rhos, clip_pg_rho_threshold)
# This is a very inefficient way to calculate the V-trace ground truth.
# We calculate it this way because it is close to the mathematical notation of
# V-trace.
# v_s = V(x_s)
# + \sum^{T-1}_{t=s} \gamma^{t-s}
# * \prod_{i=s}^{t-1} c_i
# * \rho_t (r_t + \gamma V(x_{t+1}) - V(x_t))
  # Note that when we take the product over c_i, we write `s:t` because the
  # paper's notation is inclusive of `t-1`, whereas Python slicing is exclusive.
# Also note that np.prod([]) == 1.
values_t_plus_1 = np.concatenate([values, bootstrap_value[None, :]], axis=0)
for s in range(seq_len):
v_s = np.copy(values[s]) # Very important copy.
for t in range(s, seq_len):
v_s += (
np.prod(discounts[s:t], axis=0) * np.prod(cs[s:t],
axis=0) * clipped_rhos[t] *
(rewards[t] + discounts[t] * values_t_plus_1[t + 1] - values[t]))
vs.append(v_s)
vs = np.stack(vs, axis=0)
pg_advantages = (
clipped_pg_rhos * (rewards + discounts * np.concatenate(
[vs[1:], bootstrap_value[None, :]], axis=0) - values))
return vtrace.VTraceReturns(vs=vs, pg_advantages=pg_advantages)
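# A small, hypothetical shape sketch showing how the helper above is called:
# every input is laid out as [T, B] (here T=2 steps, B=1 batch element) except
# bootstrap_value, which is [B]. With log_rhos == 0 the trajectory is on-policy.
def _example_ground_truth_shapes():
  discounts = np.full((2, 1), 0.9, dtype=np.float32)
  log_rhos = np.zeros((2, 1), dtype=np.float32)
  rewards = np.ones((2, 1), dtype=np.float32)
  values = np.zeros((2, 1), dtype=np.float32)
  bootstrap_value = np.zeros((1,), dtype=np.float32)
  return _ground_truth_calculation(
      discounts, log_rhos, rewards, values, bootstrap_value,
      clip_rho_threshold=1.0, clip_pg_rho_threshold=1.0)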
class LogProbsFromLogitsAndActionsTest(tf.test.TestCase,
parameterized.TestCase):
@parameterized.named_parameters(('Batch1', 1), ('Batch2', 2))
def test_log_probs_from_logits_and_actions(self, batch_size):
"""Tests log_probs_from_logits_and_actions."""
seq_len = 7
num_actions = 3
policy_logits = _shaped_arange(seq_len, batch_size, num_actions) + 10
actions = np.random.randint(
0, num_actions, size=(seq_len, batch_size), dtype=np.int32)
action_log_probs_tensor = vtrace.log_probs_from_logits_and_actions(
policy_logits, actions)
# Ground Truth
# Using broadcasting to create a mask that indexes action logits
action_index_mask = actions[..., None] == np.arange(num_actions)
def index_with_mask(array, mask):
return array[mask].reshape(*array.shape[:-1])
# Note: Normally log(softmax) is not a good idea because it's not
# numerically stable. However, in this test we have well-behaved values.
ground_truth_v = index_with_mask(
np.log(_softmax(policy_logits)), action_index_mask)
with self.test_session() as session:
self.assertAllClose(ground_truth_v, session.run(action_log_probs_tensor))
class VtraceTest(tf.test.TestCase, parameterized.TestCase):
@parameterized.named_parameters(('Batch1', 1), ('Batch5', 5))
def test_vtrace(self, batch_size):
"""Tests V-trace against ground truth data calculated in python."""
seq_len = 5
# Create log_rhos such that rho will span from near-zero to above the
# clipping thresholds. In particular, calculate log_rhos in [-2.5, 2.5),
# so that rho is in approx [0.08, 12.2).
log_rhos = _shaped_arange(seq_len, batch_size) / (batch_size * seq_len)
log_rhos = 5 * (log_rhos - 0.5) # [0.0, 1.0) -> [-2.5, 2.5).
values = {
'log_rhos': log_rhos,
# T, B where B_i: [0.9 / (i+1)] * T
'discounts':
np.array([[0.9 / (b + 1)
for b in range(batch_size)]
for _ in range(seq_len)]),
'rewards':
_shaped_arange(seq_len, batch_size),
'values':
_shaped_arange(seq_len, batch_size) / batch_size,
'bootstrap_value':
_shaped_arange(batch_size) + 1.0,
'clip_rho_threshold':
3.7,
'clip_pg_rho_threshold':
2.2,
}
output = vtrace.from_importance_weights(**values)
with self.test_session() as session:
output_v = session.run(output)
ground_truth_v = _ground_truth_calculation(**values)
for a, b in zip(ground_truth_v, output_v):
self.assertAllClose(a, b)
@parameterized.named_parameters(('Batch1', 1), ('Batch2', 2))
def test_vtrace_from_logits(self, batch_size):
"""Tests V-trace calculated from logits."""
seq_len = 5
num_actions = 3
clip_rho_threshold = None # No clipping.
clip_pg_rho_threshold = None # No clipping.
# Intentionally leaving shapes unspecified to test if V-trace can
# deal with that.
placeholders = {
# T, B, NUM_ACTIONS
'behaviour_policy_logits':
tf.placeholder(dtype=tf.float32, shape=[None, None, None]),
# T, B, NUM_ACTIONS
'target_policy_logits':
tf.placeholder(dtype=tf.float32, shape=[None, None, None]),
'actions':
tf.placeholder(dtype=tf.int32, shape=[None, None]),
'discounts':
tf.placeholder(dtype=tf.float32, shape=[None, None]),
'rewards':
tf.placeholder(dtype=tf.float32, shape=[None, None]),
'values':
tf.placeholder(dtype=tf.float32, shape=[None, None]),
'bootstrap_value':
tf.placeholder(dtype=tf.float32, shape=[None]),
}
from_logits_output = vtrace.from_logits(
clip_rho_threshold=clip_rho_threshold,
clip_pg_rho_threshold=clip_pg_rho_threshold,
**placeholders)
target_log_probs = vtrace.log_probs_from_logits_and_actions(
placeholders['target_policy_logits'], placeholders['actions'])
behaviour_log_probs = vtrace.log_probs_from_logits_and_actions(
placeholders['behaviour_policy_logits'], placeholders['actions'])
log_rhos = target_log_probs - behaviour_log_probs
ground_truth = (log_rhos, behaviour_log_probs, target_log_probs)
values = {
'behaviour_policy_logits':
_shaped_arange(seq_len, batch_size, num_actions),
'target_policy_logits':
_shaped_arange(seq_len, batch_size, num_actions),
'actions':
np.random.randint(0, num_actions - 1, size=(seq_len, batch_size)),
'discounts':
np.array( # T, B where B_i: [0.9 / (i+1)] * T
[[0.9 / (b + 1)
for b in range(batch_size)]
for _ in range(seq_len)]),
'rewards':
_shaped_arange(seq_len, batch_size),
'values':
_shaped_arange(seq_len, batch_size) / batch_size,
'bootstrap_value':
_shaped_arange(batch_size) + 1.0, # B
}
feed_dict = {placeholders[k]: v for k, v in values.items()}
with self.test_session() as session:
from_logits_output_v = session.run(
from_logits_output, feed_dict=feed_dict)
(ground_truth_log_rhos, ground_truth_behaviour_action_log_probs,
ground_truth_target_action_log_probs) = session.run(
ground_truth, feed_dict=feed_dict)
# Calculate V-trace using the ground truth logits.
from_iw = vtrace.from_importance_weights(
log_rhos=ground_truth_log_rhos,
discounts=values['discounts'],
rewards=values['rewards'],
values=values['values'],
bootstrap_value=values['bootstrap_value'],
clip_rho_threshold=clip_rho_threshold,
clip_pg_rho_threshold=clip_pg_rho_threshold)
with self.test_session() as session:
from_iw_v = session.run(from_iw)
self.assertAllClose(from_iw_v.vs, from_logits_output_v.vs)
self.assertAllClose(from_iw_v.pg_advantages,
from_logits_output_v.pg_advantages)
self.assertAllClose(ground_truth_behaviour_action_log_probs,
from_logits_output_v.behaviour_action_log_probs)
self.assertAllClose(ground_truth_target_action_log_probs,
from_logits_output_v.target_action_log_probs)
self.assertAllClose(ground_truth_log_rhos, from_logits_output_v.log_rhos)
def test_higher_rank_inputs_for_importance_weights(self):
"""Checks support for additional dimensions in inputs."""
placeholders = {
'log_rhos': tf.placeholder(dtype=tf.float32, shape=[None, None, 1]),
'discounts': tf.placeholder(dtype=tf.float32, shape=[None, None, 1]),
'rewards': tf.placeholder(dtype=tf.float32, shape=[None, None, 42]),
'values': tf.placeholder(dtype=tf.float32, shape=[None, None, 42]),
'bootstrap_value': tf.placeholder(dtype=tf.float32, shape=[None, 42])
}
output = vtrace.from_importance_weights(**placeholders)
self.assertEqual(output.vs.shape.as_list()[-1], 42)
def test_inconsistent_rank_inputs_for_importance_weights(self):
"""Test one of many possible errors in shape of inputs."""
placeholders = {
'log_rhos': tf.placeholder(dtype=tf.float32, shape=[None, None, 1]),
'discounts': tf.placeholder(dtype=tf.float32, shape=[None, None, 1]),
'rewards': tf.placeholder(dtype=tf.float32, shape=[None, None, 42]),
'values': tf.placeholder(dtype=tf.float32, shape=[None, None, 42]),
# Should be [None, 42].
'bootstrap_value': tf.placeholder(dtype=tf.float32, shape=[None])
}
with self.assertRaisesRegexp(ValueError, 'must have rank 2'):
vtrace.from_importance_weights(**placeholders)
if __name__ == '__main__':
tf.test.main()
|
scalable_agent-master
|
vtrace_test.py
|
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for DMLab-30."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import numpy as np
import tensorflow as tf
LEVEL_MAPPING = collections.OrderedDict([
('rooms_collect_good_objects_train', 'rooms_collect_good_objects_test'),
('rooms_exploit_deferred_effects_train',
'rooms_exploit_deferred_effects_test'),
('rooms_select_nonmatching_object', 'rooms_select_nonmatching_object'),
('rooms_watermaze', 'rooms_watermaze'),
('rooms_keys_doors_puzzle', 'rooms_keys_doors_puzzle'),
('language_select_described_object', 'language_select_described_object'),
('language_select_located_object', 'language_select_located_object'),
('language_execute_random_task', 'language_execute_random_task'),
('language_answer_quantitative_question',
'language_answer_quantitative_question'),
('lasertag_one_opponent_small', 'lasertag_one_opponent_small'),
('lasertag_three_opponents_small', 'lasertag_three_opponents_small'),
('lasertag_one_opponent_large', 'lasertag_one_opponent_large'),
('lasertag_three_opponents_large', 'lasertag_three_opponents_large'),
('natlab_fixed_large_map', 'natlab_fixed_large_map'),
('natlab_varying_map_regrowth', 'natlab_varying_map_regrowth'),
('natlab_varying_map_randomized', 'natlab_varying_map_randomized'),
('skymaze_irreversible_path_hard', 'skymaze_irreversible_path_hard'),
('skymaze_irreversible_path_varied', 'skymaze_irreversible_path_varied'),
('psychlab_arbitrary_visuomotor_mapping',
'psychlab_arbitrary_visuomotor_mapping'),
('psychlab_continuous_recognition', 'psychlab_continuous_recognition'),
('psychlab_sequential_comparison', 'psychlab_sequential_comparison'),
('psychlab_visual_search', 'psychlab_visual_search'),
('explore_object_locations_small', 'explore_object_locations_small'),
('explore_object_locations_large', 'explore_object_locations_large'),
('explore_obstructed_goals_small', 'explore_obstructed_goals_small'),
('explore_obstructed_goals_large', 'explore_obstructed_goals_large'),
('explore_goal_locations_small', 'explore_goal_locations_small'),
('explore_goal_locations_large', 'explore_goal_locations_large'),
('explore_object_rewards_few', 'explore_object_rewards_few'),
('explore_object_rewards_many', 'explore_object_rewards_many'),
])
HUMAN_SCORES = {
'rooms_collect_good_objects_test': 10,
'rooms_exploit_deferred_effects_test': 85.65,
'rooms_select_nonmatching_object': 65.9,
'rooms_watermaze': 54,
'rooms_keys_doors_puzzle': 53.8,
'language_select_described_object': 389.5,
'language_select_located_object': 280.7,
'language_execute_random_task': 254.05,
'language_answer_quantitative_question': 184.5,
'lasertag_one_opponent_small': 12.65,
'lasertag_three_opponents_small': 18.55,
'lasertag_one_opponent_large': 18.6,
'lasertag_three_opponents_large': 31.5,
'natlab_fixed_large_map': 36.9,
'natlab_varying_map_regrowth': 24.45,
'natlab_varying_map_randomized': 42.35,
'skymaze_irreversible_path_hard': 100,
'skymaze_irreversible_path_varied': 100,
'psychlab_arbitrary_visuomotor_mapping': 58.75,
'psychlab_continuous_recognition': 58.3,
'psychlab_sequential_comparison': 39.5,
'psychlab_visual_search': 78.5,
'explore_object_locations_small': 74.45,
'explore_object_locations_large': 65.65,
'explore_obstructed_goals_small': 206,
'explore_obstructed_goals_large': 119.5,
'explore_goal_locations_small': 267.5,
'explore_goal_locations_large': 194.5,
'explore_object_rewards_few': 77.7,
'explore_object_rewards_many': 106.7,
}
RANDOM_SCORES = {
'rooms_collect_good_objects_test': 0.073,
'rooms_exploit_deferred_effects_test': 8.501,
'rooms_select_nonmatching_object': 0.312,
'rooms_watermaze': 4.065,
'rooms_keys_doors_puzzle': 4.135,
'language_select_described_object': -0.07,
'language_select_located_object': 1.929,
'language_execute_random_task': -5.913,
'language_answer_quantitative_question': -0.33,
'lasertag_one_opponent_small': -0.224,
'lasertag_three_opponents_small': -0.214,
'lasertag_one_opponent_large': -0.083,
'lasertag_three_opponents_large': -0.102,
'natlab_fixed_large_map': 2.173,
'natlab_varying_map_regrowth': 2.989,
'natlab_varying_map_randomized': 7.346,
'skymaze_irreversible_path_hard': 0.1,
'skymaze_irreversible_path_varied': 14.4,
'psychlab_arbitrary_visuomotor_mapping': 0.163,
'psychlab_continuous_recognition': 0.224,
'psychlab_sequential_comparison': 0.129,
'psychlab_visual_search': 0.085,
'explore_object_locations_small': 3.575,
'explore_object_locations_large': 4.673,
'explore_obstructed_goals_small': 6.76,
'explore_obstructed_goals_large': 2.61,
'explore_goal_locations_small': 7.66,
'explore_goal_locations_large': 3.14,
'explore_object_rewards_few': 2.073,
'explore_object_rewards_many': 2.438,
}
ALL_LEVELS = frozenset([
'rooms_collect_good_objects_train',
'rooms_collect_good_objects_test',
'rooms_exploit_deferred_effects_train',
'rooms_exploit_deferred_effects_test',
'rooms_select_nonmatching_object',
'rooms_watermaze',
'rooms_keys_doors_puzzle',
'language_select_described_object',
'language_select_located_object',
'language_execute_random_task',
'language_answer_quantitative_question',
'lasertag_one_opponent_small',
'lasertag_three_opponents_small',
'lasertag_one_opponent_large',
'lasertag_three_opponents_large',
'natlab_fixed_large_map',
'natlab_varying_map_regrowth',
'natlab_varying_map_randomized',
'skymaze_irreversible_path_hard',
'skymaze_irreversible_path_varied',
'psychlab_arbitrary_visuomotor_mapping',
'psychlab_continuous_recognition',
'psychlab_sequential_comparison',
'psychlab_visual_search',
'explore_object_locations_small',
'explore_object_locations_large',
'explore_obstructed_goals_small',
'explore_obstructed_goals_large',
'explore_goal_locations_small',
'explore_goal_locations_large',
'explore_object_rewards_few',
'explore_object_rewards_many',
])
def _transform_level_returns(level_returns):
"""Converts training level names to test level names."""
new_level_returns = {}
  for level_name, returns in level_returns.items():
new_level_returns[LEVEL_MAPPING.get(level_name, level_name)] = returns
test_set = set(LEVEL_MAPPING.values())
diff = test_set - set(new_level_returns.keys())
if diff:
raise ValueError('Missing levels: %s' % list(diff))
  for level_name, returns in new_level_returns.items():
if level_name in test_set:
if not returns:
raise ValueError('Missing returns for level: \'%s\': ' % level_name)
else:
tf.logging.info('Skipping level %s for calculation.', level_name)
return new_level_returns
def compute_human_normalized_score(level_returns, per_level_cap):
"""Computes human normalized score.
  Levels that have different training and test versions will use the returns
for the training level to calculate the score. E.g.
'rooms_collect_good_objects_train' will be used for
'rooms_collect_good_objects_test'. All returns for levels not in DmLab-30
will be ignored.
Args:
level_returns: A dictionary from level to list of episode returns.
per_level_cap: A percentage cap (e.g. 100.) on the per level human
normalized score. If None, no cap is applied.
Returns:
A float with the human normalized score in percentage.
Raises:
ValueError: If a level is missing from `level_returns` or has no returns.
"""
new_level_returns = _transform_level_returns(level_returns)
def human_normalized_score(level_name, returns):
score = np.mean(returns)
human = HUMAN_SCORES[level_name]
random = RANDOM_SCORES[level_name]
human_normalized_score = (score - random) / (human - random) * 100
if per_level_cap is not None:
human_normalized_score = min(human_normalized_score, per_level_cap)
return human_normalized_score
return np.mean(
[human_normalized_score(k, v) for k, v in new_level_returns.items()])
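# A hedged, illustrative entry point (not part of the original module): the
# score function requires a non-empty list of returns for every DmLab-30 level,
# so we fabricate a constant return of 10.0 per training level purely to show
# the expected input structure.
if __name__ == '__main__':
  _fake_level_returns = {name: [10.0] for name in LEVEL_MAPPING}
  print(compute_human_normalized_score(_fake_level_returns, per_level_cap=100.))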
|
scalable_agent-master
|
dmlab30.py
|
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests py_process.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tempfile
import time
import numpy as np
import py_process
import tensorflow as tf
from six.moves import range
class PyProcessTest(tf.test.TestCase):
def test_small(self):
class Example(object):
def __init__(self, a):
self._a = a
def inc(self):
self._a += 1
def compute(self, b):
return np.array(self._a + b, dtype=np.int32)
@staticmethod
def _tensor_specs(method_name, unused_args, unused_constructor_kwargs):
if method_name == 'compute':
return tf.contrib.framework.TensorSpec([], tf.int32)
elif method_name == 'inc':
return ()
with tf.Graph().as_default():
p = py_process.PyProcess(Example, 1)
inc = p.proxy.inc()
compute = p.proxy.compute(2)
with tf.train.SingularMonitoredSession(
hooks=[py_process.PyProcessHook()]) as session:
self.assertTrue(isinstance(inc, tf.Operation))
session.run(inc)
self.assertEqual([], compute.shape)
self.assertEqual(4, session.run(compute))
def test_threading(self):
class Example(object):
def __init__(self):
pass
def wait(self):
time.sleep(.2)
return None
@staticmethod
def _tensor_specs(method_name, unused_args, unused_constructor_kwargs):
if method_name == 'wait':
return tf.contrib.framework.TensorSpec([], tf.int32)
with tf.Graph().as_default():
p = py_process.PyProcess(Example)
wait = p.proxy.wait()
hook = py_process.PyProcessHook()
with tf.train.SingularMonitoredSession(hooks=[hook]) as session:
def run():
with self.assertRaises(tf.errors.OutOfRangeError):
session.run(wait)
t = self.checkedThread(target=run)
t.start()
time.sleep(.1)
t.join()
def test_args(self):
class Example(object):
def __init__(self, dim0):
self._dim0 = dim0
def compute(self, dim1):
return np.zeros([self._dim0, dim1], dtype=np.int32)
@staticmethod
def _tensor_specs(method_name, kwargs, constructor_kwargs):
dim0 = constructor_kwargs['dim0']
dim1 = kwargs['dim1']
if method_name == 'compute':
return tf.contrib.framework.TensorSpec([dim0, dim1], tf.int32)
with tf.Graph().as_default():
p = py_process.PyProcess(Example, 1)
result = p.proxy.compute(2)
with tf.train.SingularMonitoredSession(
hooks=[py_process.PyProcessHook()]) as session:
self.assertEqual([1, 2], result.shape)
self.assertAllEqual([[0, 0]], session.run(result))
def test_error_handling_constructor(self):
class Example(object):
def __init__(self):
raise ValueError('foo')
def something(self):
pass
@staticmethod
def _tensor_specs(method_name, unused_kwargs, unused_constructor_kwargs):
if method_name == 'something':
return ()
with tf.Graph().as_default():
py_process.PyProcess(Example, 1)
with self.assertRaisesRegexp(Exception, 'foo'):
with tf.train.SingularMonitoredSession(
hooks=[py_process.PyProcessHook()]):
pass
def test_error_handling_method(self):
class Example(object):
def __init__(self):
pass
def something(self):
raise ValueError('foo')
@staticmethod
def _tensor_specs(method_name, unused_kwargs, unused_constructor_kwargs):
if method_name == 'something':
return ()
with tf.Graph().as_default():
p = py_process.PyProcess(Example, 1)
result = p.proxy.something()
with tf.train.SingularMonitoredSession(
hooks=[py_process.PyProcessHook()]) as session:
with self.assertRaisesRegexp(Exception, 'foo'):
session.run(result)
def test_close(self):
with tempfile.NamedTemporaryFile() as tmp:
class Example(object):
def __init__(self, filename):
self._filename = filename
def close(self):
with tf.gfile.Open(self._filename, 'w') as f:
f.write('was_closed')
with tf.Graph().as_default():
py_process.PyProcess(Example, tmp.name)
with tf.train.SingularMonitoredSession(
hooks=[py_process.PyProcessHook()]):
pass
self.assertEqual('was_closed', tmp.read())
def test_close_on_error(self):
with tempfile.NamedTemporaryFile() as tmp:
class Example(object):
def __init__(self, filename):
self._filename = filename
def something(self):
raise ValueError('foo')
def close(self):
with tf.gfile.Open(self._filename, 'w') as f:
f.write('was_closed')
@staticmethod
def _tensor_specs(method_name, unused_kwargs,
unused_constructor_kwargs):
if method_name == 'something':
return ()
with tf.Graph().as_default():
p = py_process.PyProcess(Example, tmp.name)
result = p.proxy.something()
with tf.train.SingularMonitoredSession(
hooks=[py_process.PyProcessHook()]) as session:
with self.assertRaisesRegexp(Exception, 'foo'):
session.run(result)
self.assertEqual('was_closed', tmp.read())
class PyProcessBenchmarks(tf.test.Benchmark):
class Example(object):
def __init__(self):
self._result = np.random.randint(0, 256, (72, 96, 3), np.uint8)
def compute(self, unused_a):
return self._result
@staticmethod
def _tensor_specs(method_name, unused_args, unused_constructor_kwargs):
if method_name == 'compute':
return tf.contrib.framework.TensorSpec([72, 96, 3], tf.uint8)
def benchmark_one(self):
with tf.Graph().as_default():
p = py_process.PyProcess(PyProcessBenchmarks.Example)
compute = p.proxy.compute(2)
with tf.train.SingularMonitoredSession(
hooks=[py_process.PyProcessHook()]) as session:
self.run_op_benchmark(
name='process_one',
sess=session,
op_or_tensor=compute,
burn_iters=10,
min_iters=5000)
def benchmark_many(self):
with tf.Graph().as_default():
ps = [
py_process.PyProcess(PyProcessBenchmarks.Example) for _ in range(200)
]
compute_ops = [p.proxy.compute(2) for p in ps]
compute = tf.group(*compute_ops)
with tf.train.SingularMonitoredSession(
hooks=[py_process.PyProcessHook()]) as session:
self.run_op_benchmark(
name='process_many',
sess=session,
op_or_tensor=compute,
burn_iters=10,
min_iters=500)
if __name__ == '__main__':
tf.test.main()
|
scalable_agent-master
|
py_process_test.py
|
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Importance Weighted Actor-Learner Architectures."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import contextlib
import functools
import os
import sys
import dmlab30
import environments
import numpy as np
import py_process
import sonnet as snt
import tensorflow as tf
import vtrace
try:
import dynamic_batching
except tf.errors.NotFoundError:
tf.logging.warning('Running without dynamic batching.')
from six.moves import range
nest = tf.contrib.framework.nest
flags = tf.app.flags
FLAGS = tf.app.flags.FLAGS
flags.DEFINE_string('logdir', '/tmp/agent', 'TensorFlow log directory.')
flags.DEFINE_enum('mode', 'train', ['train', 'test'], 'Training or test mode.')
# Flags used for testing.
flags.DEFINE_integer('test_num_episodes', 10, 'Number of episodes per level.')
# Flags used for distributed training.
flags.DEFINE_integer('task', -1, 'Task id. Use -1 for local training.')
flags.DEFINE_enum('job_name', 'learner', ['learner', 'actor'],
'Job name. Ignored when task is set to -1.')
# Training.
flags.DEFINE_integer('total_environment_frames', int(1e9),
'Total environment frames to train for.')
flags.DEFINE_integer('num_actors', 4, 'Number of actors.')
flags.DEFINE_integer('batch_size', 2, 'Batch size for training.')
flags.DEFINE_integer('unroll_length', 100, 'Unroll length in agent steps.')
flags.DEFINE_integer('num_action_repeats', 4, 'Number of action repeats.')
flags.DEFINE_integer('seed', 1, 'Random seed.')
# Loss settings.
flags.DEFINE_float('entropy_cost', 0.00025, 'Entropy cost/multiplier.')
flags.DEFINE_float('baseline_cost', .5, 'Baseline cost/multiplier.')
flags.DEFINE_float('discounting', .99, 'Discounting factor.')
flags.DEFINE_enum('reward_clipping', 'abs_one', ['abs_one', 'soft_asymmetric'],
'Reward clipping.')
# Environment settings.
flags.DEFINE_string(
'dataset_path', '',
'Path to dataset needed for psychlab_*, see '
'https://github.com/deepmind/lab/tree/master/data/brady_konkle_oliva2008')
flags.DEFINE_string('level_name', 'explore_goal_locations_small',
'''Level name or \'dmlab30\' for the full DmLab-30 suite '''
'''with levels assigned round robin to the actors.''')
flags.DEFINE_integer('width', 96, 'Width of observation.')
flags.DEFINE_integer('height', 72, 'Height of observation.')
# Optimizer settings.
flags.DEFINE_float('learning_rate', 0.00048, 'Learning rate.')
flags.DEFINE_float('decay', .99, 'RMSProp optimizer decay.')
flags.DEFINE_float('momentum', 0., 'RMSProp momentum.')
flags.DEFINE_float('epsilon', .1, 'RMSProp epsilon.')
# Structure to be sent from actors to learner.
ActorOutput = collections.namedtuple(
'ActorOutput', 'level_name agent_state env_outputs agent_outputs')
AgentOutput = collections.namedtuple('AgentOutput',
'action policy_logits baseline')
def is_single_machine():
return FLAGS.task == -1
class Agent(snt.RNNCore):
"""Agent with ResNet."""
def __init__(self, num_actions):
super(Agent, self).__init__(name='agent')
self._num_actions = num_actions
with self._enter_variable_scope():
self._core = tf.contrib.rnn.LSTMBlockCell(256)
def initial_state(self, batch_size):
return self._core.zero_state(batch_size, tf.float32)
def _instruction(self, instruction):
# Split string.
splitted = tf.string_split(instruction)
dense = tf.sparse_tensor_to_dense(splitted, default_value='')
length = tf.reduce_sum(tf.to_int32(tf.not_equal(dense, '')), axis=1)
# To int64 hash buckets. Small risk of having collisions. Alternatively, a
# vocabulary can be used.
num_hash_buckets = 1000
buckets = tf.string_to_hash_bucket_fast(dense, num_hash_buckets)
# Embed the instruction. Embedding size 20 seems to be enough.
embedding_size = 20
embedding = snt.Embed(num_hash_buckets, embedding_size)(buckets)
# Pad to make sure there is at least one output.
padding = tf.to_int32(tf.equal(tf.shape(embedding)[1], 0))
embedding = tf.pad(embedding, [[0, 0], [0, padding], [0, 0]])
core = tf.contrib.rnn.LSTMBlockCell(64, name='language_lstm')
output, _ = tf.nn.dynamic_rnn(core, embedding, length, dtype=tf.float32)
# Return last output.
return tf.reverse_sequence(output, length, seq_axis=1)[:, 0]
def _torso(self, input_):
last_action, env_output = input_
reward, _, _, (frame, instruction) = env_output
# Convert to floats.
frame = tf.to_float(frame)
frame /= 255
with tf.variable_scope('convnet'):
conv_out = frame
for i, (num_ch, num_blocks) in enumerate([(16, 2), (32, 2), (32, 2)]):
# Downscale.
conv_out = snt.Conv2D(num_ch, 3, stride=1, padding='SAME')(conv_out)
conv_out = tf.nn.pool(
conv_out,
window_shape=[3, 3],
pooling_type='MAX',
padding='SAME',
strides=[2, 2])
# Residual block(s).
for j in range(num_blocks):
with tf.variable_scope('residual_%d_%d' % (i, j)):
block_input = conv_out
conv_out = tf.nn.relu(conv_out)
conv_out = snt.Conv2D(num_ch, 3, stride=1, padding='SAME')(conv_out)
conv_out = tf.nn.relu(conv_out)
conv_out = snt.Conv2D(num_ch, 3, stride=1, padding='SAME')(conv_out)
conv_out += block_input
conv_out = tf.nn.relu(conv_out)
conv_out = snt.BatchFlatten()(conv_out)
conv_out = snt.Linear(256)(conv_out)
conv_out = tf.nn.relu(conv_out)
instruction_out = self._instruction(instruction)
# Append clipped last reward and one hot last action.
clipped_reward = tf.expand_dims(tf.clip_by_value(reward, -1, 1), -1)
one_hot_last_action = tf.one_hot(last_action, self._num_actions)
return tf.concat(
[conv_out, clipped_reward, one_hot_last_action, instruction_out],
axis=1)
def _head(self, core_output):
policy_logits = snt.Linear(self._num_actions, name='policy_logits')(
core_output)
baseline = tf.squeeze(snt.Linear(1, name='baseline')(core_output), axis=-1)
# Sample an action from the policy.
new_action = tf.multinomial(policy_logits, num_samples=1,
output_dtype=tf.int32)
new_action = tf.squeeze(new_action, 1, name='new_action')
return AgentOutput(new_action, policy_logits, baseline)
def _build(self, input_, core_state):
action, env_output = input_
actions, env_outputs = nest.map_structure(lambda t: tf.expand_dims(t, 0),
(action, env_output))
outputs, core_state = self.unroll(actions, env_outputs, core_state)
return nest.map_structure(lambda t: tf.squeeze(t, 0), outputs), core_state
@snt.reuse_variables
def unroll(self, actions, env_outputs, core_state):
_, _, done, _ = env_outputs
torso_outputs = snt.BatchApply(self._torso)((actions, env_outputs))
# Note, in this implementation we can't use CuDNN RNN to speed things up due
# to the state reset. This can be XLA-compiled (LSTMBlockCell needs to be
# changed to implement snt.LSTMCell).
initial_core_state = self._core.zero_state(tf.shape(actions)[1], tf.float32)
core_output_list = []
for input_, d in zip(tf.unstack(torso_outputs), tf.unstack(done)):
      # If the episode ended, the core state should be reset before the next step.
core_state = nest.map_structure(functools.partial(tf.where, d),
initial_core_state, core_state)
core_output, core_state = self._core(input_, core_state)
core_output_list.append(core_output)
return snt.BatchApply(self._head)(tf.stack(core_output_list)), core_state
def build_actor(agent, env, level_name, action_set):
"""Builds the actor loop."""
# Initial values.
initial_env_output, initial_env_state = env.initial()
initial_agent_state = agent.initial_state(1)
initial_action = tf.zeros([1], dtype=tf.int32)
dummy_agent_output, _ = agent(
(initial_action,
nest.map_structure(lambda t: tf.expand_dims(t, 0), initial_env_output)),
initial_agent_state)
initial_agent_output = nest.map_structure(
lambda t: tf.zeros(t.shape, t.dtype), dummy_agent_output)
# All state that needs to persist across training iterations. This includes
# the last environment output, agent state and last agent output. These
# variables should never go on the parameter servers.
def create_state(t):
# Creates a unique variable scope to ensure the variable name is unique.
with tf.variable_scope(None, default_name='state'):
return tf.get_local_variable(t.op.name, initializer=t, use_resource=True)
persistent_state = nest.map_structure(
create_state, (initial_env_state, initial_env_output, initial_agent_state,
initial_agent_output))
def step(input_, unused_i):
"""Steps through the agent and the environment."""
env_state, env_output, agent_state, agent_output = input_
# Run agent.
action = agent_output[0]
batched_env_output = nest.map_structure(lambda t: tf.expand_dims(t, 0),
env_output)
agent_output, agent_state = agent((action, batched_env_output), agent_state)
# Convert action index to the native action.
action = agent_output[0][0]
raw_action = tf.gather(action_set, action)
env_output, env_state = env.step(raw_action, env_state)
return env_state, env_output, agent_state, agent_output
# Run the unroll. `read_value()` is needed to make sure later usage will
# return the first values and not a new snapshot of the variables.
first_values = nest.map_structure(lambda v: v.read_value(), persistent_state)
_, first_env_output, first_agent_state, first_agent_output = first_values
# Use scan to apply `step` multiple times, therefore unrolling the agent
# and environment interaction for `FLAGS.unroll_length`. `tf.scan` forwards
# the output of each call of `step` as input of the subsequent call of `step`.
# The unroll sequence is initialized with the agent and environment states
# and outputs as stored at the end of the previous unroll.
# `output` stores lists of all states and outputs stacked along the entire
# unroll. Note that the initial states and outputs (fed through `initializer`)
# are not in `output` and will need to be added manually later.
output = tf.scan(step, tf.range(FLAGS.unroll_length), first_values)
_, env_outputs, _, agent_outputs = output
# Update persistent state with the last output from the loop.
assign_ops = nest.map_structure(lambda v, t: v.assign(t[-1]),
persistent_state, output)
# The control dependency ensures that the final agent and environment states
# and outputs are stored in `persistent_state` (to initialize next unroll).
with tf.control_dependencies(nest.flatten(assign_ops)):
# Remove the batch dimension from the agent state/output.
first_agent_state = nest.map_structure(lambda t: t[0], first_agent_state)
first_agent_output = nest.map_structure(lambda t: t[0], first_agent_output)
agent_outputs = nest.map_structure(lambda t: t[:, 0], agent_outputs)
# Concatenate first output and the unroll along the time dimension.
full_agent_outputs, full_env_outputs = nest.map_structure(
lambda first, rest: tf.concat([[first], rest], 0),
(first_agent_output, first_env_output), (agent_outputs, env_outputs))
output = ActorOutput(
level_name=level_name, agent_state=first_agent_state,
env_outputs=full_env_outputs, agent_outputs=full_agent_outputs)
# No backpropagation should be done here.
return nest.map_structure(tf.stop_gradient, output)
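# A minimal, hypothetical sketch of the `tf.scan` pattern used in `build_actor`
# above: the initializer plays the role of the state persisted from the previous
# unroll, and each call's output is threaded in as the next call's input.
def _example_scan_threading():
  running_sum = tf.scan(
      lambda carry, x: carry + x,  # `step` analogue: thread the carry forward.
      tf.range(5),                 # analogue of `tf.range(FLAGS.unroll_length)`.
      initializer=tf.constant(0))  # analogue of `first_values`.
  return running_sum               # Evaluates to [0, 1, 3, 6, 10].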
def compute_baseline_loss(advantages):
# Loss for the baseline, summed over the time dimension.
# Multiply by 0.5 to match the standard update rule:
# d(loss) / d(baseline) = advantage
return .5 * tf.reduce_sum(tf.square(advantages))
def compute_entropy_loss(logits):
policy = tf.nn.softmax(logits)
log_policy = tf.nn.log_softmax(logits)
entropy_per_timestep = tf.reduce_sum(-policy * log_policy, axis=-1)
return -tf.reduce_sum(entropy_per_timestep)
def compute_policy_gradient_loss(logits, actions, advantages):
cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
labels=actions, logits=logits)
advantages = tf.stop_gradient(advantages)
policy_gradient_loss_per_timestep = cross_entropy * advantages
return tf.reduce_sum(policy_gradient_loss_per_timestep)
def build_learner(agent, agent_state, env_outputs, agent_outputs):
"""Builds the learner loop.
Args:
agent: A snt.RNNCore module outputting `AgentOutput` named tuples, with an
`unroll` call for computing the outputs for a whole trajectory.
agent_state: The initial agent state for each sequence in the batch.
env_outputs: A `StepOutput` namedtuple where each field is of shape
[T+1, ...].
agent_outputs: An `AgentOutput` namedtuple where each field is of shape
[T+1, ...].
Returns:
A tuple of (done, infos, and environment frames) where
the environment frames tensor causes an update.
"""
learner_outputs, _ = agent.unroll(agent_outputs.action, env_outputs,
agent_state)
# Use last baseline value (from the value function) to bootstrap.
bootstrap_value = learner_outputs.baseline[-1]
# At this point, the environment outputs at time step `t` are the inputs that
# lead to the learner_outputs at time step `t`. After the following shifting,
  # the actions in agent_outputs and learner_outputs at time step `t` are what
  # lead to the environment outputs at time step `t`.
agent_outputs = nest.map_structure(lambda t: t[1:], agent_outputs)
rewards, infos, done, _ = nest.map_structure(
lambda t: t[1:], env_outputs)
learner_outputs = nest.map_structure(lambda t: t[:-1], learner_outputs)
if FLAGS.reward_clipping == 'abs_one':
clipped_rewards = tf.clip_by_value(rewards, -1, 1)
elif FLAGS.reward_clipping == 'soft_asymmetric':
squeezed = tf.tanh(rewards / 5.0)
# Negative rewards are given less weight than positive rewards.
clipped_rewards = tf.where(rewards < 0, .3 * squeezed, squeezed) * 5.
discounts = tf.to_float(~done) * FLAGS.discounting
# Compute V-trace returns and weights.
# Note, this is put on the CPU because it's faster than on GPU. It can be
# improved further with XLA-compilation or with a custom TensorFlow operation.
with tf.device('/cpu'):
vtrace_returns = vtrace.from_logits(
behaviour_policy_logits=agent_outputs.policy_logits,
target_policy_logits=learner_outputs.policy_logits,
actions=agent_outputs.action,
discounts=discounts,
rewards=clipped_rewards,
values=learner_outputs.baseline,
bootstrap_value=bootstrap_value)
# Compute loss as a weighted sum of the baseline loss, the policy gradient
# loss and an entropy regularization term.
total_loss = compute_policy_gradient_loss(
learner_outputs.policy_logits, agent_outputs.action,
vtrace_returns.pg_advantages)
total_loss += FLAGS.baseline_cost * compute_baseline_loss(
vtrace_returns.vs - learner_outputs.baseline)
total_loss += FLAGS.entropy_cost * compute_entropy_loss(
learner_outputs.policy_logits)
# Optimization
num_env_frames = tf.train.get_global_step()
learning_rate = tf.train.polynomial_decay(FLAGS.learning_rate, num_env_frames,
FLAGS.total_environment_frames, 0)
optimizer = tf.train.RMSPropOptimizer(learning_rate, FLAGS.decay,
FLAGS.momentum, FLAGS.epsilon)
train_op = optimizer.minimize(total_loss)
# Merge updating the network and environment frames into a single tensor.
with tf.control_dependencies([train_op]):
num_env_frames_and_train = num_env_frames.assign_add(
FLAGS.batch_size * FLAGS.unroll_length * FLAGS.num_action_repeats)
# Adding a few summaries.
tf.summary.scalar('learning_rate', learning_rate)
tf.summary.scalar('total_loss', total_loss)
tf.summary.histogram('action', agent_outputs.action)
return done, infos, num_env_frames_and_train
def create_environment(level_name, seed, is_test=False):
"""Creates an environment wrapped in a `FlowEnvironment`."""
if level_name in dmlab30.ALL_LEVELS:
level_name = 'contributed/dmlab30/' + level_name
  # Note, you may want to use a level cache to speed up compilation of
# environment maps. See the documentation for the Python interface of DeepMind
# Lab.
config = {
'width': FLAGS.width,
'height': FLAGS.height,
'datasetPath': FLAGS.dataset_path,
'logLevel': 'WARN',
}
if is_test:
config['allowHoldOutLevels'] = 'true'
    # Mixer seed for evaluation, see
# https://github.com/deepmind/lab/blob/master/docs/users/python_api.md
config['mixerSeed'] = 0x600D5EED
p = py_process.PyProcess(environments.PyProcessDmLab, level_name, config,
FLAGS.num_action_repeats, seed)
return environments.FlowEnvironment(p.proxy)
@contextlib.contextmanager
def pin_global_variables(device):
"""Pins global variables to the specified device."""
def getter(getter, *args, **kwargs):
var_collections = kwargs.get('collections', None)
if var_collections is None:
var_collections = [tf.GraphKeys.GLOBAL_VARIABLES]
if tf.GraphKeys.GLOBAL_VARIABLES in var_collections:
with tf.device(device):
return getter(*args, **kwargs)
else:
return getter(*args, **kwargs)
with tf.variable_scope('', custom_getter=getter) as vs:
yield vs
def train(action_set, level_names):
"""Train."""
if is_single_machine():
local_job_device = ''
shared_job_device = ''
is_actor_fn = lambda i: True
is_learner = True
global_variable_device = '/gpu'
server = tf.train.Server.create_local_server()
filters = []
else:
local_job_device = '/job:%s/task:%d' % (FLAGS.job_name, FLAGS.task)
shared_job_device = '/job:learner/task:0'
is_actor_fn = lambda i: FLAGS.job_name == 'actor' and i == FLAGS.task
is_learner = FLAGS.job_name == 'learner'
    # Placing the variable on CPU makes it cheaper to send it to all the
    # actors. Continually copying the variables from the GPU is slow.
global_variable_device = shared_job_device + '/cpu'
cluster = tf.train.ClusterSpec({
'actor': ['localhost:%d' % (8001 + i) for i in range(FLAGS.num_actors)],
'learner': ['localhost:8000']
})
server = tf.train.Server(cluster, job_name=FLAGS.job_name,
task_index=FLAGS.task)
filters = [shared_job_device, local_job_device]
# Only used to find the actor output structure.
with tf.Graph().as_default():
agent = Agent(len(action_set))
env = create_environment(level_names[0], seed=1)
structure = build_actor(agent, env, level_names[0], action_set)
flattened_structure = nest.flatten(structure)
dtypes = [t.dtype for t in flattened_structure]
shapes = [t.shape.as_list() for t in flattened_structure]
with tf.Graph().as_default(), \
tf.device(local_job_device + '/cpu'), \
pin_global_variables(global_variable_device):
tf.set_random_seed(FLAGS.seed) # Makes initialization deterministic.
# Create Queue and Agent on the learner.
with tf.device(shared_job_device):
queue = tf.FIFOQueue(1, dtypes, shapes, shared_name='buffer')
agent = Agent(len(action_set))
if is_single_machine() and 'dynamic_batching' in sys.modules:
# For single machine training, we use dynamic batching for improved GPU
# utilization. The semantics of single machine training are slightly
# different from the distributed setting because within a single unroll
# of an environment, the actions may be computed using different weights
# if an update happens within the unroll.
old_build = agent._build
@dynamic_batching.batch_fn
def build(*args):
with tf.device('/gpu'):
return old_build(*args)
tf.logging.info('Using dynamic batching.')
agent._build = build
# Build actors and ops to enqueue their output.
enqueue_ops = []
for i in range(FLAGS.num_actors):
if is_actor_fn(i):
level_name = level_names[i % len(level_names)]
tf.logging.info('Creating actor %d with level %s', i, level_name)
env = create_environment(level_name, seed=i + 1)
actor_output = build_actor(agent, env, level_name, action_set)
with tf.device(shared_job_device):
enqueue_ops.append(queue.enqueue(nest.flatten(actor_output)))
# If running in a single machine setup, run actors with QueueRunners
# (separate threads).
if is_learner and enqueue_ops:
tf.train.add_queue_runner(tf.train.QueueRunner(queue, enqueue_ops))
# Build learner.
if is_learner:
# Create global step, which is the number of environment frames processed.
tf.get_variable(
'num_environment_frames',
initializer=tf.zeros_initializer(),
shape=[],
dtype=tf.int64,
trainable=False,
collections=[tf.GraphKeys.GLOBAL_STEP, tf.GraphKeys.GLOBAL_VARIABLES])
# Create batch (time major) and recreate structure.
dequeued = queue.dequeue_many(FLAGS.batch_size)
dequeued = nest.pack_sequence_as(structure, dequeued)
def make_time_major(s):
return nest.map_structure(
lambda t: tf.transpose(t, [1, 0] + list(range(t.shape.ndims))[2:]), s)
dequeued = dequeued._replace(
env_outputs=make_time_major(dequeued.env_outputs),
agent_outputs=make_time_major(dequeued.agent_outputs))
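    # After the transpose, all batched tensors are time-major. Illustrative
    # shapes only: a [batch, time, ...] tensor such as [32, 101, 15] becomes
    # [101, 32, 15].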
with tf.device('/gpu'):
# Using StagingArea allows us to prepare the next batch and send it to
# the GPU while we're performing a training step. This adds up to 1 step
# policy lag.
flattened_output = nest.flatten(dequeued)
area = tf.contrib.staging.StagingArea(
[t.dtype for t in flattened_output],
[t.shape for t in flattened_output])
stage_op = area.put(flattened_output)
data_from_actors = nest.pack_sequence_as(structure, area.get())
# Unroll agent on sequence, create losses and update ops.
output = build_learner(agent, data_from_actors.agent_state,
data_from_actors.env_outputs,
data_from_actors.agent_outputs)
# Create MonitoredSession (to run the graph, checkpoint and log).
tf.logging.info('Creating MonitoredSession, is_chief %s', is_learner)
config = tf.ConfigProto(allow_soft_placement=True, device_filters=filters)
with tf.train.MonitoredTrainingSession(
server.target,
is_chief=is_learner,
checkpoint_dir=FLAGS.logdir,
save_checkpoint_secs=600,
save_summaries_secs=30,
log_step_count_steps=50000,
config=config,
hooks=[py_process.PyProcessHook()]) as session:
if is_learner:
# Logging.
level_returns = {level_name: [] for level_name in level_names}
summary_writer = tf.summary.FileWriterCache.get(FLAGS.logdir)
# Prepare data for first run.
session.run_step_fn(
lambda step_context: step_context.session.run(stage_op))
# Execute learning and track performance.
num_env_frames_v = 0
while num_env_frames_v < FLAGS.total_environment_frames:
level_names_v, done_v, infos_v, num_env_frames_v, _ = session.run(
(data_from_actors.level_name,) + output + (stage_op,))
level_names_v = np.repeat([level_names_v], done_v.shape[0], 0)
for level_name, episode_return, episode_step in zip(
level_names_v[done_v],
infos_v.episode_return[done_v],
infos_v.episode_step[done_v]):
episode_frames = episode_step * FLAGS.num_action_repeats
tf.logging.info('Level: %s Episode return: %f',
level_name, episode_return)
summary = tf.summary.Summary()
summary.value.add(tag=level_name + '/episode_return',
simple_value=episode_return)
summary.value.add(tag=level_name + '/episode_frames',
simple_value=episode_frames)
summary_writer.add_summary(summary, num_env_frames_v)
if FLAGS.level_name == 'dmlab30':
level_returns[level_name].append(episode_return)
if (FLAGS.level_name == 'dmlab30' and
min(map(len, level_returns.values())) >= 1):
no_cap = dmlab30.compute_human_normalized_score(level_returns,
per_level_cap=None)
cap_100 = dmlab30.compute_human_normalized_score(level_returns,
per_level_cap=100)
summary = tf.summary.Summary()
summary.value.add(
tag='dmlab30/training_no_cap', simple_value=no_cap)
summary.value.add(
tag='dmlab30/training_cap_100', simple_value=cap_100)
summary_writer.add_summary(summary, num_env_frames_v)
# Clear level scores.
level_returns = {level_name: [] for level_name in level_names}
else:
# Execute actors (they just need to enqueue their output).
while True:
session.run(enqueue_ops)
def test(action_set, level_names):
"""Test."""
level_returns = {level_name: [] for level_name in level_names}
with tf.Graph().as_default():
agent = Agent(len(action_set))
outputs = {}
for level_name in level_names:
env = create_environment(level_name, seed=1, is_test=True)
outputs[level_name] = build_actor(agent, env, level_name, action_set)
with tf.train.SingularMonitoredSession(
checkpoint_dir=FLAGS.logdir,
hooks=[py_process.PyProcessHook()]) as session:
for level_name in level_names:
tf.logging.info('Testing level: %s', level_name)
while True:
done_v, infos_v = session.run((
outputs[level_name].env_outputs.done,
outputs[level_name].env_outputs.info
))
returns = level_returns[level_name]
returns.extend(infos_v.episode_return[1:][done_v[1:]])
if len(returns) >= FLAGS.test_num_episodes:
tf.logging.info('Mean episode return: %f', np.mean(returns))
break
if FLAGS.level_name == 'dmlab30':
no_cap = dmlab30.compute_human_normalized_score(level_returns,
per_level_cap=None)
cap_100 = dmlab30.compute_human_normalized_score(level_returns,
per_level_cap=100)
tf.logging.info('No cap.: %f Cap 100: %f', no_cap, cap_100)
def main(_):
tf.logging.set_verbosity(tf.logging.INFO)
action_set = environments.DEFAULT_ACTION_SET
if FLAGS.level_name == 'dmlab30' and FLAGS.mode == 'train':
level_names = dmlab30.LEVEL_MAPPING.keys()
elif FLAGS.level_name == 'dmlab30' and FLAGS.mode == 'test':
level_names = dmlab30.LEVEL_MAPPING.values()
else:
level_names = [FLAGS.level_name]
if FLAGS.mode == 'train':
train(action_set, level_names)
else:
test(action_set, level_names)
if __name__ == '__main__':
tf.app.run()
|
scalable_agent-master
|
experiment.py
|
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Functions to compute V-trace off-policy actor critic targets.
For details and theory see:
"IMPALA: Scalable Distributed Deep-RL with
Importance Weighted Actor-Learner Architectures"
by Espeholt, Soyer, Munos et al.
See https://arxiv.org/abs/1802.01561 for the full paper.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import tensorflow as tf
nest = tf.contrib.framework.nest
VTraceFromLogitsReturns = collections.namedtuple(
'VTraceFromLogitsReturns',
['vs', 'pg_advantages', 'log_rhos',
'behaviour_action_log_probs', 'target_action_log_probs'])
VTraceReturns = collections.namedtuple('VTraceReturns', 'vs pg_advantages')
def log_probs_from_logits_and_actions(policy_logits, actions):
"""Computes action log-probs from policy logits and actions.
In the notation used throughout documentation and comments, T refers to the
time dimension ranging from 0 to T-1. B refers to the batch size and
NUM_ACTIONS refers to the number of actions.
Args:
policy_logits: A float32 tensor of shape [T, B, NUM_ACTIONS] with
un-normalized log-probabilities parameterizing a softmax policy.
actions: An int32 tensor of shape [T, B] with actions.
Returns:
A float32 tensor of shape [T, B] corresponding to the sampling log
probability of the chosen action w.r.t. the policy.
"""
policy_logits = tf.convert_to_tensor(policy_logits, dtype=tf.float32)
actions = tf.convert_to_tensor(actions, dtype=tf.int32)
policy_logits.shape.assert_has_rank(3)
actions.shape.assert_has_rank(2)
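  # Equivalent to gathering log_softmax(policy_logits) at the chosen action for
  # every (t, b) entry; the sparse cross-entropy op computes this directly.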
return -tf.nn.sparse_softmax_cross_entropy_with_logits(
logits=policy_logits, labels=actions)
def from_logits(
behaviour_policy_logits, target_policy_logits, actions,
discounts, rewards, values, bootstrap_value,
clip_rho_threshold=1.0, clip_pg_rho_threshold=1.0,
name='vtrace_from_logits'):
r"""V-trace for softmax policies.
Calculates V-trace actor critic targets for softmax polices as described in
"IMPALA: Scalable Distributed Deep-RL with
Importance Weighted Actor-Learner Architectures"
by Espeholt, Soyer, Munos et al.
Target policy refers to the policy we are interested in improving and
behaviour policy refers to the policy that generated the given
rewards and actions.
In the notation used throughout documentation and comments, T refers to the
time dimension ranging from 0 to T-1. B refers to the batch size and
NUM_ACTIONS refers to the number of actions.
Args:
behaviour_policy_logits: A float32 tensor of shape [T, B, NUM_ACTIONS] with
un-normalized log-probabilities parametrizing the softmax behaviour
policy.
target_policy_logits: A float32 tensor of shape [T, B, NUM_ACTIONS] with
un-normalized log-probabilities parametrizing the softmax target policy.
actions: An int32 tensor of shape [T, B] of actions sampled from the
behaviour policy.
discounts: A float32 tensor of shape [T, B] with the discount encountered
when following the behaviour policy.
rewards: A float32 tensor of shape [T, B] with the rewards generated by
following the behaviour policy.
values: A float32 tensor of shape [T, B] with the value function estimates
wrt. the target policy.
    bootstrap_value: A float32 tensor of shape [B] with the value function
      estimate at time T.
clip_rho_threshold: A scalar float32 tensor with the clipping threshold for
importance weights (rho) when calculating the baseline targets (vs).
rho^bar in the paper.
clip_pg_rho_threshold: A scalar float32 tensor with the clipping threshold
on rho_s in \rho_s \delta log \pi(a|x) (r + \gamma v_{s+1} - V(x_s)).
name: The name scope that all V-trace operations will be created in.
Returns:
A `VTraceFromLogitsReturns` namedtuple with the following fields:
vs: A float32 tensor of shape [T, B]. Can be used as target to train a
baseline (V(x_t) - vs_t)^2.
      pg_advantages: A float32 tensor of shape [T, B]. Can be used as an
estimate of the advantage in the calculation of policy gradients.
log_rhos: A float32 tensor of shape [T, B] containing the log importance
sampling weights (log rhos).
behaviour_action_log_probs: A float32 tensor of shape [T, B] containing
behaviour policy action log probabilities (log \mu(a_t)).
target_action_log_probs: A float32 tensor of shape [T, B] containing
        target policy action log probabilities (log \pi(a_t)).
"""
behaviour_policy_logits = tf.convert_to_tensor(
behaviour_policy_logits, dtype=tf.float32)
target_policy_logits = tf.convert_to_tensor(
target_policy_logits, dtype=tf.float32)
actions = tf.convert_to_tensor(actions, dtype=tf.int32)
# Make sure tensor ranks are as expected.
# The rest will be checked by from_action_log_probs.
behaviour_policy_logits.shape.assert_has_rank(3)
target_policy_logits.shape.assert_has_rank(3)
actions.shape.assert_has_rank(2)
with tf.name_scope(name, values=[
behaviour_policy_logits, target_policy_logits, actions,
discounts, rewards, values, bootstrap_value]):
target_action_log_probs = log_probs_from_logits_and_actions(
target_policy_logits, actions)
behaviour_action_log_probs = log_probs_from_logits_and_actions(
behaviour_policy_logits, actions)
log_rhos = target_action_log_probs - behaviour_action_log_probs
vtrace_returns = from_importance_weights(
log_rhos=log_rhos,
discounts=discounts,
rewards=rewards,
values=values,
bootstrap_value=bootstrap_value,
clip_rho_threshold=clip_rho_threshold,
clip_pg_rho_threshold=clip_pg_rho_threshold)
return VTraceFromLogitsReturns(
log_rhos=log_rhos,
behaviour_action_log_probs=behaviour_action_log_probs,
target_action_log_probs=target_action_log_probs,
**vtrace_returns._asdict()
)
def from_importance_weights(
log_rhos, discounts, rewards, values, bootstrap_value,
clip_rho_threshold=1.0, clip_pg_rho_threshold=1.0,
name='vtrace_from_importance_weights'):
r"""V-trace from log importance weights.
Calculates V-trace actor critic targets as described in
"IMPALA: Scalable Distributed Deep-RL with
Importance Weighted Actor-Learner Architectures"
by Espeholt, Soyer, Munos et al.
In the notation used throughout documentation and comments, T refers to the
time dimension ranging from 0 to T-1. B refers to the batch size and
NUM_ACTIONS refers to the number of actions. This code also supports the
case where all tensors have the same number of additional dimensions, e.g.,
`rewards` is [T, B, C], `values` is [T, B, C], `bootstrap_value` is [B, C].
Args:
    log_rhos: A float32 tensor of shape [T, B] representing the log
importance sampling weights, i.e.
log(target_policy(a) / behaviour_policy(a)). V-trace performs operations
on rhos in log-space for numerical stability.
discounts: A float32 tensor of shape [T, B] with discounts encountered when
following the behaviour policy.
rewards: A float32 tensor of shape [T, B] containing rewards generated by
following the behaviour policy.
values: A float32 tensor of shape [T, B] with the value function estimates
wrt. the target policy.
    bootstrap_value: A float32 tensor of shape [B] with the value function
      estimate at time T.
clip_rho_threshold: A scalar float32 tensor with the clipping threshold for
importance weights (rho) when calculating the baseline targets (vs).
rho^bar in the paper. If None, no clipping is applied.
clip_pg_rho_threshold: A scalar float32 tensor with the clipping threshold
on rho_s in \rho_s \delta log \pi(a|x) (r + \gamma v_{s+1} - V(x_s)). If
None, no clipping is applied.
name: The name scope that all V-trace operations will be created in.
Returns:
A VTraceReturns namedtuple (vs, pg_advantages) where:
vs: A float32 tensor of shape [T, B]. Can be used as target to
train a baseline (V(x_t) - vs_t)^2.
pg_advantages: A float32 tensor of shape [T, B]. Can be used as the
advantage in the calculation of policy gradients.
"""
log_rhos = tf.convert_to_tensor(log_rhos, dtype=tf.float32)
discounts = tf.convert_to_tensor(discounts, dtype=tf.float32)
rewards = tf.convert_to_tensor(rewards, dtype=tf.float32)
values = tf.convert_to_tensor(values, dtype=tf.float32)
bootstrap_value = tf.convert_to_tensor(bootstrap_value, dtype=tf.float32)
if clip_rho_threshold is not None:
clip_rho_threshold = tf.convert_to_tensor(clip_rho_threshold,
dtype=tf.float32)
if clip_pg_rho_threshold is not None:
clip_pg_rho_threshold = tf.convert_to_tensor(clip_pg_rho_threshold,
dtype=tf.float32)
# Make sure tensor ranks are consistent.
rho_rank = log_rhos.shape.ndims # Usually 2.
values.shape.assert_has_rank(rho_rank)
bootstrap_value.shape.assert_has_rank(rho_rank - 1)
discounts.shape.assert_has_rank(rho_rank)
rewards.shape.assert_has_rank(rho_rank)
if clip_rho_threshold is not None:
clip_rho_threshold.shape.assert_has_rank(0)
if clip_pg_rho_threshold is not None:
clip_pg_rho_threshold.shape.assert_has_rank(0)
with tf.name_scope(name, values=[
log_rhos, discounts, rewards, values, bootstrap_value]):
rhos = tf.exp(log_rhos)
if clip_rho_threshold is not None:
clipped_rhos = tf.minimum(clip_rho_threshold, rhos, name='clipped_rhos')
else:
clipped_rhos = rhos
cs = tf.minimum(1.0, rhos, name='cs')
# Append bootstrapped value to get [v1, ..., v_t+1]
values_t_plus_1 = tf.concat(
[values[1:], tf.expand_dims(bootstrap_value, 0)], axis=0)
deltas = clipped_rhos * (rewards + discounts * values_t_plus_1 - values)
sequences = (discounts, cs, deltas)
# V-trace vs are calculated through a scan from the back to the beginning
# of the given trajectory.
def scanfunc(acc, sequence_item):
discount_t, c_t, delta_t = sequence_item
return delta_t + discount_t * c_t * acc
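    # The scan evaluates, backwards in time with a zero initial accumulator,
    #   (v_s - V(x_s)) = delta_s + discount_s * c_s * (v_{s+1} - V(x_{s+1})),
    # which is the V-trace recursion from the paper.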
initial_values = tf.zeros_like(bootstrap_value)
vs_minus_v_xs = tf.scan(
fn=scanfunc,
elems=sequences,
initializer=initial_values,
parallel_iterations=1,
back_prop=False,
reverse=True, # Computation starts from the back.
name='scan')
# Add V(x_s) to get v_s.
vs = tf.add(vs_minus_v_xs, values, name='vs')
# Advantage for policy gradient.
vs_t_plus_1 = tf.concat([
vs[1:], tf.expand_dims(bootstrap_value, 0)], axis=0)
if clip_pg_rho_threshold is not None:
clipped_pg_rhos = tf.minimum(clip_pg_rho_threshold, rhos,
name='clipped_pg_rhos')
else:
clipped_pg_rhos = rhos
pg_advantages = (
clipped_pg_rhos * (rewards + discounts * vs_t_plus_1 - values))
# Make sure no gradients backpropagated through the returned values.
return VTraceReturns(vs=tf.stop_gradient(vs),
pg_advantages=tf.stop_gradient(pg_advantages))
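if __name__ == '__main__':
  # Minimal smoke-test sketch, not part of the original module; the shapes are
  # illustrative only (T=3 timesteps, B=2 batch entries).
  example_returns = from_importance_weights(
      log_rhos=tf.zeros([3, 2]),
      discounts=tf.fill([3, 2], 0.99),
      rewards=tf.ones([3, 2]),
      values=tf.zeros([3, 2]),
      bootstrap_value=tf.zeros([2]))
  with tf.Session() as sess:
    print(sess.run(example_returns.vs))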
|
scalable_agent-master
|
vtrace.py
|
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Environments and environment helper classes."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import os.path
import numpy as np
import tensorflow as tf
import deepmind_lab
nest = tf.contrib.framework.nest
class LocalLevelCache(object):
"""Local level cache."""
def __init__(self, cache_dir='/tmp/level_cache'):
self._cache_dir = cache_dir
tf.gfile.MakeDirs(cache_dir)
def fetch(self, key, pk3_path):
path = os.path.join(self._cache_dir, key)
if tf.gfile.Exists(path):
tf.gfile.Copy(path, pk3_path, overwrite=True)
return True
return False
def write(self, key, pk3_path):
path = os.path.join(self._cache_dir, key)
if not tf.gfile.Exists(path):
tf.gfile.Copy(pk3_path, path)
DEFAULT_ACTION_SET = (
(0, 0, 0, 1, 0, 0, 0), # Forward
(0, 0, 0, -1, 0, 0, 0), # Backward
(0, 0, -1, 0, 0, 0, 0), # Strafe Left
(0, 0, 1, 0, 0, 0, 0), # Strafe Right
(-20, 0, 0, 0, 0, 0, 0), # Look Left
(20, 0, 0, 0, 0, 0, 0), # Look Right
(-20, 0, 0, 1, 0, 0, 0), # Look Left + Forward
(20, 0, 0, 1, 0, 0, 0), # Look Right + Forward
(0, 0, 0, 0, 1, 0, 0), # Fire.
)
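# The seven entries of each action tuple correspond, in order, to DeepMind
# Lab's action dimensions (nominally: look_left_right, look_down_up,
# strafe_left_right, move_back_forward, fire, jump, crouch).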
class PyProcessDmLab(object):
"""DeepMind Lab wrapper for PyProcess."""
def __init__(self, level, config, num_action_repeats, seed,
runfiles_path=None, level_cache=None):
self._num_action_repeats = num_action_repeats
self._random_state = np.random.RandomState(seed=seed)
if runfiles_path:
deepmind_lab.set_runfiles_path(runfiles_path)
    config = {k: str(v) for k, v in config.items()}
self._observation_spec = ['RGB_INTERLEAVED', 'INSTR']
self._env = deepmind_lab.Lab(
level=level,
observations=self._observation_spec,
config=config,
level_cache=level_cache,
)
def _reset(self):
self._env.reset(seed=self._random_state.randint(0, 2 ** 31 - 1))
def _observation(self):
d = self._env.observations()
return [d[k] for k in self._observation_spec]
def initial(self):
self._reset()
return self._observation()
def step(self, action):
reward = self._env.step(action, num_steps=self._num_action_repeats)
done = np.array(not self._env.is_running())
if done:
self._reset()
observation = self._observation()
reward = np.array(reward, dtype=np.float32)
return reward, done, observation
def close(self):
self._env.close()
@staticmethod
def _tensor_specs(method_name, unused_kwargs, constructor_kwargs):
"""Returns a nest of `TensorSpec` with the method's output specification."""
width = constructor_kwargs['config'].get('width', 320)
height = constructor_kwargs['config'].get('height', 240)
observation_spec = [
tf.contrib.framework.TensorSpec([height, width, 3], tf.uint8),
tf.contrib.framework.TensorSpec([], tf.string),
]
if method_name == 'initial':
return observation_spec
elif method_name == 'step':
return (
tf.contrib.framework.TensorSpec([], tf.float32),
tf.contrib.framework.TensorSpec([], tf.bool),
observation_spec,
)
StepOutputInfo = collections.namedtuple('StepOutputInfo',
'episode_return episode_step')
StepOutput = collections.namedtuple('StepOutput',
'reward info done observation')
class FlowEnvironment(object):
"""An environment that returns a new state for every modifying method.
The environment returns a new environment state for every modifying action and
forces previous actions to be completed first. Similar to `flow` for
`TensorArray`.
"""
def __init__(self, env):
"""Initializes the environment.
Args:
env: An environment with `initial()` and `step(action)` methods where
`initial` returns the initial observations and `step` takes an action
and returns a tuple of (reward, done, observation). `observation`
should be the observation after the step is taken. If `done` is
True, the observation should be the first observation in the next
episode.
"""
self._env = env
def initial(self):
"""Returns the initial output and initial state.
Returns:
A tuple of (`StepOutput`, environment state). The environment state should
be passed in to the next invocation of `step` and should not be used in
      any other way. The reward and transition type in the `StepOutput` are the
      reward/transition type that led to the observation in `StepOutput`.
"""
with tf.name_scope('flow_environment_initial'):
initial_reward = tf.constant(0.)
initial_info = StepOutputInfo(tf.constant(0.), tf.constant(0))
initial_done = tf.constant(True)
initial_observation = self._env.initial()
initial_output = StepOutput(
initial_reward,
initial_info,
initial_done,
initial_observation)
# Control dependency to make sure the next step can't be taken before the
# initial output has been read from the environment.
with tf.control_dependencies(nest.flatten(initial_output)):
initial_flow = tf.constant(0, dtype=tf.int64)
initial_state = (initial_flow, initial_info)
return initial_output, initial_state
def step(self, action, state):
"""Takes a step in the environment.
Args:
action: An action tensor suitable for the underlying environment.
state: The environment state from the last step or initial state.
Returns:
A tuple of (`StepOutput`, environment state). The environment state should
be passed in to the next invocation of `step` and should not be used in
any other way. On episode end (i.e. `done` is True), the returned reward
should be included in the sum of rewards for the ending episode and not
part of the next episode.
"""
with tf.name_scope('flow_environment_step'):
flow, info = nest.map_structure(tf.convert_to_tensor, state)
# Make sure the previous step has been executed before running the next
# step.
with tf.control_dependencies([flow]):
reward, done, observation = self._env.step(action)
with tf.control_dependencies(nest.flatten(observation)):
new_flow = tf.add(flow, 1)
# When done, include the reward in the output info but not in the
# state for the next step.
new_info = StepOutputInfo(info.episode_return + reward,
info.episode_step + 1)
new_state = new_flow, nest.map_structure(
lambda a, b: tf.where(done, a, b),
StepOutputInfo(tf.constant(0.), tf.constant(0)),
new_info)
output = StepOutput(reward, new_info, done, observation)
return output, new_state
|
scalable_agent-master
|
environments.py
|
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyProcess.
This file includes utilities for running code in separate Python processes as
part of a TensorFlow graph. It is similar to tf.py_func, but the code is run in
separate processes to avoid the GIL.
Example:
class Zeros(object):
def __init__(self, dim0):
self._dim0 = dim0
def compute(self, dim1):
return np.zeros([self._dim0, dim1], dtype=np.int32)
@staticmethod
def _tensor_specs(method_name, kwargs, constructor_kwargs):
dim0 = constructor_kwargs['dim0']
dim1 = kwargs['dim1']
if method_name == 'compute':
return tf.contrib.framework.TensorSpec([dim0, dim1], tf.int32)
with tf.Graph().as_default():
p = py_process.PyProcess(Zeros, 1)
result = p.proxy.compute(2)
with tf.train.SingularMonitoredSession(
hooks=[py_process.PyProcessHook()]) as session:
print(session.run(result)) # Prints [[0, 0]].
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import multiprocessing
import tensorflow as tf
from tensorflow.python.util import function_utils
nest = tf.contrib.framework.nest
class _TFProxy(object):
"""A proxy that creates TensorFlow operations for each method call to a
separate process."""
def __init__(self, type_, constructor_kwargs):
self._type = type_
self._constructor_kwargs = constructor_kwargs
def __getattr__(self, name):
def call(*args):
kwargs = dict(
zip(function_utils.fn_args(getattr(self._type, name))[1:], args))
specs = self._type._tensor_specs(name, kwargs, self._constructor_kwargs)
if specs is None:
raise ValueError(
'No tensor specifications were provided for: %s' % name)
flat_dtypes = nest.flatten(nest.map_structure(lambda s: s.dtype, specs))
flat_shapes = nest.flatten(nest.map_structure(lambda s: s.shape, specs))
def py_call(*args):
try:
self._out.send(args)
result = self._out.recv()
if isinstance(result, Exception):
raise result
if result is not None:
return result
except Exception as e:
if isinstance(e, IOError):
raise StopIteration() # Clean exit.
else:
raise
result = tf.py_func(py_call, (name,) + tuple(args), flat_dtypes,
name=name)
if isinstance(result, tf.Operation):
return result
for t, shape in zip(result, flat_shapes):
t.set_shape(shape)
return nest.pack_sequence_as(specs, result)
return call
def _start(self):
self._out, in_ = multiprocessing.Pipe()
self._process = multiprocessing.Process(
target=self._worker_fn,
args=(self._type, self._constructor_kwargs, in_))
self._process.start()
result = self._out.recv()
if isinstance(result, Exception):
raise result
def _close(self, session):
try:
self._out.send(None)
self._out.close()
except IOError:
pass
self._process.join()
def _worker_fn(self, type_, constructor_kwargs, in_):
try:
o = type_(**constructor_kwargs)
in_.send(None) # Ready.
while True:
# Receive request.
serialized = in_.recv()
if serialized is None:
if hasattr(o, 'close'):
o.close()
in_.close()
return
method_name = str(serialized[0])
inputs = serialized[1:]
# Compute result.
results = getattr(o, method_name)(*inputs)
if results is not None:
results = nest.flatten(results)
# Respond.
in_.send(results)
except Exception as e:
if 'o' in locals() and hasattr(o, 'close'):
try:
o.close()
except:
pass
in_.send(e)
class PyProcess(object):
COLLECTION = 'py_process_processes'
def __init__(self, type_, *constructor_args, **constructor_kwargs):
self._type = type_
self._constructor_kwargs = dict(
zip(function_utils.fn_args(type_.__init__)[1:], constructor_args))
self._constructor_kwargs.update(constructor_kwargs)
tf.add_to_collection(PyProcess.COLLECTION, self)
self._proxy = _TFProxy(type_, self._constructor_kwargs)
@property
def proxy(self):
"""A proxy that creates TensorFlow operations for each method call."""
return self._proxy
def close(self, session):
self._proxy._close(session)
def start(self):
self._proxy._start()
class PyProcessHook(tf.train.SessionRunHook):
"""A MonitoredSession hook that starts and stops PyProcess instances."""
def begin(self):
tf.logging.info('Starting all processes.')
tp = multiprocessing.pool.ThreadPool()
tp.map(lambda p: p.start(), tf.get_collection(PyProcess.COLLECTION))
tp.close()
tp.join()
tf.logging.info('All processes started.')
def end(self, session):
tf.logging.info('Closing all processes.')
tp = multiprocessing.pool.ThreadPool()
tp.map(lambda p: p.close(session), tf.get_collection(PyProcess.COLLECTION))
tp.close()
tp.join()
tf.logging.info('All processes closed.')
|
scalable_agent-master
|
py_process.py
|
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Dynamic batching."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import tensorflow as tf
batcher_ops = tf.load_op_library('./batcher.so')
nest = tf.contrib.framework.nest
class _Batcher(object):
"""A thin layer around the Batcher TensorFlow operations.
It shares some of the interface with queues (close(), name) to be able to use
it correctly as the input to a QueueRunner.
"""
def __init__(self, minimum_batch_size, maximum_batch_size, timeout_ms):
self._handle = batcher_ops.batcher(minimum_batch_size, maximum_batch_size,
timeout_ms or -1)
@property
def name(self):
return 'batcher'
def get_inputs(self, input_dtypes):
return batcher_ops.batcher_get_inputs(self._handle, input_dtypes)
def set_outputs(self, flat_result, computation_id):
return batcher_ops.batcher_set_outputs(self._handle, flat_result,
computation_id)
def compute(self, flat_args, output_dtypes):
return batcher_ops.batcher_compute(self._handle, flat_args, output_dtypes)
def close(self, cancel_pending_enqueues=False, name=None):
del cancel_pending_enqueues
return batcher_ops.batcher_close(self._handle, name=name)
def batch_fn(f):
"""See `batch_fn_with_options` for details."""
return batch_fn_with_options()(f)
def batch_fn_with_options(minimum_batch_size=1, maximum_batch_size=1024,
timeout_ms=100):
"""Python decorator that automatically batches computations.
When the decorated function is called, it creates an operation that adds the
inputs to a queue, waits until the computation is done, and returns the
tensors. The inputs must be nests (see `tf.contrib.framework.nest`) and the
first dimension of each tensor in the nest must have size 1.
It adds a QueueRunner that asynchronously keeps fetching batches of data,
computes the results and pushes the results back to the caller.
Example usage:
@dynamic_batching.batch_fn_with_options(
minimum_batch_size=10, timeout_ms=100)
def fn(a, b):
return a + b
output0 = fn(tf.constant([1]), tf.constant([2])) # Will be batched with the
# next call.
output1 = fn(tf.constant([3]), tf.constant([4]))
Note, gradients are currently not supported.
Note, if minimum_batch_size == maximum_batch_size and timeout_ms=None, then
the batch size of input arguments will be set statically. Otherwise, it will
be None.
Args:
minimum_batch_size: The minimum batch size before processing starts.
maximum_batch_size: The maximum batch size.
timeout_ms: Milliseconds after a batch of samples is requested before it is
processed, even if the batch size is smaller than `minimum_batch_size`. If
None, there is no timeout.
Returns:
The decorator.
"""
def decorator(f):
"""Decorator."""
batcher = [None]
batched_output = [None]
@functools.wraps(f)
def wrapper(*args):
"""Wrapper."""
flat_args = [tf.convert_to_tensor(arg) for arg in nest.flatten(args)]
if batcher[0] is None:
        # Remove control dependencies, which is necessary when the wrapped
        # function is called inside loops, etc.
with tf.control_dependencies(None):
input_dtypes = [t.dtype for t in flat_args]
batcher[0] = _Batcher(minimum_batch_size, maximum_batch_size,
timeout_ms)
# Compute in batches using a queue runner.
if minimum_batch_size == maximum_batch_size and timeout_ms is None:
batch_size = minimum_batch_size
else:
batch_size = None
# Dequeue batched input.
inputs, computation_id = batcher[0].get_inputs(input_dtypes)
nest.map_structure(
lambda i, a: i.set_shape([batch_size] + a.shape.as_list()[1:]),
inputs, flat_args)
# Compute result.
result = f(*nest.pack_sequence_as(args, inputs))
batched_output[0] = result
flat_result = nest.flatten(result)
# Insert results back into batcher.
set_op = batcher[0].set_outputs(flat_result, computation_id)
tf.train.add_queue_runner(tf.train.QueueRunner(batcher[0], [set_op]))
# Insert inputs into input queue.
flat_result = batcher[0].compute(
flat_args,
[t.dtype for t in nest.flatten(batched_output[0])])
# Restore structure and shapes.
result = nest.pack_sequence_as(batched_output[0], flat_result)
static_batch_size = nest.flatten(args)[0].shape[0]
nest.map_structure(
lambda t, b: t.set_shape([static_batch_size] + b.shape[1:].as_list()),
result, batched_output[0])
return result
return wrapper
return decorator
|
scalable_agent-master
|
dynamic_batching.py
|
# Copyright 2018 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""A binary building the graph and performing the optimization of LEO."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import os
import pickle
from absl import flags
from six.moves import zip
import tensorflow as tf
import config
import data
import model
import utils
FLAGS = flags.FLAGS
flags.DEFINE_string("checkpoint_path", "/tmp/leo", "Path to restore from and "
"save to checkpoints.")
flags.DEFINE_integer(
"checkpoint_steps", 1000, "The frequency, in number of "
"steps, of saving the checkpoints.")
flags.DEFINE_boolean("evaluation_mode", False, "Whether to run in an "
"evaluation-only mode.")
def _clip_gradients(gradients, gradient_threshold, gradient_norm_threshold):
"""Clips gradients by value and then by norm."""
if gradient_threshold > 0:
gradients = [
tf.clip_by_value(g, -gradient_threshold, gradient_threshold)
for g in gradients
]
if gradient_norm_threshold > 0:
gradients = [
tf.clip_by_norm(g, gradient_norm_threshold) for g in gradients
]
return gradients
def _construct_validation_summaries(metavalid_loss, metavalid_accuracy):
tf.summary.scalar("metavalid_loss", metavalid_loss)
tf.summary.scalar("metavalid_valid_accuracy", metavalid_accuracy)
# The summaries are passed implicitly by TensorFlow.
def _construct_training_summaries(metatrain_loss, metatrain_accuracy,
model_grads, model_vars):
tf.summary.scalar("metatrain_loss", metatrain_loss)
tf.summary.scalar("metatrain_valid_accuracy", metatrain_accuracy)
for g, v in zip(model_grads, model_vars):
histogram_name = v.name.split(":")[0]
tf.summary.histogram(histogram_name, v)
histogram_name = "gradient/{}".format(histogram_name)
tf.summary.histogram(histogram_name, g)
def _construct_examples_batch(batch_size, split, num_classes,
num_tr_examples_per_class,
num_val_examples_per_class):
data_provider = data.DataProvider(split, config.get_data_config())
examples_batch = data_provider.get_batch(batch_size, num_classes,
num_tr_examples_per_class,
num_val_examples_per_class)
return utils.unpack_data(examples_batch)
def _construct_loss_and_accuracy(inner_model, inputs, is_meta_training):
"""Returns batched loss and accuracy of the model ran on the inputs."""
call_fn = functools.partial(
inner_model.__call__, is_meta_training=is_meta_training)
per_instance_loss, per_instance_accuracy = tf.map_fn(
call_fn,
inputs,
dtype=(tf.float32, tf.float32),
back_prop=is_meta_training)
loss = tf.reduce_mean(per_instance_loss)
accuracy = tf.reduce_mean(per_instance_accuracy)
return loss, accuracy
def construct_graph(outer_model_config):
"""Constructs the optimization graph."""
inner_model_config = config.get_inner_model_config()
tf.logging.info("inner_model_config: {}".format(inner_model_config))
leo = model.LEO(inner_model_config, use_64bits_dtype=False)
num_classes = outer_model_config["num_classes"]
num_tr_examples_per_class = outer_model_config["num_tr_examples_per_class"]
metatrain_batch = _construct_examples_batch(
outer_model_config["metatrain_batch_size"], "train", num_classes,
num_tr_examples_per_class,
outer_model_config["num_val_examples_per_class"])
metatrain_loss, metatrain_accuracy = _construct_loss_and_accuracy(
leo, metatrain_batch, True)
metatrain_gradients, metatrain_variables = leo.grads_and_vars(metatrain_loss)
# Avoids NaNs in summaries.
metatrain_loss = tf.cond(tf.is_nan(metatrain_loss),
lambda: tf.zeros_like(metatrain_loss),
lambda: metatrain_loss)
metatrain_gradients = _clip_gradients(
metatrain_gradients, outer_model_config["gradient_threshold"],
outer_model_config["gradient_norm_threshold"])
_construct_training_summaries(metatrain_loss, metatrain_accuracy,
metatrain_gradients, metatrain_variables)
optimizer = tf.train.AdamOptimizer(
learning_rate=outer_model_config["outer_lr"])
global_step = tf.train.get_or_create_global_step()
train_op = optimizer.apply_gradients(
list(zip(metatrain_gradients, metatrain_variables)), global_step)
data_config = config.get_data_config()
tf.logging.info("data_config: {}".format(data_config))
total_examples_per_class = data_config["total_examples_per_class"]
metavalid_batch = _construct_examples_batch(
outer_model_config["metavalid_batch_size"], "val", num_classes,
num_tr_examples_per_class,
total_examples_per_class - num_tr_examples_per_class)
metavalid_loss, metavalid_accuracy = _construct_loss_and_accuracy(
leo, metavalid_batch, False)
metatest_batch = _construct_examples_batch(
outer_model_config["metatest_batch_size"], "test", num_classes,
num_tr_examples_per_class,
total_examples_per_class - num_tr_examples_per_class)
_, metatest_accuracy = _construct_loss_and_accuracy(
leo, metatest_batch, False)
_construct_validation_summaries(metavalid_loss, metavalid_accuracy)
return (train_op, global_step, metatrain_accuracy, metavalid_accuracy,
metatest_accuracy)
def run_training_loop(checkpoint_path):
"""Runs the training loop, either saving a checkpoint or evaluating it."""
outer_model_config = config.get_outer_model_config()
tf.logging.info("outer_model_config: {}".format(outer_model_config))
(train_op, global_step, metatrain_accuracy, metavalid_accuracy,
metatest_accuracy) = construct_graph(outer_model_config)
num_steps_limit = outer_model_config["num_steps_limit"]
best_metavalid_accuracy = 0.
with tf.train.MonitoredTrainingSession(
checkpoint_dir=checkpoint_path,
save_summaries_steps=FLAGS.checkpoint_steps,
log_step_count_steps=FLAGS.checkpoint_steps,
save_checkpoint_steps=FLAGS.checkpoint_steps,
summary_dir=checkpoint_path) as sess:
if not FLAGS.evaluation_mode:
global_step_ev = sess.run(global_step)
while global_step_ev < num_steps_limit:
if global_step_ev % FLAGS.checkpoint_steps == 0:
# Just after saving checkpoint, calculate accuracy 10 times and save
# the best checkpoint for early stopping.
metavalid_accuracy_ev = utils.evaluate_and_average(
sess, metavalid_accuracy, 10)
tf.logging.info("Step: {} meta-valid accuracy: {}".format(
global_step_ev, metavalid_accuracy_ev))
if metavalid_accuracy_ev > best_metavalid_accuracy:
utils.copy_checkpoint(checkpoint_path, global_step_ev,
metavalid_accuracy_ev)
best_metavalid_accuracy = metavalid_accuracy_ev
_, global_step_ev, metatrain_accuracy_ev = sess.run(
[train_op, global_step, metatrain_accuracy])
if global_step_ev % (FLAGS.checkpoint_steps // 2) == 0:
tf.logging.info("Step: {} meta-train accuracy: {}".format(
global_step_ev, metatrain_accuracy_ev))
else:
assert not FLAGS.checkpoint_steps
num_metatest_estimates = (
10000 // outer_model_config["metatest_batch_size"])
test_accuracy = utils.evaluate_and_average(sess, metatest_accuracy,
num_metatest_estimates)
tf.logging.info("Metatest accuracy: %f", test_accuracy)
with tf.gfile.Open(
os.path.join(checkpoint_path, "test_accuracy"), "wb") as f:
pickle.dump(test_accuracy, f)
def main(argv):
del argv # Unused.
run_training_loop(FLAGS.checkpoint_path)
if __name__ == "__main__":
tf.app.run()
|
leo-master
|
runner.py
|
# Copyright 2018 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for ml_leo.model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
from absl.testing import parameterized
import mock
import numpy as np
from six.moves import zip
import sonnet as snt
import tensorflow as tf
import data
import model
# Adding float64 and 32 gives an error in TensorFlow.
constant_float64 = lambda x: tf.constant(x, dtype=tf.float64)
def get_test_config():
"""Returns the config used to initialize LEO model."""
config = {}
config["inner_unroll_length"] = 3
config["finetuning_unroll_length"] = 4
config["inner_lr_init"] = 0.1
config["finetuning_lr_init"] = 0.2
config["num_latents"] = 1
config["dropout_rate"] = 0.3
config["kl_weight"] = 0.01
config["encoder_penalty_weight"] = 0.01
config["l2_penalty_weight"] = 0.01
config["orthogonality_penalty_weight"] = 0.01
return config
def mockify_everything(test_function=None,
mock_finetuning=True,
mock_encdec=True):
"""Mockifies most of the LEO"s model functions to behave as identity."""
def inner_decorator(f):
@functools.wraps(f)
def mockified(*args, **kwargs):
identity_mapping = lambda unused_self, inp, *args: tf.identity(inp)
mock_encoder = mock.patch.object(
model.LEO, "encoder", new=identity_mapping)
mock_relation_network = mock.patch.object(
model.LEO, "relation_network", new=identity_mapping)
mock_decoder = mock.patch.object(
model.LEO, "decoder", new=identity_mapping)
mock_average = mock.patch.object(
model.LEO, "average_codes_per_class", new=identity_mapping)
mock_loss = mock.patch.object(model.LEO, "loss_fn", new=identity_mapping)
float64_zero = constant_float64(0.)
def identity_sample_fn(unused_self, inp, *unused_args, **unused_kwargs):
return inp, float64_zero
def mock_sample_with_split(unused_self, inp, *unused_args,
**unused_kwargs):
out = tf.split(inp, 2, axis=-1)[0]
return out, float64_zero
# When not mocking relation net, it will double the latents.
mock_sample = mock.patch.object(
model.LEO,
"possibly_sample",
new=identity_sample_fn if mock_encdec else mock_sample_with_split)
def dummy_predict(unused_self, inputs, classifier_weights):
return inputs * classifier_weights**2
mock_predict = mock.patch.object(model.LEO, "predict", new=dummy_predict)
mock_decoder_regularizer = mock.patch.object(
model.LEO, "_decoder_orthogonality_reg", new=float64_zero)
all_mocks = [mock_average, mock_loss, mock_predict, mock_sample]
if mock_encdec:
all_mocks.extend([
mock_encoder,
mock_relation_network,
mock_decoder,
mock_decoder_regularizer,
])
if mock_finetuning:
mock_finetuning_inner = mock.patch.object(
model.LEO,
"finetuning_inner_loop",
new=lambda unused_self, d, l, adapted: (adapted, float64_zero))
all_mocks.append(mock_finetuning_inner)
for m in all_mocks:
m.start()
f(*args, **kwargs)
for m in all_mocks:
m.stop()
return mockified
if test_function:
# Decorator called with no arguments, so the function is passed
return inner_decorator(test_function)
return inner_decorator
def _random_problem_instance(num_classes=7,
num_examples_per_class=5,
embedding_dim=17, use_64bits_dtype=True):
inputs_dtype = tf.float64 if use_64bits_dtype else tf.float32
inputs = tf.constant(
np.random.random((num_classes, num_examples_per_class, embedding_dim)),
dtype=inputs_dtype)
outputs_dtype = tf.int64 if use_64bits_dtype else tf.int32
outputs = tf.constant(
np.random.randint(
low=0,
high=num_classes,
size=(num_classes, num_examples_per_class, 1)), dtype=outputs_dtype)
problem = data.ProblemInstance(
tr_input=inputs,
val_input=inputs,
tr_info=inputs,
tr_output=outputs,
val_output=outputs,
val_info=inputs)
return problem
class LEOTest(tf.test.TestCase, parameterized.TestCase):
def setUp(self):
super(LEOTest, self).setUp()
self._problem = _random_problem_instance(5, 7, 4)
    # This doesn't call any function, so doesn't need the mocks to be started.
self._config = get_test_config()
self._leo = model.LEO(config=self._config)
self.addCleanup(mock.patch.stopall)
@mockify_everything
def test_instantiate_leo(self):
encoder_output = self._leo.encoder(5, 7)
with self.session() as sess:
encoder_output_ev = sess.run(encoder_output)
self.assertEqual(encoder_output_ev, 5)
@mockify_everything
def test_inner_loop_adaptation(self):
problem_instance = data.ProblemInstance(
tr_input=constant_float64([[[4.]]]),
tr_output=tf.constant([[[0]]], dtype=tf.int64),
tr_info=[],
val_input=[],
val_output=[],
val_info=[],
)
# encoder = decoder = id
# predict returns classifier_weights**2 * inputs = latents**2 * inputs
# loss = id = inputs*latents
# dl/dlatent = 2 * latent * inputs
# 4 -> 4 - 0.1 * 2 * 4 * 4 = 0.8
# 0.8 -> 0.8 - 0.1 * 2 * 0.8 * 4 = 0.16
# 0.16 -> 0.16 - 0.1 * 2 * 0.16 * 4 = 0.032
# is_meta_training=False disables kl and encoder penalties
adapted_parameters, _ = self._leo(problem_instance, is_meta_training=False)
with self.session() as sess:
sess.run(tf.global_variables_initializer())
self.assertAllClose(sess.run(adapted_parameters), 0.032)
@mockify_everything
def test_map_input(self):
problem = [
constant_float64([[[5.]]]), # tr_input
tf.constant([[[0]]], dtype=tf.int64), # tr_output
constant_float64([[[0]]]), # tr_info
constant_float64([[[0.]]]), # val_input
tf.constant([[[0]]], dtype=tf.int64), # val_output
constant_float64([[[0]]]), # val_info
]
another_problem = [
constant_float64([[[4.]]]),
tf.constant([[[0]]], dtype=tf.int64),
constant_float64([[[0]]]),
constant_float64([[[0.]]]),
tf.constant([[[0]]], dtype=tf.int64),
constant_float64([[[0]]]),
]
    # first dimension (list): different input kind (tr_input, val_output, etc.)
# second dim: different problems; this has to be a tensor dim for map_fn
# to split over it.
# next three: (1, 1, 1)
# map_fn cannot receive structured inputs (namedtuples).
ins = [
tf.stack([in1, in2])
for in1, in2 in zip(problem, another_problem)
]
two_adapted_params, _ = tf.map_fn(
self._leo.__call__, ins, dtype=(tf.float64, tf.float64))
with self.session() as sess:
sess.run(tf.global_variables_initializer())
output1, output2 = sess.run(two_adapted_params)
self.assertGreater(abs(output1 - output2), 1e-3)
@mockify_everything
def test_setting_is_meta_training(self):
self._leo(self._problem, is_meta_training=True)
self.assertTrue(self._leo.is_meta_training)
self._leo(self._problem, is_meta_training=False)
self.assertFalse(self._leo.is_meta_training)
@mockify_everything(mock_finetuning=False)
def test_finetuning_improves_loss(self):
# Create graph
self._leo(self._problem)
latents, _ = self._leo.forward_encoder(self._problem)
leo_loss, adapted_classifier_weights, _ = self._leo.leo_inner_loop(
self._problem, latents)
leo_loss = tf.reduce_mean(leo_loss)
finetuning_loss, _ = self._leo.finetuning_inner_loop(
self._problem, leo_loss, adapted_classifier_weights)
finetuning_loss = tf.reduce_mean(finetuning_loss)
with self.session() as sess:
sess.run(tf.global_variables_initializer())
leo_loss_ev, finetuning_loss_ev = sess.run([leo_loss, finetuning_loss])
self.assertGreater(leo_loss_ev - 1e-3, finetuning_loss_ev)
@mockify_everything
def test_gradients_dont_flow_through_input(self):
# Create graph
self._leo(self._problem)
latents, _ = self._leo.forward_encoder(self._problem)
grads = tf.gradients(self._problem.tr_input, latents)
self.assertIsNone(grads[0])
@mockify_everything
def test_inferring_embedding_dim(self):
self._leo(self._problem)
self.assertEqual(self._leo.embedding_dim, 4)
@mockify_everything(mock_encdec=False, mock_finetuning=False)
def test_variable_creation(self):
self._leo(self._problem)
encoder_variables = snt.get_variables_in_scope("leo/encoder")
self.assertNotEmpty(encoder_variables)
relation_network_variables = snt.get_variables_in_scope(
"leo/relation_network")
self.assertNotEmpty(relation_network_variables)
decoder_variables = snt.get_variables_in_scope("leo/decoder")
self.assertNotEmpty(decoder_variables)
inner_lr = snt.get_variables_in_scope("leo/leo_inner")
self.assertNotEmpty(inner_lr)
finetuning_lr = snt.get_variables_in_scope("leo/finetuning")
self.assertNotEmpty(finetuning_lr)
self.assertSameElements(
encoder_variables + relation_network_variables + decoder_variables +
inner_lr + finetuning_lr, self._leo.trainable_variables)
def test_graph_construction(self):
self._leo(self._problem)
def test_possibly_sample(self):
# Embedding dimension has to be divisible by 2 here.
self._leo(self._problem, is_meta_training=True)
train_samples, train_kl = self._leo.possibly_sample(self._problem.tr_input)
self._leo(self._problem, is_meta_training=False)
test_samples, test_kl = self._leo.possibly_sample(self._problem.tr_input)
with self.session() as sess:
train_samples_ev1, test_samples_ev1 = sess.run(
[train_samples, test_samples])
train_samples_ev2, test_samples_ev2 = sess.run(
[train_samples, test_samples])
self.assertAllClose(test_samples_ev1, test_samples_ev2)
self.assertGreater(abs(np.sum(train_samples_ev1 - train_samples_ev2)), 1.)
train_kl_ev, test_kl_ev = sess.run([train_kl, test_kl])
self.assertNotEqual(train_kl_ev, 0.)
self.assertEqual(test_kl_ev, 0.)
def test_different_shapes(self):
problem_instance2 = _random_problem_instance(5, 6, 13)
self._leo(self._problem)
with self.assertRaises(AssertionError):
self._leo(problem_instance2)
def test_encoder_penalty(self):
self._leo(self._problem) # Sets is_meta_training
latents, _ = self._leo.forward_encoder(self._problem)
_, _, train_encoder_penalty = self._leo.leo_inner_loop(
self._problem, latents)
self._leo(self._problem, is_meta_training=False)
_, _, test_encoder_penalty = self._leo.leo_inner_loop(
self._problem, latents)
with self.session() as sess:
sess.run(tf.initializers.global_variables())
train_encoder_penalty_ev, test_encoder_penalty_ev = sess.run(
[train_encoder_penalty, test_encoder_penalty])
self.assertGreater(train_encoder_penalty_ev, 1e-3)
self.assertLess(test_encoder_penalty_ev, 1e-7)
def test_construct_float32_leo_graph(self):
leo = model.LEO(use_64bits_dtype=False, config=self._config)
problem_instance_32_bits = _random_problem_instance(use_64bits_dtype=False)
leo(problem_instance_32_bits)
if __name__ == "__main__":
tf.test.main()
|
leo-master
|
model_test.py
|
# coding=utf8
# Copyright 2018 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""A module containing just the configs for the different LEO parts."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl import flags
FLAGS = flags.FLAGS
flags.DEFINE_string("data_path", None, "Path to the dataset.")
flags.DEFINE_string(
"dataset_name", "miniImageNet", "Name of the dataset to "
"train on, which will be mapped to data.MetaDataset.")
flags.DEFINE_string(
"embedding_crop", "center", "Type of the cropping, which "
"will be mapped to data.EmbeddingCrop.")
flags.DEFINE_boolean("train_on_val", False, "Whether to train on the "
"validation data.")
flags.DEFINE_integer(
"inner_unroll_length", 5, "Number of unroll steps in the "
"inner loop of leo (number of adaptation steps in the "
"latent space).")
flags.DEFINE_integer(
"finetuning_unroll_length", 5, "Number of unroll steps "
"in the loop performing finetuning (number of adaptation "
"steps in the parameter space).")
flags.DEFINE_integer("num_latents", 64, "The dimensionality of the latent "
"space.")
flags.DEFINE_float(
"inner_lr_init", 1.0, "The initialization value for the "
"learning rate of the inner loop of leo.")
flags.DEFINE_float(
"finetuning_lr_init", 0.001, "The initialization value for "
"learning rate of the finetuning loop.")
flags.DEFINE_float("dropout_rate", 0.5, "Rate of dropout: probability of "
"dropping a given unit.")
flags.DEFINE_float(
"kl_weight", 1e-3, "The weight measuring importance of the "
"KL in the final loss. β in the paper.")
flags.DEFINE_float(
"encoder_penalty_weight", 1e-9, "The weight measuring "
"importance of the encoder penalty in the final loss. γ in "
"the paper.")
flags.DEFINE_float("l2_penalty_weight", 1e-8, "The weight measuring the "
"importance of the l2 regularization in the final loss. λ₁ "
"in the paper.")
flags.DEFINE_float("orthogonality_penalty_weight", 1e-3, "The weight measuring "
"the importance of the decoder orthogonality regularization "
"in the final loss. λ₂ in the paper.")
flags.DEFINE_integer(
"num_classes", 5, "Number of classes, N in N-way classification.")
flags.DEFINE_integer(
"num_tr_examples_per_class", 1, "Number of training samples per class, "
"K in K-shot classification.")
flags.DEFINE_integer(
"num_val_examples_per_class", 15, "Number of validation samples per class "
"in a task instance.")
flags.DEFINE_integer("metatrain_batch_size", 12, "Number of problem instances "
"in a batch.")
flags.DEFINE_integer("metavalid_batch_size", 200, "Number of meta-validation "
"problem instances.")
flags.DEFINE_integer("metatest_batch_size", 200, "Number of meta-testing "
"problem instances.")
flags.DEFINE_integer("num_steps_limit", int(1e5), "Number of steps to train "
"for.")
flags.DEFINE_float("outer_lr", 1e-4, "Outer (metatraining) loop learning "
"rate.")
flags.DEFINE_float(
"gradient_threshold", 0.1, "The cutoff for the gradient "
"clipping. Gradients will be clipped to "
"[-gradient_threshold, gradient_threshold]")
flags.DEFINE_float(
"gradient_norm_threshold", 0.1, "The cutoff for clipping of "
"the gradient norm. Gradient norm clipping will be applied "
"after pointwise clipping (described above).")
def get_data_config():
config = {}
config["data_path"] = FLAGS.data_path
config["dataset_name"] = FLAGS.dataset_name
config["embedding_crop"] = FLAGS.embedding_crop
config["train_on_val"] = FLAGS.train_on_val
config["total_examples_per_class"] = 600
return config
def get_inner_model_config():
"""Returns the config used to initialize LEO model."""
config = {}
config["inner_unroll_length"] = FLAGS.inner_unroll_length
config["finetuning_unroll_length"] = FLAGS.finetuning_unroll_length
config["num_latents"] = FLAGS.num_latents
config["inner_lr_init"] = FLAGS.inner_lr_init
config["finetuning_lr_init"] = FLAGS.finetuning_lr_init
config["dropout_rate"] = FLAGS.dropout_rate
config["kl_weight"] = FLAGS.kl_weight
config["encoder_penalty_weight"] = FLAGS.encoder_penalty_weight
config["l2_penalty_weight"] = FLAGS.l2_penalty_weight
config["orthogonality_penalty_weight"] = FLAGS.orthogonality_penalty_weight
return config
def get_outer_model_config():
"""Returns the outer config file for N-way K-shot classification tasks."""
config = {}
config["num_classes"] = FLAGS.num_classes
config["num_tr_examples_per_class"] = FLAGS.num_tr_examples_per_class
config["num_val_examples_per_class"] = FLAGS.num_val_examples_per_class
config["metatrain_batch_size"] = FLAGS.metatrain_batch_size
config["metavalid_batch_size"] = FLAGS.metavalid_batch_size
config["metatest_batch_size"] = FLAGS.metatest_batch_size
config["num_steps_limit"] = FLAGS.num_steps_limit
config["outer_lr"] = FLAGS.outer_lr
config["gradient_threshold"] = FLAGS.gradient_threshold
config["gradient_norm_threshold"] = FLAGS.gradient_norm_threshold
return config
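# Usage sketch: a training script (assumed, not part of this module) parses the
# flags with absl.app and then assembles the three config dicts.
#
#   from absl import app
#
#   def main(argv):
#     del argv  # Unused.
#     data_config = get_data_config()
#     inner_config = get_inner_model_config()
#     outer_config = get_outer_model_config()
#     print(data_config["dataset_name"], outer_config["num_classes"])
#
#   if __name__ == "__main__":
#     app.run(main)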
|
leo-master
|
config.py
|
# Copyright 2018 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Code defining LEO inner loop.
See "Meta-Learning with Latent Embedding Optimization" by Rusu et al.
(https://arxiv.org/pdf/1807.05960.pdf).
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from six.moves import range
from six.moves import zip
import sonnet as snt
import tensorflow as tf
import tensorflow_probability as tfp
import data as data_module
def get_orthogonality_regularizer(orthogonality_penalty_weight):
"""Returns the orthogonality regularizer."""
def orthogonality(weight):
"""Calculates the layer-wise penalty encouraging orthogonality."""
with tf.name_scope(None, "orthogonality", [weight]) as name:
w2 = tf.matmul(weight, weight, transpose_b=True)
wn = tf.norm(weight, ord=2, axis=1, keepdims=True) + 1e-32
correlation_matrix = w2 / tf.matmul(wn, wn, transpose_b=True)
matrix_size = correlation_matrix.get_shape().as_list()[0]
base_dtype = weight.dtype.base_dtype
identity = tf.eye(matrix_size, dtype=base_dtype)
weight_corr = tf.reduce_mean(
tf.squared_difference(correlation_matrix, identity))
return tf.multiply(
tf.cast(orthogonality_penalty_weight, base_dtype),
weight_corr,
name=name)
return orthogonality
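# Usage sketch: the returned closure maps a 2-D weight Tensor to a scalar
# penalty; in this module it is only applied to the decoder weights (see
# LEO.decoder below). `decoder_weight` is an illustrative name.
#
#   orthogonality_reg = get_orthogonality_regularizer(1e-3)
#   penalty = orthogonality_reg(decoder_weight)  # scalar Tensor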
class LEO(snt.AbstractModule):
"""Sonnet module implementing the inner loop of LEO."""
def __init__(self, config=None, use_64bits_dtype=True, name="leo"):
super(LEO, self).__init__(name=name)
self._float_dtype = tf.float64 if use_64bits_dtype else tf.float32
self._int_dtype = tf.int64 if use_64bits_dtype else tf.int32
self._inner_unroll_length = config["inner_unroll_length"]
self._finetuning_unroll_length = config["finetuning_unroll_length"]
self._inner_lr_init = config["inner_lr_init"]
self._finetuning_lr_init = config["finetuning_lr_init"]
self._num_latents = config["num_latents"]
self._dropout_rate = config["dropout_rate"]
self._kl_weight = config["kl_weight"] # beta
self._encoder_penalty_weight = config["encoder_penalty_weight"] # gamma
self._l2_penalty_weight = config["l2_penalty_weight"] # lambda_1
# lambda_2
self._orthogonality_penalty_weight = config["orthogonality_penalty_weight"]
assert self._inner_unroll_length > 0, ("Positive unroll length is necessary"
" to create the graph")
def _build(self, data, is_meta_training=True):
"""Connects the LEO module to the graph, creating the variables.
Args:
      data: A data_module.ProblemInstance containing Tensors with the
following shapes:
- tr_input: (N, K, dim)
- tr_output: (N, K, 1)
- tr_info: (N, K)
- val_input: (N, K_valid, dim)
- val_output: (N, K_valid, 1)
- val_info: (N, K_valid)
        where N is the number of classes (as in N-way), K and K_valid are the
        numbers of training and validation examples per class within a
        problem instance (as in K-shot), and dim is the
dimensionality of the embedding.
is_meta_training: A boolean describing whether we run in the training
mode.
Returns:
      A tuple with the batch-averaged inner validation loss of LEO (including
      both adaptation in the latent space and finetuning, plus regularization)
      and the batch-averaged validation accuracy.
"""
if isinstance(data, list):
data = data_module.ProblemInstance(*data)
self.is_meta_training = is_meta_training
self.save_problem_instance_stats(data.tr_input)
latents, kl = self.forward_encoder(data)
tr_loss, adapted_classifier_weights, encoder_penalty = self.leo_inner_loop(
data, latents)
val_loss, val_accuracy = self.finetuning_inner_loop(
data, tr_loss, adapted_classifier_weights)
val_loss += self._kl_weight * kl
val_loss += self._encoder_penalty_weight * encoder_penalty
    # The l2 regularization is already added to the graph when constructing
# the snt.Linear modules. We pass the orthogonality regularizer separately,
# because it is not used in self.grads_and_vars.
regularization_penalty = (
self._l2_regularization + self._decoder_orthogonality_reg)
batch_val_loss = tf.reduce_mean(val_loss)
batch_val_accuracy = tf.reduce_mean(val_accuracy)
return batch_val_loss + regularization_penalty, batch_val_accuracy
@snt.reuse_variables
def leo_inner_loop(self, data, latents):
with tf.variable_scope("leo_inner"):
inner_lr = tf.get_variable(
"lr", [1, 1, self._num_latents],
dtype=self._float_dtype,
initializer=tf.constant_initializer(self._inner_lr_init))
starting_latents = latents
loss, _ = self.forward_decoder(data, latents)
for _ in range(self._inner_unroll_length):
loss_grad = tf.gradients(loss, latents) # dLtrain/dz
latents -= inner_lr * loss_grad[0]
loss, classifier_weights = self.forward_decoder(data, latents)
if self.is_meta_training:
encoder_penalty = tf.losses.mean_squared_error(
labels=tf.stop_gradient(latents), predictions=starting_latents)
encoder_penalty = tf.cast(encoder_penalty, self._float_dtype)
else:
encoder_penalty = tf.constant(0., self._float_dtype)
return loss, classifier_weights, encoder_penalty
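  # Note: leo_inner_loop performs `inner_unroll_length` steps of gradient
  # descent in the latent space, z <- z - inner_lr * dL_train/dz, re-decoding
  # the classifier weights after every step. The encoder penalty is the MSE
  # between the adapted latents (under a stop-gradient) and the encoder's
  # initial latents; it is only non-zero during meta-training.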
@snt.reuse_variables
def finetuning_inner_loop(self, data, leo_loss, classifier_weights):
tr_loss = leo_loss
with tf.variable_scope("finetuning"):
finetuning_lr = tf.get_variable(
"lr", [1, 1, self.embedding_dim],
dtype=self._float_dtype,
initializer=tf.constant_initializer(self._finetuning_lr_init))
for _ in range(self._finetuning_unroll_length):
loss_grad = tf.gradients(tr_loss, classifier_weights)
classifier_weights -= finetuning_lr * loss_grad[0]
tr_loss, _ = self.calculate_inner_loss(data.tr_input, data.tr_output,
classifier_weights)
val_loss, val_accuracy = self.calculate_inner_loss(
data.val_input, data.val_output, classifier_weights)
return val_loss, val_accuracy
@snt.reuse_variables
def forward_encoder(self, data):
encoder_outputs = self.encoder(data.tr_input)
relation_network_outputs = self.relation_network(encoder_outputs)
latent_dist_params = self.average_codes_per_class(relation_network_outputs)
latents, kl = self.possibly_sample(latent_dist_params)
return latents, kl
@snt.reuse_variables
def forward_decoder(self, data, latents):
weights_dist_params = self.decoder(latents)
# Default to glorot_initialization and not stddev=1.
fan_in = self.embedding_dim.value
fan_out = self.num_classes.value
stddev_offset = np.sqrt(2. / (fan_out + fan_in))
classifier_weights, _ = self.possibly_sample(weights_dist_params,
stddev_offset=stddev_offset)
tr_loss, _ = self.calculate_inner_loss(data.tr_input, data.tr_output,
classifier_weights)
return tr_loss, classifier_weights
@snt.reuse_variables
def encoder(self, inputs):
with tf.variable_scope("encoder"):
after_dropout = tf.nn.dropout(inputs, rate=self.dropout_rate)
regularizer = tf.contrib.layers.l2_regularizer(self._l2_penalty_weight)
initializer = tf.initializers.glorot_uniform(dtype=self._float_dtype)
encoder_module = snt.Linear(
self._num_latents,
use_bias=False,
regularizers={"w": regularizer},
initializers={"w": initializer},
)
outputs = snt.BatchApply(encoder_module)(after_dropout)
return outputs
@snt.reuse_variables
def relation_network(self, inputs):
with tf.variable_scope("relation_network"):
regularizer = tf.contrib.layers.l2_regularizer(self._l2_penalty_weight)
initializer = tf.initializers.glorot_uniform(dtype=self._float_dtype)
relation_network_module = snt.nets.MLP(
[2 * self._num_latents] * 3,
use_bias=False,
regularizers={"w": regularizer},
initializers={"w": initializer},
)
total_num_examples = self.num_examples_per_class*self.num_classes
inputs = tf.reshape(inputs, [total_num_examples, self._num_latents])
left = tf.tile(tf.expand_dims(inputs, 1), [1, total_num_examples, 1])
right = tf.tile(tf.expand_dims(inputs, 0), [total_num_examples, 1, 1])
concat_codes = tf.concat([left, right], axis=-1)
outputs = snt.BatchApply(relation_network_module)(concat_codes)
outputs = tf.reduce_mean(outputs, axis=1)
# 2 * latents, because we are returning means and variances of a Gaussian
outputs = tf.reshape(outputs, [self.num_classes,
self.num_examples_per_class,
2 * self._num_latents])
return outputs
@snt.reuse_variables
def decoder(self, inputs):
with tf.variable_scope("decoder"):
l2_regularizer = tf.contrib.layers.l2_regularizer(self._l2_penalty_weight)
orthogonality_reg = get_orthogonality_regularizer(
self._orthogonality_penalty_weight)
initializer = tf.initializers.glorot_uniform(dtype=self._float_dtype)
# 2 * embedding_dim, because we are returning means and variances
decoder_module = snt.Linear(
2 * self.embedding_dim,
use_bias=False,
regularizers={"w": l2_regularizer},
initializers={"w": initializer},
)
outputs = snt.BatchApply(decoder_module)(inputs)
self._orthogonality_reg = orthogonality_reg(decoder_module.w)
return outputs
def average_codes_per_class(self, codes):
codes = tf.reduce_mean(codes, axis=1, keep_dims=True) # K dimension
# Keep the shape (N, K, *)
codes = tf.tile(codes, [1, self.num_examples_per_class, 1])
return codes
def possibly_sample(self, distribution_params, stddev_offset=0.):
means, unnormalized_stddev = tf.split(distribution_params, 2, axis=-1)
stddev = tf.exp(unnormalized_stddev)
stddev -= (1. - stddev_offset)
stddev = tf.maximum(stddev, 1e-10)
distribution = tfp.distributions.Normal(loc=means, scale=stddev)
if not self.is_meta_training:
return means, tf.constant(0., dtype=self._float_dtype)
samples = distribution.sample()
kl_divergence = self.kl_divergence(samples, distribution)
return samples, kl_divergence
def kl_divergence(self, samples, normal_distribution):
random_prior = tfp.distributions.Normal(
loc=tf.zeros_like(samples), scale=tf.ones_like(samples))
kl = tf.reduce_mean(
normal_distribution.log_prob(samples) - random_prior.log_prob(samples))
return kl
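  # Note: kl_divergence is a single-sample Monte Carlo estimate of KL(q || p)
  # with a standard-normal prior p, computed as the mean of
  # log q(z) - log p(z) over the sample drawn in possibly_sample.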
def predict(self, inputs, weights):
after_dropout = tf.nn.dropout(inputs, rate=self.dropout_rate)
    # This is the 3-dimensional equivalent of a matrix product, where we sum
    # over
# the last (embedding_dim) dimension. We get [N, K, N, K] tensor as output.
per_image_predictions = tf.einsum("ijk,lmk->ijlm", after_dropout, weights)
# Predictions have shape [N, K, N]: for each image ([N, K] of them), what
# is the probability of a given class (N)?
predictions = tf.reduce_mean(per_image_predictions, axis=-1)
return predictions
def calculate_inner_loss(self, inputs, true_outputs, classifier_weights):
model_outputs = self.predict(inputs, classifier_weights)
model_predictions = tf.argmax(
model_outputs, -1, output_type=self._int_dtype)
accuracy = tf.contrib.metrics.accuracy(model_predictions,
tf.squeeze(true_outputs, axis=-1))
return self.loss_fn(model_outputs, true_outputs), accuracy
def save_problem_instance_stats(self, instance):
num_classes, num_examples_per_class, embedding_dim = instance.get_shape()
if hasattr(self, "num_classes"):
assert self.num_classes == num_classes, (
"Given different number of classes (N in N-way) in consecutive runs.")
if hasattr(self, "num_examples_per_class"):
assert self.num_examples_per_class == num_examples_per_class, (
"Given different number of examples (K in K-shot) in consecutive"
"runs.")
if hasattr(self, "embedding_dim"):
assert self.embedding_dim == embedding_dim, (
"Given different embedding dimension in consecutive runs.")
self.num_classes = num_classes
self.num_examples_per_class = num_examples_per_class
self.embedding_dim = embedding_dim
@property
def dropout_rate(self):
return self._dropout_rate if self.is_meta_training else 0.0
def loss_fn(self, model_outputs, original_classes):
original_classes = tf.squeeze(original_classes, axis=-1)
# Tensorflow doesn't handle second order gradients of a sparse_softmax yet.
one_hot_outputs = tf.one_hot(original_classes, depth=self.num_classes)
return tf.nn.softmax_cross_entropy_with_logits_v2(
labels=one_hot_outputs, logits=model_outputs)
def grads_and_vars(self, metatrain_loss):
"""Computes gradients of metatrain_loss, avoiding NaN.
Uses a fixed penalty of 1e-4 to enforce only the l2 regularization (and not
minimize the loss) when metatrain_loss or any of its gradients with respect
to trainable_vars are NaN. In practice, this approach pulls the variables
back into a feasible region of the space when the loss or its gradients are
not defined.
Args:
metatrain_loss: A tensor with the LEO meta-training loss.
Returns:
A tuple with:
metatrain_gradients: A list of gradient tensors.
metatrain_variables: A list of variables for this LEO model.
"""
metatrain_variables = self.trainable_variables
metatrain_gradients = tf.gradients(metatrain_loss, metatrain_variables)
nan_loss_or_grad = tf.logical_or(
tf.is_nan(metatrain_loss),
tf.reduce_any([tf.reduce_any(tf.is_nan(g))
for g in metatrain_gradients]))
regularization_penalty = (
1e-4 / self._l2_penalty_weight * self._l2_regularization)
zero_or_regularization_gradients = [
g if g is not None else tf.zeros_like(v)
        for g, v in zip(tf.gradients(regularization_penalty,
metatrain_variables), metatrain_variables)]
metatrain_gradients = tf.cond(nan_loss_or_grad,
lambda: zero_or_regularization_gradients,
lambda: metatrain_gradients, strict=True)
return metatrain_gradients, metatrain_variables
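  # Usage sketch (the outer training loop is assumed here, not defined in this
  # module):
  #
  #   train_loss, train_accuracy = leo(problem_instance)
  #   gradients, variables = leo.grads_and_vars(train_loss)
  #   optimizer = tf.train.AdamOptimizer(learning_rate=outer_lr)
  #   train_op = optimizer.apply_gradients(list(zip(gradients, variables)))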
@property
def _l2_regularization(self):
return tf.cast(
tf.reduce_sum(tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)),
dtype=self._float_dtype)
@property
def _decoder_orthogonality_reg(self):
return self._orthogonality_reg
|
leo-master
|
model.py
|
# Copyright 2018 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Short utility functions for LEO."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import pickle
from six.moves import range
import tensorflow as tf
import config
import data
def unpack_data(problem_instance):
"""Map data.ProblemInstance to a list of Tensors, to process with map_fn."""
if isinstance(problem_instance, data.ProblemInstance):
return list(problem_instance)
return problem_instance
def copy_checkpoint(checkpoint_path, global_step, accuracy):
"""Copies the checkpoint to a separate directory."""
tmp_checkpoint_path = os.path.join(checkpoint_path, "tmp_best_checkpoint")
best_checkpoint_path = os.path.join(checkpoint_path, "best_checkpoint")
if _is_previous_accuracy_better(best_checkpoint_path, accuracy):
tf.logging.info("Not copying the checkpoint: there is a better one from "
"before a preemption.")
return
checkpoint_regex = os.path.join(checkpoint_path,
"model.ckpt-{}.*".format(global_step))
checkpoint_files = tf.gfile.Glob(checkpoint_regex)
graph_file = os.path.join(checkpoint_path, "graph.pbtxt")
checkpoint_files.append(graph_file)
_save_files_in_tmp_directory(tmp_checkpoint_path, checkpoint_files, accuracy)
new_checkpoint_index_file = os.path.join(tmp_checkpoint_path, "checkpoint")
with tf.gfile.Open(new_checkpoint_index_file, "w") as f:
f.write("model_checkpoint_path: \"{}/model.ckpt-{}\"\n".format(
best_checkpoint_path, global_step))
  # We first copy the better checkpoint to a temporary directory, and only
  # move it into place once it is fully created, to avoid an inconsistent
  # state if the job is preempted while the checkpoint is being copied.
if tf.gfile.Exists(best_checkpoint_path):
tf.gfile.DeleteRecursively(best_checkpoint_path)
tf.gfile.Rename(tmp_checkpoint_path, best_checkpoint_path)
tf.logging.info("Copied new best checkpoint with accuracy %.5f", accuracy)
def _save_files_in_tmp_directory(tmp_checkpoint_path, checkpoint_files,
accuracy):
"""Saves the checkpoint files and accuracy in a temporary directory."""
if tf.gfile.Exists(tmp_checkpoint_path):
tf.logging.info("The temporary directory exists, because job was preempted "
"before it managed to move it. We're removing it.")
tf.gfile.DeleteRecursively(tmp_checkpoint_path)
tf.gfile.MkDir(tmp_checkpoint_path)
def dump_in_best_checkpoint_path(obj, filename):
full_path = os.path.join(tmp_checkpoint_path, filename)
with tf.gfile.Open(full_path, "wb") as f:
pickle.dump(obj, f)
for file_ in checkpoint_files:
just_filename = file_.split("/")[-1]
tf.gfile.Copy(
file_,
os.path.join(tmp_checkpoint_path, just_filename),
overwrite=False)
dump_in_best_checkpoint_path(config.get_inner_model_config(), "inner_config")
dump_in_best_checkpoint_path(config.get_outer_model_config(), "outer_config")
dump_in_best_checkpoint_path(accuracy, "accuracy")
def _is_previous_accuracy_better(best_checkpoint_path, accuracy):
if not tf.gfile.Exists(best_checkpoint_path):
return False
previous_accuracy_file = os.path.join(best_checkpoint_path, "accuracy")
with tf.gfile.Open(previous_accuracy_file, "rb") as f:
previous_accuracy = pickle.load(f)
return previous_accuracy > accuracy
def evaluate_and_average(session, tensor, num_estimates):
tensor_value_estimates = [session.run(tensor) for _ in range(num_estimates)]
average_tensor_value = sum(tensor_value_estimates) / num_estimates
return average_tensor_value
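# Usage sketch: averaging a stochastic metric over several evaluations. The
# `metavalid_accuracy` tensor is illustrative and would be built by the
# training script.
#
#   mean_accuracy = evaluate_and_average(sess, metavalid_accuracy, 10)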
|
leo-master
|
utils.py
|
# Copyright 2018 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Creates problem instances for LEO."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import os
import pickle
import random
import enum
import numpy as np
import six
import tensorflow as tf
NDIM = 640
ProblemInstance = collections.namedtuple(
"ProblemInstance",
["tr_input", "tr_output", "tr_info", "val_input", "val_output", "val_info"])
class StrEnum(enum.Enum):
"""An Enum represented by a string."""
def __str__(self):
return self.value
def __repr__(self):
return self.__str__()
class MetaDataset(StrEnum):
"""Datasets supported by the DataProvider class."""
MINI = "miniImageNet"
TIERED = "tieredImageNet"
class EmbeddingCrop(StrEnum):
"""Embedding types supported by the DataProvider class."""
CENTER = "center"
MULTIVIEW = "multiview"
class MetaSplit(StrEnum):
"""Meta-datasets split supported by the DataProvider class."""
TRAIN = "train"
VALID = "val"
TEST = "test"
class DataProvider(object):
"""Creates problem instances from a specific split and dataset."""
def __init__(self, dataset_split, config, verbose=False):
self._dataset_split = MetaSplit(dataset_split)
self._config = config
self._verbose = verbose
self._check_config()
self._index_data(self._load_data())
def _check_config(self):
"""Checks configuration arguments of constructor."""
self._config["dataset_name"] = MetaDataset(self._config["dataset_name"])
self._config["embedding_crop"] = EmbeddingCrop(
self._config["embedding_crop"])
if self._config["dataset_name"] == MetaDataset.TIERED:
error_message = "embedding_crop: {} not supported for {}".format(
self._config["embedding_crop"], self._config["dataset_name"])
assert self._config[
"embedding_crop"] == EmbeddingCrop.CENTER, error_message
def _load_data(self):
"""Loads data into memory and caches ."""
raw_data = self._load(
tf.gfile.Open(self._get_full_pickle_path(self._dataset_split), "rb"))
if self._dataset_split == MetaSplit.TRAIN and self._config["train_on_val"]:
valid_data = self._load(
tf.gfile.Open(self._get_full_pickle_path(MetaSplit.VALID), "rb"))
for key in valid_data:
if self._verbose:
tf.logging.info(str([key, raw_data[key].shape]))
raw_data[key] = np.concatenate([raw_data[key],
valid_data[key]], axis=0)
if self._verbose:
tf.logging.info(str([key, raw_data[key].shape]))
if self._verbose:
tf.logging.info(
str([(k, np.shape(v)) for k, v in six.iteritems(raw_data)]))
return raw_data
def _load(self, opened_file):
if six.PY2:
result = pickle.load(opened_file)
else:
result = pickle.load(opened_file, encoding="latin1") # pylint: disable=unexpected-keyword-arg
return result
def _index_data(self, raw_data):
"""Builds an index of images embeddings by class."""
self._all_class_images = collections.OrderedDict()
self._image_embedding = collections.OrderedDict()
for i, k in enumerate(raw_data["keys"]):
_, class_label, image_file = k.split("-")
image_file_class_label = image_file.split("_")[0]
assert class_label == image_file_class_label
self._image_embedding[image_file] = raw_data["embeddings"][i]
if class_label not in self._all_class_images:
self._all_class_images[class_label] = []
self._all_class_images[class_label].append(image_file)
self._check_data_index(raw_data)
self._all_class_images = collections.OrderedDict([
(k, np.array(v)) for k, v in six.iteritems(self._all_class_images)
])
if self._verbose:
tf.logging.info(str([len(raw_data), len(self._all_class_images),
len(self._image_embedding)]))
def _check_data_index(self, raw_data):
"""Performs checks of the data index and image counts per class."""
n = raw_data["keys"].shape[0]
error_message = "{} != {}".format(len(self._image_embedding), n)
assert len(self._image_embedding) == n, error_message
error_message = "{} != {}".format(raw_data["embeddings"].shape[0], n)
assert raw_data["embeddings"].shape[0] == n, error_message
all_class_folders = list(self._all_class_images.keys())
error_message = "no duplicate class names"
assert len(set(all_class_folders)) == len(all_class_folders), error_message
image_counts = set([len(class_images)
for class_images in self._all_class_images.values()])
error_message = ("len(image_counts) should have at least one element but "
"is: {}").format(image_counts)
assert len(image_counts) >= 1, error_message
assert min(image_counts) > 0
def _get_full_pickle_path(self, split_name):
full_pickle_path = os.path.join(
self._config["data_path"],
str(self._config["dataset_name"]),
str(self._config["embedding_crop"]),
"{}_embeddings.pkl".format(split_name))
if self._verbose:
tf.logging.info("get_one_emb_instance: folder_path: {}".format(
full_pickle_path))
return full_pickle_path
def get_instance(self, num_classes, tr_size, val_size):
"""Samples a random N-way K-shot classification problem instance.
Args:
num_classes: N in N-way classification.
tr_size: K in K-shot; number of training examples per class.
val_size: number of validation examples per class.
Returns:
A tuple with 6 Tensors with the following shapes:
- tr_input: (num_classes, tr_size, NDIM): training image embeddings.
- tr_output: (num_classes, tr_size, 1): training image labels.
- tr_info: (num_classes, tr_size): training image file names.
- val_input: (num_classes, val_size, NDIM): validation image embeddings.
- val_output: (num_classes, val_size, 1): validation image labels.
        - val_info: (num_classes, val_size): validation image file names.
"""
def _build_one_instance_py():
"""Builds a random problem instance using data from specified classes."""
class_list = list(self._all_class_images.keys())
sample_count = (tr_size + val_size)
shuffled_folders = class_list[:]
random.shuffle(shuffled_folders)
shuffled_folders = shuffled_folders[:num_classes]
error_message = "len(shuffled_folders) {} is not num_classes: {}".format(
len(shuffled_folders), num_classes)
assert len(shuffled_folders) == num_classes, error_message
image_paths = []
class_ids = []
embeddings = self._image_embedding
for class_id, class_name in enumerate(shuffled_folders):
all_images = self._all_class_images[class_name]
all_images = np.random.choice(all_images, sample_count, replace=False)
error_message = "{} == {} failed".format(len(all_images), sample_count)
assert len(all_images) == sample_count, error_message
image_paths.append(all_images)
class_ids.append([[class_id]]*sample_count)
label_array = np.array(class_ids, dtype=np.int32)
if self._verbose:
tf.logging.info(label_array.shape)
path_array = np.array(image_paths)
if self._verbose:
tf.logging.info(path_array.shape)
embedding_array = np.array([[embeddings[image_path]
for image_path in class_paths]
for class_paths in path_array])
if self._verbose:
tf.logging.info(embedding_array.shape)
return embedding_array, label_array, path_array
output_list = tf.py_func(_build_one_instance_py, [],
[tf.float32, tf.int32, tf.string])
instance_input, instance_output, instance_info = output_list
instance_input = tf.nn.l2_normalize(instance_input, axis=-1)
instance_info = tf.regex_replace(instance_info, "\x00*", "")
if self._verbose:
tf.logging.info("input_batch: {} ".format(instance_input.shape))
tf.logging.info("output_batch: {} ".format(instance_output.shape))
tf.logging.info("info_batch: {} ".format(instance_info.shape))
split_sizes = [tr_size, val_size]
tr_input, val_input = tf.split(instance_input, split_sizes, axis=1)
tr_output, val_output = tf.split(instance_output, split_sizes, axis=1)
tr_info, val_info = tf.split(instance_info, split_sizes, axis=1)
if self._verbose:
tf.logging.info("tr_output: {} ".format(tr_output))
tf.logging.info("val_output: {}".format(val_output))
with tf.control_dependencies(
self._check_labels(num_classes, tr_size, val_size,
tr_output, val_output)):
tr_output = tf.identity(tr_output)
val_output = tf.identity(val_output)
return tr_input, tr_output, tr_info, val_input, val_output, val_info
def get_batch(self, batch_size, num_classes, tr_size, val_size,
num_threads=10):
"""Returns a batch of random N-way K-shot classification problem instances.
Args:
batch_size: number of problem instances in the batch.
num_classes: N in N-way classification.
tr_size: K in K-shot; number of training examples per class.
val_size: number of validation examples per class.
num_threads: number of threads used to sample problem instances in
parallel.
Returns:
A ProblemInstance of Tensors with the following shapes:
- tr_input: (batch_size, num_classes, tr_size, NDIM): training image
embeddings.
- tr_output: (batch_size, num_classes, tr_size, 1): training image
labels.
- tr_info: (batch_size, num_classes, tr_size): training image file
names.
- val_input: (batch_size, num_classes, val_size, NDIM): validation
image embeddings.
- val_output: (batch_size, num_classes, val_size, 1): validation
image labels.
- val_info: (batch_size, num_classes, val_size): validation image
file names.
"""
if self._verbose:
num_threads = 1
one_instance = self.get_instance(num_classes, tr_size, val_size)
tr_data_size = (num_classes, tr_size)
val_data_size = (num_classes, val_size)
task_batch = tf.train.shuffle_batch(one_instance, batch_size=batch_size,
capacity=1000, min_after_dequeue=0,
enqueue_many=False,
shapes=[tr_data_size + (NDIM,),
tr_data_size + (1,),
tr_data_size,
val_data_size + (NDIM,),
val_data_size + (1,),
val_data_size],
num_threads=num_threads)
if self._verbose:
tf.logging.info(task_batch)
return ProblemInstance(*task_batch)
def _check_labels(self, num_classes, tr_size, val_size,
tr_output, val_output):
correct_label_sum = (num_classes*(num_classes-1))//2
tr_label_sum = tf.reduce_sum(tr_output)/tr_size
val_label_sum = tf.reduce_sum(val_output)/val_size
all_label_asserts = [
tf.assert_equal(tf.to_int32(tr_label_sum), correct_label_sum),
tf.assert_equal(tf.to_int32(val_label_sum), correct_label_sum),
]
return all_label_asserts
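# Usage sketch: building a batch of 5-way 1-shot problem instances for
# meta-training; `data_config` is the dict produced by config.get_data_config().
#
#   provider = DataProvider("train", data_config)
#   batch = provider.get_batch(
#       batch_size=12, num_classes=5, tr_size=1, val_size=15)
#   # batch.tr_input has shape (12, 5, 1, NDIM) = (12, 5, 1, 640).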
|
leo-master
|
data.py
|
# Copyright 2019 The DMLab2D Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Random agent for running against DM Lab2D environments."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import json
import numpy as np
import pygame
import dmlab2d
from dmlab2d import runfiles_helper
def _make_int32_distribution(random, minimum, maximum):
def function():
return random.randint(minimum, maximum + 1)
return function
def _make_float64_distribution(random, minimum, maximum):
def function():
return random.uniform(minimum, maximum)
return function
class PyGameRandomAgent(object):
"""Random agent works with int32 or float64 bounded actions."""
def __init__(self, action_spec, observation_name, observation_spec, seed,
scale):
"""Create a PyGame agent.
Args:
action_spec: Environment action spec used to generate random actions.
observation_name: Name of observation to render each frame.
observation_spec: Environment observation spec for creating PyGame window.
seed: Agent seed used for generating random actions.
scale: Scales screen.
"""
self._observation_name = observation_name
random = np.random.RandomState(seed)
self._actions = []
self._scores = []
self._scale = scale
for name, spec in action_spec.items():
if spec.dtype == np.dtype('int32'):
self._actions.append(
(name, _make_int32_distribution(random, spec.minimum,
spec.maximum)))
elif spec.dtype == np.dtype('float64'):
self._actions.append(
(name, _make_float64_distribution(random, spec.minimum,
spec.maximum)))
else:
print("Warning '{}' is not supported".format(spec))
obs_spec = observation_spec[observation_name]
self._setup_py_game(obs_spec.shape)
def _setup_py_game(self, shape):
pygame.init()
pygame.display.set_caption('DM Lab2d')
self._game_display = pygame.display.set_mode(
(int(shape[1] * self._scale), int(shape[0] * self._scale)))
def _render_observation(self, observation):
obs = np.transpose(observation, (1, 0, 2))
surface = pygame.surfarray.make_surface(obs)
rect = surface.get_rect()
surf = pygame.transform.scale(
surface, (int(rect[2] * self._scale), int(rect[3] * self._scale)))
self._game_display.blit(surf, dest=(0, 0))
pygame.display.update()
def step(self, timestep):
"""Renders timestep and returns random actions according to spec."""
self._render_observation(timestep.observation[self._observation_name])
display_score_dirty = False
if timestep.reward is not None:
if timestep.reward != 0:
self._scores[-1] += timestep.reward
display_score_dirty = True
else:
self._scores.append(0)
display_score_dirty = True
if display_score_dirty:
pygame.display.set_caption('%d score' % self._scores[-1])
return {name: gen() for name, gen in self._actions}
def print_stats(self):
print('Scores: ' + ', '.join(str(score) for score in self._scores))
def _create_environment(args):
"""Creates an environment.
Args:
args: See `main()` for description of args.
Returns:
dmlab2d.Environment with one observation.
"""
args.settings['levelName'] = args.level_name
lab2d = dmlab2d.Lab2d(runfiles_helper.find(), args.settings)
return dmlab2d.Environment(lab2d, [args.observation], args.env_seed)
def _run(args):
"""Runs a random agent against an environment rendering the results.
Args:
args: See `main()` for description of args.
"""
env = _create_environment(args)
agent = PyGameRandomAgent(env.action_spec(), args.observation,
env.observation_spec(), args.agent_seed, args.scale)
for _ in range(args.num_episodes):
timestep = env.reset()
# Run single episode.
while True:
# Query PyGame for early termination.
if any(event.type == pygame.QUIT for event in pygame.event.get()):
print('Exit early last score may be truncated:')
agent.print_stats()
return
action = agent.step(timestep)
timestep = env.step(action)
if timestep.last():
# Observe last frame of episode.
agent.step(timestep)
break
# All episodes completed, report per episode.
agent.print_stats()
def main():
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument(
'--level_name', type=str, default='clean_up', help='Level name to load')
parser.add_argument(
'--observation',
type=str,
default='WORLD.RGB',
help='Observation to render')
parser.add_argument(
'--settings', type=json.loads, default={}, help='Settings as JSON string')
parser.add_argument(
'--env_seed', type=int, default=0, help='Environment seed')
parser.add_argument('--agent_seed', type=int, default=0, help='Agent seed')
parser.add_argument(
'--num_episodes', type=int, default=1, help='Number of episodes')
parser.add_argument(
'--scale', type=float, default=1, help='Scale to render screen')
args = parser.parse_args()
_run(args)
if __name__ == '__main__':
main()
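# Example invocation (sketch; the exact module path depends on how the package
# was built or installed):
#
#   python -m dmlab2d.random_agent --level_name=clean_up --num_episodes=3 \
#       --observation=WORLD.RGB --scale=2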
|
lab2d-main
|
dmlab2d/random_agent.py
|
# Copyright 2020 The DMLab2D Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for dmlab2d.settings_helper."""
from absl.testing import absltest
from dmlab2d import settings_helper
class SettingsHelperTest(absltest.TestCase):
def test_flatten_real(self):
config = {}
config['levelName'] = 'Name'
config['levelDirectory'] = 'Dir'
config['simulation'] = {'positions': ('cat', 1, 2)}
self.assertEqual(
settings_helper.flatten_args(config), {
'levelName': 'Name',
'levelDirectory': 'Dir',
'simulation.positions.1': 'cat',
'simulation.positions.2': '1',
'simulation.positions.3': '2',
})
def test_flatten_args_self_ref(self):
a = {'key': 1}
b = {'key': 2}
b['a'] = a
a['b'] = b
self.assertEqual(
settings_helper.flatten_args(a), {
'key': '1',
'b.key': '2',
})
self.assertEqual(
settings_helper.flatten_args(b), {
'key': '2',
'a.key': '1',
})
def test_flatten_args_ref_same(self):
b = {'key': 2}
a = {'b1': b, 'b2': b}
self.assertEqual(
settings_helper.flatten_args(a), {
'b1.key': '2',
'b2.key': '2',
})
self.assertEqual(settings_helper.flatten_args(b), {'key': '2'})
def test_flatten_args(self):
self.assertEqual(settings_helper.flatten_args({'key': 10}), {'key': '10'})
def test_flatten_args_tree(self):
args = {
'level_name': {
'rewards': {
'flag': 1.0
}
},
'team_rewards': {
'flag': 0.0
},
'teams': [1, 5],
'flags':
True,
'foo':
False,
'bar':
None,
'nested': [
{
'a': 3,
'b': 5
},
{
'c': tuple(range(12))
}, # Verify sorting is correct.
]
}
flat_args = settings_helper.flatten_args(args)
expected_args = {
'level_name.rewards.flag': '1.0',
'team_rewards.flag': '0.0',
'teams.1': '1',
'teams.2': '5',
'flags': 'true',
'foo': 'false',
'bar': 'none',
'nested.1.a': '3',
'nested.1.b': '5',
'nested.2.c.1': '0',
'nested.2.c.2': '1',
'nested.2.c.3': '2',
'nested.2.c.4': '3',
'nested.2.c.5': '4',
'nested.2.c.6': '5',
'nested.2.c.7': '6',
'nested.2.c.8': '7',
'nested.2.c.9': '8',
'nested.2.c.10': '9',
'nested.2.c.11': '10',
'nested.2.c.12': '11',
}
self.assertDictEqual(flat_args, expected_args)
if __name__ == '__main__':
absltest.main()
|
lab2d-main
|
dmlab2d/settings_helper_test.py
|
# Copyright 2019 The DMLab2D Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Human testing for DMLab2d levels.
Use `[/]` to switch between players.
Use `TAB, SHIFT-TAB` to switch between levels.
Use `R` to restart level.
"""
import enum
from typing import Callable, Generator, List, Mapping, Optional
import dataclasses
import numpy as np
import pygame
import dmlab2d
from dmlab2d import runfiles_helper
MAX_SCREEN_WIDTH = 960
MAX_SCREEN_HEIGHT = 640
def get_direction_pressed() -> int:
"""Gets direction pressed."""
key_pressed = pygame.key.get_pressed()
if key_pressed[pygame.K_UP] or key_pressed[pygame.K_w]:
return 1
if key_pressed[pygame.K_RIGHT] or key_pressed[pygame.K_d]:
return 2
if key_pressed[pygame.K_DOWN] or key_pressed[pygame.K_s]:
return 3
if key_pressed[pygame.K_LEFT] or key_pressed[pygame.K_a]:
return 4
return 0
def get_turn_pressed() -> int:
"""Calculates turn amount."""
key_pressed = pygame.key.get_pressed()
if key_pressed[pygame.K_DELETE] or key_pressed[pygame.K_q]:
return -1
if key_pressed[pygame.K_PAGEDOWN] or key_pressed[pygame.K_e]:
return 1
return 0
def get_left_control_pressed() -> int:
return 1 if pygame.key.get_pressed()[pygame.K_LCTRL] else 0
def get_space_key_pressed() -> int:
return 1 if pygame.key.get_pressed()[pygame.K_SPACE] else 0
class StepType(enum.Enum):
FIRST = 0
MID = 1
LAST = 2
@dataclasses.dataclass
class Step:
env: object
reward: Optional[float]
type: StepType
player: int
episode: int
class Renderer:
"""Creates a pygame window for playing an environment."""
def __init__(self,
config: Mapping[str, str],
action_map: Mapping[str, Callable[[], int]],
rgb_observation: str = 'RGB',
player_prefixes: Optional[List[str]] = None,
frames_per_second: Optional[int] = None):
env = dmlab2d.Lab2d(runfiles_helper.find(), config)
self._player = 0
self._env = env
self._player_prefixes = player_prefixes if player_prefixes else ['']
self._rgb_observation = rgb_observation
self._action_map = action_map
self._frames_per_second = frames_per_second
self._action_names = env.action_discrete_names()
self._actions = np.zeros([len(self._action_names)], dtype=np.intc)
self._observation_names = set(self._env.observation_names())
def run(self) -> Generator[Step, None, None]:
"""Run the environment."""
self._init_pygame()
episode = 0
while episode is not None:
episode = yield from self._play_episode(episode)
def _play_episode(self, episode) -> Generator[Step, None, Optional[int]]:
"""Plays the environment for a single episode."""
self._env.start(episode, episode)
yield Step(
env=self._env,
reward=None,
type=StepType.FIRST,
player=self._player,
episode=episode)
num_players = len(self._player_prefixes)
while True:
key_pressed = False
for event in pygame.event.get():
if event.type == pygame.QUIT:
return None
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_TAB:
if event.mod & pygame.KMOD_SHIFT:
self._player = (self._player + num_players - 1) % num_players
else:
self._player = (self._player + 1) % num_players
elif event.key == pygame.K_ESCAPE:
return None
elif event.key == pygame.K_r:
return episode
elif event.key == pygame.K_LEFTBRACKET:
return episode - 1
elif event.key == pygame.K_RIGHTBRACKET:
return episode + 1
else:
key_pressed = True
self._update_actions()
if self._frames_per_second or key_pressed:
self._env.act_discrete(self._actions)
status, reward = self._env.advance()
if status != dmlab2d.RUNNING:
yield Step(
env=self._env,
reward=reward,
type=StepType.LAST,
player=self._player,
episode=episode)
return episode + 1
yield Step(
env=self._env,
reward=reward,
type=StepType.MID,
player=self._player,
episode=episode)
self._update_screen()
if self._frames_per_second:
self._clock.tick(self._frames_per_second)
else:
self._clock.tick(60)
return None
def _init_pygame(self):
"""Constructs pygame window based on first player's observation spec."""
pygame.init()
pygame.display.set_caption(self._env.name())
scale = 1
prefix = self._player_prefixes[self._player]
if prefix + self._rgb_observation in self._observation_names:
obs_spec = self._env.observation_spec(prefix + self._rgb_observation)
elif self._rgb_observation in self._observation_names:
obs_spec = self._env.observation_spec(self._rgb_observation)
else:
raise ValueError(f'Cannot find observation {self._rgb_observation}')
observation_shape = obs_spec['shape']
observation_height = observation_shape[0]
observation_width = observation_shape[1]
scale = min(MAX_SCREEN_HEIGHT // observation_height,
MAX_SCREEN_WIDTH // observation_width)
self._game_display = pygame.display.set_mode(
(observation_width * scale, observation_height * scale))
self._scale = scale
self._clock = pygame.time.Clock()
def _update_actions(self):
"""Reads action map and applies to current player."""
prefix = self._player_prefixes[self._player]
for i, name in enumerate(self._action_names):
if not name.startswith(prefix):
continue
action = name[len(prefix):]
if action in self._action_map:
self._actions[i] = self._action_map[action]()
def _player_observation(self):
"""Return observation of current player."""
prefix = self._player_prefixes[self._player]
if prefix + self._rgb_observation in self._observation_names:
return self._env.observation(prefix + self._rgb_observation)
elif self._rgb_observation in self._observation_names:
return self._env.observation(self._rgb_observation)
raise ValueError(
f'Cannot find observation {prefix + self._rgb_observation}')
def _update_screen(self):
# PyGame is column major!
obs = np.transpose(self._player_observation(), (1, 0, 2))
surface = pygame.surfarray.make_surface(obs)
rect = surface.get_rect()
surf = pygame.transform.scale(
surface, (rect[2] * self._scale, rect[3] * self._scale))
self._game_display.blit(surf, dest=(0, 0))
pygame.display.update()
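# Usage sketch: wiring the keyboard helpers above into an action map and
# driving the renderer. The action names ('MOVE', 'TURN', 'FIRE') and the
# 'WORLD.RGB' observation are illustrative; the real names come from the level.
#
#   action_map = {
#       'MOVE': get_direction_pressed,
#       'TURN': get_turn_pressed,
#       'FIRE': get_space_key_pressed,
#   }
#   renderer = Renderer({'levelName': 'clean_up'}, action_map,
#                       rgb_observation='WORLD.RGB', frames_per_second=8)
#   for step in renderer.run():
#     if step.reward:
#       print('reward:', step.reward)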
|
lab2d-main
|
dmlab2d/ui_renderer.py
|
# Copyright 2020 The DMLab2D Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Function for flattening dictionary settings."""
import numbers
from typing import Mapping, Sequence
def _flatten_args(pairs_in, args_out, prefix, visited_stack):
"""Helper function for flatten_args. See `flatten_args` below for details."""
for key, v in pairs_in:
if not isinstance(key, str):
raise ValueError('Keys must be strings. %r' % key)
flat_key = prefix + '.' + key if prefix else key
if v is None:
args_out[flat_key] = 'none'
elif isinstance(v, str):
args_out[flat_key] = v
elif isinstance(v, bool):
args_out[flat_key] = 'true' if v else 'false'
elif isinstance(v, numbers.Number):
args_out[flat_key] = str(v)
elif isinstance(v, Mapping):
if not any(v is entry for entry in visited_stack):
_flatten_args(v.items(), args_out, flat_key, visited_stack + [v])
elif isinstance(v, Sequence):
if not any(v is entry for entry in visited_stack):
_flatten_args(((str(i + 1), vv) for i, vv in enumerate(v)), args_out,
flat_key, visited_stack + [v])
else:
raise ValueError('Value for \'{}\' cannot be type: \'{}\''.format(
flat_key, str(type(v))))
def flatten_args(args_in):
"""Converts a dictionary of dictionarys and lists into a flat table.
Args:
    args_in: dictionary containing a hierarchy of dictionaries and lists. Leaf
      values can be strings, bools, or numbers.
Returns:
A flat dictionary with keys separated by '.' and string values.
"""
args_out = {}
_flatten_args(args_in.items(), args_out, None, [args_in])
return args_out
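# Example (mirrors settings_helper_test.py): nested mappings and sequences are
# flattened into dotted string keys with 1-based indices, and every leaf value
# becomes a string.
#
#   flatten_args({'simulation': {'positions': ('cat', 1, 2)}})
#   # => {'simulation.positions.1': 'cat',
#   #     'simulation.positions.2': '1',
#   #     'simulation.positions.3': '2'}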
|
lab2d-main
|
dmlab2d/settings_helper.py
|
# Copyright 2019 The DMLab2D Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""DeepMind Lab2D environment."""
from typing import Collection, Sequence, Tuple, Union
import dm_env
import numpy as np
import dmlab2d.dmlab2d_pybind as dmlab2d_pybind
Lab2d = dmlab2d_pybind.Lab2d
EnvironmentStatus = dmlab2d_pybind.EnvironmentStatus
RUNNING = dmlab2d_pybind.RUNNING
TERMINATED = dmlab2d_pybind.TERMINATED
INTERRUPTED = dmlab2d_pybind.INTERRUPTED
PropertyAttribute = dmlab2d_pybind.PropertyAttribute
class Environment(dm_env.Environment):
"""Environment class for DeepMind Lab2D.
This environment extends the `dm_env` interface with additional methods.
For details, see https://github.com/deepmind/dm_env
"""
def __init__(self, env, observation_names, seed=None):
"""DeepMind Lab2D environment.
Args:
      env: dmlab2d.Lab2d - the raw environment to wrap.
      observation_names: List<string> - names of the observations to expose.
seed: int - seed the environment.
"""
self._env = env
self._obs_names = observation_names
self._rng = np.random.RandomState(seed=seed)
self._next_episode = 0
self._reset_next_step = True
self._status = RUNNING
action_discrete_names = self._env.action_discrete_names()
action_continuous_names = self._env.action_continuous_names()
action_text_names = self._env.action_text_names()
self._observation_spec = self._make_observation_spec()
self._action_spec = self._make_action_spec(action_discrete_names,
action_continuous_names,
action_text_names)
self._act_discrete_map = {
name: i for i, name in enumerate(action_discrete_names)
}
self._act_continuous_map = {
name: i for i, name in enumerate(action_continuous_names)
}
self._act_text_map = {name: i for i, name in enumerate(action_text_names)}
self._act_discrete = np.zeros(len(action_discrete_names), dtype='int32')
self._act_continuous = np.zeros(
len(action_continuous_names), dtype='float64')
self._act_text = np.array([b'' for _ in range(len(action_text_names))],
dtype=object)
def reset(self):
"""See base class."""
self._reset_next_step = False
self._env.start(self._next_episode, seed=self._rng.randint(0, 2**31))
self._next_episode += 1
return dm_env.restart(self.observation())
def _read_action(self, spec, action):
if isinstance(spec, list):
for spec_i, act_i in zip(spec, action):
self._read_action(spec_i, act_i)
elif isinstance(spec, dict):
for spec_key in spec:
if spec_key in action:
self._read_action(spec[spec_key], action[spec_key])
else:
if spec.dtype == np.dtype('int32'):
self._act_discrete[self._act_discrete_map[spec.name]] = action
elif spec.dtype == np.dtype('float64'):
self._act_continuous[self._act_continuous_map[spec.name]] = action
elif spec.dtype == np.dtype('S'):
if isinstance(action, np.ndarray):
self._act_text[self._act_text_map[spec.name]] = action.tobytes()
elif isinstance(action, str):
self._act_text[self._act_text_map[spec.name]] = action.encode()
elif isinstance(action, bytes):
self._act_text[self._act_text_map[spec.name]] = action
else:
raise TypeError(f'Unexpected type {type(action)!r}')
def step(self, action):
"""See base class."""
if self._reset_next_step:
return self.reset()
self._read_action(self._action_spec, action)
self._env.act_discrete(self._act_discrete)
self._env.act_continuous(self._act_continuous)
self._env.act_text(self._act_text)
self._status, reward = self._env.advance()
if self._status != RUNNING:
self._reset_next_step = True
return dm_env.termination(reward=reward, observation=self.observation())
else:
return dm_env.transition(reward=reward, observation=self.observation())
def observation(self):
"""Returns the observation resulting from the last step or reset call."""
return {
name: np.asarray(
self._env.observation(name), self._observation_spec[name].dtype)
for name in self._obs_names
}
def observation_spec(self):
"""See base class."""
return self._observation_spec
def _make_observation_spec(self):
observations = {}
for name in self._obs_names:
spec = self._env.observation_spec(name)
observations[name] = dm_env.specs.Array(
shape=spec['shape'], dtype=spec['dtype'], name=name)
return observations
def _make_action_spec(self, action_discrete_names, action_continuous_names,
action_text_names):
action_spec = {}
for name in action_discrete_names:
spec = self._env.action_discrete_spec(name)
action_spec[name] = dm_env.specs.BoundedArray(
dtype=np.dtype('int32'),
shape=(),
name=name,
minimum=spec['min'],
maximum=spec['max'])
for name in action_continuous_names:
spec = self._env.action_continuous_spec(name)
action_spec[name] = dm_env.specs.BoundedArray(
dtype=np.dtype('float64'),
shape=(),
name=name,
minimum=spec['min'],
maximum=spec['max'])
for name in action_text_names:
action_spec[name] = dm_env.specs.Array(
dtype=np.dtype(object), shape=(), name=name)
return action_spec
def action_spec(self):
"""See base class."""
return self._action_spec
def events(self) -> Sequence[Tuple[str, Sequence[Union[np.ndarray, bytes]]]]:
"""Returns the events generated after last reset or step.
Returns:
(name, observations) pairs for all events generated after last step or
reset.
"""
return self._env.events()
def list_property(
self, key: str) -> Collection[Tuple[str, PropertyAttribute]]:
"""Returns a list of the properties under the specified key name.
Args:
      key: prefix of the property keys to search under. The empty string
can be used as the root.
Returns:
(key, attribute) pairs of all properties under input key.
Raises:
KeyError: The property does not exist or is not listable.
"""
return self._env.list_property(key)
def write_property(self, key: str, value: str) -> None:
"""Writes a property.
Args:
key: the name to write to.
value: the value to write.
Raises:
      KeyError: The property does not exist or is not writable.
"""
self._env.write_property(key, value)
def read_property(self, key: str) -> str:
"""Returns the value of a given property (converted to a string).
Args:
key: The property to read.
Raises:
      KeyError: The property does not exist or is not readable.
"""
return self._env.read_property(key)
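# Usage sketch (mirrors dmlab2d_test.py): wrap a raw Lab2d instance and run a
# single transition; runfiles_helper is imported separately.
#
#   from dmlab2d import runfiles_helper
#   lab2d = Lab2d(runfiles_helper.find(), {'levelName': 'examples/level_api'})
#   env = Environment(lab2d, lab2d.observation_names(), seed=0)
#   timestep = env.reset()
#   timestep = env.step({'REWARD_ACT': 1})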
|
lab2d-main
|
dmlab2d/__init__.py
|
# Copyright 2019 The DMLab2D Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helper to find runfiles location."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import sys
from dmlab2d import dmlab2d_pybind
def _find_sub_directory(path, sub_directory):
"""Reverse walks `path` to find `sub_directory`.
Args:
path: Path to look for sub_directory in.
sub_directory: Name of subdirectory to search for.
Returns:
Returns full path to `sub_directory` if found otherwise None.
"""
while path:
result = os.path.join(path, sub_directory)
if os.path.isdir(result):
return result
last_path = path
path = os.path.dirname(last_path)
if last_path == path:
return None # At root.
return None
def find():
"""Returns path to folder containing DMLab2D assets.
Raises:
FileNotFoundError: The assets could not be found.
"""
return os.path.abspath(os.path.dirname(dmlab2d_pybind.__file__) + '/..')
def find_directory(sub_directory):
"""Searches for `sub_directory` heuristically.
Searches for `sub_directory` folder in possible built-in data dependency
directories, sys.path, working directory and absolute path.
Args:
sub_directory: Name of subdirectory that must exist.
Returns:
A path to an existing directory with suffix `sub_directory` or None.
"""
sub_directory = sub_directory or ''
# Try using environment variable created when running tests.
data_directory = os.environ.get('TEST_SRCDIR')
if data_directory:
return os.path.join(data_directory, sub_directory)
# Try using environment variable created by bazel run.
data_directory = _find_sub_directory(
os.environ.get('RUNFILES_MANIFEST_FILE'), sub_directory)
if data_directory:
return data_directory
# Try using path to current executable.
data_directory = _find_sub_directory(sys.argv[0], sub_directory)
if data_directory:
return data_directory
# Try using path to module.
data_directory = _find_sub_directory(
os.path.dirname(dmlab2d_pybind.__file__), sub_directory)
if data_directory:
return data_directory
# Try using path to working directory.
data_directory = _find_sub_directory(os.getcwd(), sub_directory)
if data_directory:
return data_directory
# Try using relative path directly.
data_directory = os.path.join(os.getcwd(), sub_directory)
if os.path.isdir(data_directory):
return data_directory
# Try using search path.
for path in sys.path:
data_directory = _find_sub_directory(path, sub_directory)
if data_directory:
return data_directory
data_directory = os.path.join(path, sub_directory)
if os.path.isdir(data_directory):
return data_directory
# Try using absolute path.
if os.path.isdir(sub_directory):
return sub_directory
return None
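# A minimal usage sketch (not part of the original module). The sub-directory
# path below is only illustrative; any directory present in one of the
# searched locations would work.
def _example():  # pragma: no cover
  # `find()` returns the folder containing DMLab2D's built-in assets.
  print('Assets root:', find())
  # `find_directory()` applies the heuristics above and returns None when the
  # requested sub-directory cannot be located.
  print('Levels:', find_directory('dmlab2d/lib/game_scripts/levels'))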
|
lab2d-main
|
dmlab2d/runfiles_helper.py
|
# Copyright 2019 The DMLab2D Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for dmlab2d.dmlab2d."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import absltest
from dm_env import test_utils
import numpy as np
import dmlab2d
from dmlab2d import runfiles_helper
class Dmlab2dDmEnvTest(test_utils.EnvironmentTestMixin, absltest.TestCase):
def make_object_under_test(self):
lab2d = dmlab2d.Lab2d(runfiles_helper.find(),
{'levelName': 'examples/level_api'})
return dmlab2d.Environment(lab2d, lab2d.observation_names(), 0)
class Dmlab2DTest(absltest.TestCase):
def _create_env(self, extra_settings=None):
settings = extra_settings.copy() if extra_settings else {}
settings['levelName'] = 'examples/level_api'
return dmlab2d.Lab2d(runfiles_helper.find(), settings)
def test_lab2d_environment_name(self):
self.assertEqual(self._create_env().name(), 'dmlab2d')
def test_lab2d_observation_names(self):
env = self._create_env()
self.assertEqual(env.observation_names(),
['VIEW' + str(i) for i in range(1, 6)])
def test_lab2d_observation_spec(self):
env = self._create_env()
self.assertEqual(
env.observation_spec('VIEW1'), {
'dtype': np.dtype('uint8'),
'shape': (1,)
})
self.assertEqual(
env.observation_spec('VIEW2'), {
'dtype': np.dtype('double'),
'shape': (2,)
})
self.assertEqual(
env.observation_spec('VIEW3'), {
'dtype': np.dtype('int32'),
'shape': (3,)
})
self.assertEqual(
env.observation_spec('VIEW4'), {
'dtype': np.dtype('int64'),
'shape': (4,)
})
# Text is stored in objects.
self.assertEqual(
env.observation_spec('VIEW5'), {
'dtype': np.dtype('O'),
'shape': ()
})
def test_lab2d_action_spec(self):
env = self._create_env()
self.assertEqual(env.action_discrete_names(), ['REWARD_ACT'])
self.assertEqual(
env.action_discrete_spec('REWARD_ACT'), {
'min': 0,
'max': 4
})
self.assertEqual(env.action_continuous_names(), ['OBSERVATION_ACT'])
self.assertEqual(
env.action_continuous_spec('OBSERVATION_ACT'), {
'min': -5,
'max': 5
})
self.assertEqual(env.action_text_names(), ['LOG_EVENT'])
def test_lab2d_start_environment(self):
env = self._create_env()
env.start(episode=0, seed=0)
def test_lab2d_events_start(self):
env = self._create_env()
env.start(episode=0, seed=0)
events = env.events()
self.assertLen(events, 1)
event_name, observations = events[0]
self.assertEqual(event_name, 'start')
self.assertLen(observations, 1)
np.testing.assert_array_equal(observations[0], [1, 2, 3])
def test_lab2d_events_cleared_after_advance_not_read(self):
env = self._create_env()
env.start(episode=0, seed=0)
self.assertLen(env.events(), 1)
self.assertLen(env.events(), 1)
env.advance()
self.assertEmpty(env.events())
def test_lab2d_observe(self):
env = self._create_env()
env.start(episode=0, seed=0)
np.testing.assert_array_equal(env.observation('VIEW1'), [1])
np.testing.assert_array_equal(env.observation('VIEW2'), [1, 2])
np.testing.assert_array_equal(env.observation('VIEW3'), [1, 2, 3])
np.testing.assert_array_equal(env.observation('VIEW4'), [1, 2, 3, 4])
self.assertEqual(env.observation('VIEW5'), b'')
def test_lab2d_ten_steps_terminate_environment(self):
env = self._create_env()
env.start(episode=0, seed=0)
for _ in range(9):
self.assertEqual(env.advance()[0], dmlab2d.RUNNING)
self.assertEqual(env.advance()[0], dmlab2d.TERMINATED)
def test_lab2d_settings_environment(self):
env = self._create_env({'steps': '5'})
env.start(episode=0, seed=0)
for _ in range(4):
self.assertEqual(env.advance()[0], dmlab2d.RUNNING)
self.assertEqual(env.advance()[0], dmlab2d.TERMINATED)
def test_lab2d_properties_environment(self):
env = self._create_env({'steps': '5'})
properties = env.list_property('')
self.assertLen(properties, 1)
self.assertEqual(properties[0],
('steps', dmlab2d.PropertyAttribute.READABLE_WRITABLE))
self.assertEqual(env.read_property('steps'), '5')
env.write_property('steps', '3')
self.assertEqual(env.read_property('steps'), '3')
env.start(episode=0, seed=0)
for _ in range(2):
self.assertEqual(env.advance()[0], dmlab2d.RUNNING)
self.assertEqual(env.advance()[0], dmlab2d.TERMINATED)
def test_lab2d_act_discrete(self):
env = self._create_env({'steps': '5'})
env.start(episode=0, seed=0)
env.act_discrete(np.array([2], np.dtype('int32')))
_, reward = env.advance()
self.assertEqual(reward, 2)
def test_lab2d_act_continuous(self):
env = self._create_env({'steps': '5'})
env.start(episode=0, seed=0)
np.testing.assert_array_equal(env.observation('VIEW3'), [1, 2, 3])
env.act_continuous([10])
env.advance()
np.testing.assert_array_equal(env.observation('VIEW3'), [11, 12, 13])
def test_lab2d_act_text(self):
env = self._create_env({'steps': '5'})
env.start(episode=0, seed=0)
view = env.observation('VIEW5')
self.assertEqual(view, b'')
env.act_text(['Hello'])
env.advance()
view = env.observation('VIEW5')
self.assertEqual(view, b'Hello')
def test_lab2d_invalid_setting(self):
with self.assertRaises(ValueError):
self._create_env({'missing': '5'})
def test_lab2d_bad_action_spec_name(self):
env = self._create_env()
with self.assertRaises(KeyError):
env.action_discrete_spec('bad_key')
with self.assertRaises(KeyError):
env.action_continuous_spec('bad_key')
def test_lab2d_bad_observation_spec_name(self):
env = self._create_env()
with self.assertRaises(KeyError):
env.observation_spec('bad_key')
def test_lab2d_observe_before_start(self):
env = self._create_env()
with self.assertRaises(RuntimeError):
env.observation('VIEW1')
def test_lab2d_act_before_start(self):
env = self._create_env()
with self.assertRaises(RuntimeError):
env.act_discrete([0])
with self.assertRaises(RuntimeError):
env.act_continuous([0])
with self.assertRaises(RuntimeError):
env.act_text([''])
def test_lab2d_act_bad_shape(self):
env = self._create_env()
env.start(0, 0)
with self.assertRaises(ValueError):
env.act_discrete([0, 1])
with self.assertRaises(ValueError):
env.act_continuous([0, 1])
def test_lab2d_advance_after_episode_ends(self):
env = self._create_env({'steps': '2'})
env.start(0, 0)
self.assertEqual(env.advance()[0], dmlab2d.RUNNING)
self.assertEqual(env.advance()[0], dmlab2d.TERMINATED)
with self.assertRaises(RuntimeError):
env.advance()
def test_lab2d_missing_properties(self):
env = self._create_env({'steps': '5'})
with self.assertRaises(KeyError):
env.list_property('missing')
with self.assertRaises(KeyError):
env.read_property('missing')
with self.assertRaises(KeyError):
env.write_property('missing', '10')
def test_lab2d_invalid_ops_properties(self):
env = self._create_env({'steps': '5'})
with self.assertRaises(ValueError):
env.list_property('steps')
with self.assertRaises(ValueError):
env.write_property('steps', 'mouse')
if __name__ == '__main__':
absltest.main()
|
lab2d-main
|
dmlab2d/dmlab2d_test.py
|
# Copyright 2020 The DMLab2D Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for dmlab2d.lib.system.generators.pushbox.python.pushbox."""
from absl.testing import absltest
from dmlab2d.lib.system.generators.pushbox.python import pushbox
class PythonPushboxTest(absltest.TestCase):
def test_generate_level(self):
level = pushbox.Generate(seed=10, height=11, width=14, num_boxes=5)
# Test goal count
self.assertEqual(level.count('X') + level.count('&'), 5)
# Test box count
self.assertEqual(level.count('B') + level.count('&'), 5)
# Test spawn point count
self.assertEqual(level.count('P'), 1)
self.assertLen(level.split('\n'), 11)
self.assertEqual(level.find('\n'), 14)
def test_bad_inputs(self):
with self.assertRaises(ValueError):
pushbox.Generate(seed=10, height=5, width=5, num_boxes=36)
if __name__ == '__main__':
absltest.main()
|
lab2d-main
|
dmlab2d/lib/system/generators/pushbox/python/pushbox_test.py
|
# Copyright 2020 The DMLab2D Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A simple human player for testing the `clean_up` level.
Use `WASD` keys to move the character around.
Use `Q and E` to turn the character.
Use `SPACE` to fire clean.
Use `LEFT_CTRL` to fire fine.
Use `TAB` to switch between players.
Use `[]` to switch between levels.
Use `R` to restart a level.
Use `ESCAPE` to quit.
"""
import argparse
import collections
import json
from typing import Mapping
from dmlab2d import ui_renderer
_ACTION_MAP = {
'move': ui_renderer.get_direction_pressed,
'turn': ui_renderer.get_turn_pressed,
'fireClean': ui_renderer.get_space_key_pressed,
'fireFine': ui_renderer.get_left_control_pressed
}
_FRAMES_PER_SECOND = 8
def _run(rgb_observation: str, config: Mapping[str, str]):
"""Run multiplayer environment, with per player rendering and actions."""
player_count = int(config.get('numPlayers', '1'))
score = collections.defaultdict(float)
total_contrib = collections.defaultdict(float)
prefixes = [str(i + 1) + '.' for i in range(player_count)]
ui = ui_renderer.Renderer(
config=config,
action_map=_ACTION_MAP,
rgb_observation=rgb_observation,
player_prefixes=prefixes,
frames_per_second=_FRAMES_PER_SECOND)
def player_printer(idx: int):
print(f'Player({idx}) contrib({total_contrib[idx]}) score({score[idx]})')
for step in ui.run():
if step.type == ui_renderer.StepType.FIRST:
print(f'=== Start episode {step.episode} ===')
print_player = False
for idx, prefix in enumerate(prefixes):
reward = step.env.observation(prefix + 'REWARD')
score[idx] += reward
contrib = step.env.observation(prefix + 'CONTRIB')
total_contrib[idx] += contrib
if step.player == idx and (reward != 0 or contrib != 0):
print_player = True
if print_player:
player_printer(step.player)
if step.type == ui_renderer.StepType.LAST:
print(f'=== End episode {step.episode} ===')
for idx in range(player_count):
player_printer(idx)
print('======')
print('=== Exiting ===')
for idx in range(player_count):
player_printer(idx)
def main():
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument(
'--observation', type=str, default='RGB', help='Observation to render')
parser.add_argument(
'--settings', type=json.loads, default={}, help='Settings as JSON string')
parser.add_argument(
'--players', type=int, default=4, help='Number of players.')
args = parser.parse_args()
if 'levelName' not in args.settings:
args.settings['levelName'] = 'clean_up'
if 'numPlayers' not in args.settings:
args.settings['numPlayers'] = args.players
for k in args.settings:
args.settings[k] = str(args.settings[k])
_run(args.observation, args.settings)
if __name__ == '__main__':
main()
|
lab2d-main
|
dmlab2d/lib/game_scripts/levels/clean_up/play.py
|
# Copyright 2020 The DMLab2D Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A simple human player for testing the `clean_up` level.
Key-map:
* `WASD` keys to move the character around.
* `Q and E` to turn the character.
* `SPACE` to zap.
* `TAB` to switch between players.
* `[]` to switch between levels.
* `R` to restart a level.
* `ESCAPE` to quit.
"""
import argparse
import collections
import json
from typing import Mapping
from dmlab2d import ui_renderer
_ACTION_MAP = {
'move': ui_renderer.get_direction_pressed,
'turn': ui_renderer.get_turn_pressed,
'zap': ui_renderer.get_space_key_pressed
}
_FRAMES_PER_SECOND = 8
def _run(rgb_observation: str, config: Mapping[str, str]):
"""Run multiplayer environment, with per player rendering and actions."""
player_count = int(config.get('numPlayers', '1'))
score = collections.defaultdict(float)
prefixes = [str(i + 1) + '.' for i in range(player_count)]
zap_matrix = None
ui = ui_renderer.Renderer(
config=config,
action_map=_ACTION_MAP,
rgb_observation=rgb_observation,
player_prefixes=prefixes,
frames_per_second=_FRAMES_PER_SECOND)
def player_printer(idx: int):
print(f'Player({idx}) score({score[idx]}) zap(\n{zap_matrix})')
for step in ui.run():
if step.type == ui_renderer.StepType.FIRST:
print(f'=== Start episode {step.episode} ===')
print_player = False
matrix = step.env.observation('WORLD.ZAP_COUNT')
zap_matrix = zap_matrix + matrix if zap_matrix is not None else matrix
for idx, prefix in enumerate(prefixes):
reward = step.env.observation(prefix + 'REWARD')
score[idx] += reward
print_player = print_player or (step.player == idx and reward != 0)
if print_player:
player_printer(step.player)
if step.type == ui_renderer.StepType.LAST:
print(f'=== End episode {step.episode} ===')
for idx in range(player_count):
player_printer(idx)
print('======')
print('=== Exiting ===')
for idx in range(player_count):
player_printer(idx)
def main():
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument(
'--observation', type=str, default='RGB', help='Observation to render')
parser.add_argument(
'--settings', type=json.loads, default={}, help='Settings as JSON string')
parser.add_argument(
'--players', type=int, default=4, help='Number of players.')
args = parser.parse_args()
if 'levelName' not in args.settings:
args.settings['levelName'] = 'commons_harvest'
if 'numPlayers' not in args.settings:
args.settings['numPlayers'] = args.players
for k in args.settings:
args.settings[k] = str(args.settings[k])
_run(args.observation, args.settings)
if __name__ == '__main__':
main()
|
lab2d-main
|
dmlab2d/lib/game_scripts/levels/commons_harvest/play.py
|
# Copyright 2020 The DMLab2D Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A simple human player for testing the `clean_up` level.
Use `WASD` keys to move the character around.
Use `[]` to switch between episodes.
Use `R` to restart a level.
Use `ESCAPE` to quit.
"""
import argparse
import json
from typing import Mapping
from dmlab2d import ui_renderer
_ACTION_MAP = {'MOVE': ui_renderer.get_direction_pressed}
def _run(config: Mapping[str, str]):
"""Run multiplayer environment, with per player rendering and actions."""
ui = ui_renderer.Renderer(
config=config, action_map=_ACTION_MAP, rgb_observation='WORLD.RGB')
scores = dict()
for step in ui.run():
if step.type == ui_renderer.StepType.FIRST:
print(f'=== Start episode {step.episode} ===')
else:
scores[step.episode] = scores.get(step.episode, 0) + step.reward
print(f'Episode({step.episode}), Score ({scores[step.episode]})')
if step.type == ui_renderer.StepType.LAST:
print(f'=== End episode {step.episode} ===')
print('=== Exiting ===')
for episode in sorted(scores):
print(f'Episode({episode}), Score ({scores[episode]})')
def main():
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument(
'--settings', type=json.loads, default={}, help='Settings as JSON string')
args = parser.parse_args()
args.settings['levelName'] = 'chase_eat'
_run(args.settings)
if __name__ == '__main__':
main()
|
lab2d-main
|
dmlab2d/lib/game_scripts/levels/chase_eat/play.py
|
# Copyright 2020 The DMLab2D Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A simple human player for testing the `clean_up` level.
Use `WASD` keys to move the character around.
Use `[]` to switch between episodes.
Use `R` to restart a level.
Use `ESCAPE` to quit.
"""
import argparse
import json
from typing import Mapping
from dmlab2d import ui_renderer
_ACTION_MAP = {'MOVE': ui_renderer.get_direction_pressed}
def _run(config: Mapping[str, str]):
"""Run multiplayer environment, with per player rendering and actions."""
ui = ui_renderer.Renderer(
config=config, action_map=_ACTION_MAP, rgb_observation='WORLD.RGB')
scores = dict()
for step in ui.run():
if step.type == ui_renderer.StepType.FIRST:
print(f'=== Start episode {step.episode} ===')
else:
scores[step.episode] = scores.get(step.episode, 0) + step.reward
print(f'Episode({step.episode}), Score ({scores[step.episode]})')
if step.type == ui_renderer.StepType.LAST:
print(f'=== End episode {step.episode} ===')
print('=== Exiting ===')
for episode in sorted(scores):
print(f'Episode({episode}), Score ({scores[episode]})')
def main():
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument(
'--settings', type=json.loads, default={}, help='Settings as JSON string')
args = parser.parse_args()
args.settings['levelName'] = 'pushbox'
_run(args.settings)
if __name__ == '__main__':
main()
|
lab2d-main
|
dmlab2d/lib/game_scripts/levels/pushbox/play.py
|
# Copyright 2020 The DMLab2D Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A simple human player for testing the `clean_up` level.
Use `WASD` keys to move the character around.
Use `Q and E` to turn the character.
Use `SPACE` to fire zap.
Use `TAB` to switch between players.
Use `[]` to switch between levels.
Use `R` to restart a level.
Use `ESCAPE` to quit.
"""
import argparse
import collections
import json
from typing import Mapping
from dmlab2d import ui_renderer
_ACTION_MAP = {
'move': ui_renderer.get_direction_pressed,
'turn': ui_renderer.get_turn_pressed,
'fireZap': ui_renderer.get_space_key_pressed
}
_FRAMES_PER_SECOND = None
def _run(rgb_observation: str, config: Mapping[str, str]):
"""Run multiplayer environment, with per player rendering and actions."""
player_count = int(config.get('numPlayers', '1'))
score = collections.defaultdict(float)
prefixes = [str(i + 1) + '.' for i in range(player_count)]
ui = ui_renderer.Renderer(
config=config,
action_map=_ACTION_MAP,
rgb_observation=rgb_observation,
player_prefixes=prefixes,
frames_per_second=_FRAMES_PER_SECOND)
def player_printer(idx: int):
print(f'Player({idx}) score({score[idx]})')
for step in ui.run():
if step.type == ui_renderer.StepType.FIRST:
print(f'=== Start episode {step.episode} ===')
print_player = False
for idx, prefix in enumerate(prefixes):
reward = step.env.observation(prefix + 'REWARD')
score[idx] += reward
if step.player == idx and reward != 0:
print_player = True
if print_player:
player_printer(step.player)
if step.type == ui_renderer.StepType.LAST:
print(f'=== End episode {step.episode} ===')
for idx in range(player_count):
player_printer(idx)
print('======')
print('=== Exiting ===')
for idx in range(player_count):
player_printer(idx)
def main():
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument(
'--observation', type=str, default='RGB', help='Observation to render')
parser.add_argument(
'--settings', type=json.loads, default={}, help='Settings as JSON string')
parser.add_argument(
'--players', type=int, default=2, help='Number of players.')
args = parser.parse_args()
if 'levelName' not in args.settings:
args.settings['levelName'] = 'running_with_scissors'
if 'numPlayers' not in args.settings:
args.settings['numPlayers'] = args.players
for k in args.settings:
args.settings[k] = str(args.settings[k])
_run(args.observation, args.settings)
if __name__ == '__main__':
main()
|
lab2d-main
|
dmlab2d/lib/game_scripts/levels/running_with_scissors/play.py
|
# Copyright 2023 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Setup for pip package."""
from setuptools import find_namespace_packages
from setuptools import setup
def _get_version():
with open('synjax/__init__.py') as fp:
for line in fp:
if line.startswith('__version__'):
g = {}
exec(line, g) # pylint: disable=exec-used
return g['__version__']
raise ValueError('`__version__` not defined in `synjax/__init__.py`')
def _parse_requirements(requirements_txt_path):
with open(requirements_txt_path) as fp:
return fp.read().splitlines()
_VERSION = _get_version()
setup(
name='synjax',
version=_VERSION,
url='https://github.com/deepmind/synjax',
license='Apache 2.0',
author='DeepMind',
description='SynJax: structured probability distributions for JAX.',
long_description=open('README.md').read(),
long_description_content_type='text/markdown',
author_email='stanojevic@google.com',
# Contained modules and scripts.
packages=find_namespace_packages(exclude=['*_test.py', 'examples']),
install_requires=_parse_requirements('requirements.txt'),
extras_require={'jax': _parse_requirements('requirements-jax.txt')},
tests_require=_parse_requirements('requirements-test.txt'),
python_requires='>=3.8',
include_package_data=True,
zip_safe=False,
# PyPI package information.
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Intended Audience :: Education',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python :: 3',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'Topic :: Scientific/Engineering :: Mathematics',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Software Development :: Libraries',
],
)
|
synjax-master
|
setup.py
|
# Copyright 2023 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""SynJax is a library for structured prediction."""
# pylint: disable=g-multiple-import, g-importing-member
from synjax import semirings
from synjax import special
from synjax._src.alignment_monotone_general import GeneralMonotoneAlignmentCRF
from synjax._src.alignment_simple import AlignmentCRF
from synjax._src.config import get_config, set_config, config_context
from synjax._src.constituency_pcfg import PCFG, GeneralizedPCFG
from synjax._src.constituency_tensor_decomposition_pcfg import TensorDecompositionPCFG, GeneralizedTensorDecompositionPCFG
from synjax._src.constituency_tree_crf import TreeCRF
from synjax._src.ctc import CTC
from synjax._src.distribution import Distribution, SemiringDistribution
from synjax._src.hmm import HMM
from synjax._src.linear_chain_crf import LinearChainCRF
from synjax._src.semi_markov_crf import SemiMarkovCRF
from synjax._src.spanning_tree_crf import SpanningTreeCRF
__version__ = "2023.8.5"
__all__ = (
"Distribution",
"SemiringDistribution",
"AlignmentCRF",
"GeneralMonotoneAlignmentCRF",
"CTC",
"SpanningTreeCRF",
"LinearChainCRF",
"SemiMarkovCRF",
"HMM",
"TreeCRF",
"PCFG",
"GeneralizedPCFG",
"TensorDecompositionPCFG",
"GeneralizedTensorDecompositionPCFG",
"get_config",
"set_config",
"config_context",
"semirings",
"special",
)
# _________________________________________
# / Please don't use symbols in `_src` they \
# \ are not part of the SynJax public API. /
# -----------------------------------------
# \ ^__^
# \ (oo)\_______
# (__)\ )\/\
# ||----w |
# || ||
#
|
synjax-master
|
synjax/__init__.py
|
# Copyright 2023 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Semiring interface and einsum functions."""
import jax
import jax.numpy as jnp
# pylint: disable=g-importing-member
from synjax._src.utils.semirings import einsum_builder
from synjax._src.utils.semirings import LogSemiring
from synjax._src.utils.semirings import MaxSemiring
from synjax._src.utils.semirings import Semiring
einsum_log = einsum_builder(jax.nn.logsumexp, jnp.add)
einsum_max = einsum_builder(jnp.max, jnp.add)
__all__ = [
"Semiring",
"MaxSemiring",
"LogSemiring",
"einsum_builder",
"einsum_log",
"einsum_max",
]
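# Illustrative sketch (not part of the original module) of the log-semiring
# that `einsum_builder(jax.nn.logsumexp, jnp.add)` composes: semiring
# "multiplication" is ordinary addition and semiring "summation" is logsumexp,
# so a log-semiring matrix product equals log(exp(A) @ exp(B)) computed stably
# in log space. Nothing here assumes the exact calling convention of
# `einsum_log`; it only demonstrates the underlying operation.
def _log_matmul(log_a: jax.Array, log_b: jax.Array) -> jax.Array:
  """Log-space matrix product under the (logsumexp, +) semiring."""
  # Shapes: log_a is (..., m, k), log_b is (..., k, n); the contraction over k
  # uses logsumexp instead of a sum of products.
  return jax.nn.logsumexp(
      log_a[..., :, :, None] + log_b[..., None, :, :], axis=-2)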
|
synjax-master
|
synjax/semirings.py
|
# Copyright 2023 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for SynJax."""
from absl.testing import absltest
import synjax
class SynJaxTest(absltest.TestCase):
"""Test synjax can be imported correctly."""
def test_import(self):
self.assertTrue(hasattr(synjax, 'SpanningTreeCRF'))
if __name__ == '__main__':
absltest.main()
|
synjax-master
|
synjax/synjax_test.py
|
# Copyright 2023 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Generally useful small functions."""
from synjax._src.utils.special import inv
from synjax._src.utils.special import safe_log
from synjax._src.utils.special import safe_slogdet
__all__ = [
"safe_slogdet",
"safe_log",
"inv",
]
|
synjax-master
|
synjax/special.py
|
# Copyright 2023 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Distribution of monotone alignments between two sequences."""
# pylint: disable=g-multiple-import, g-importing-member
from typing import Optional, Tuple, List
import jax
import jax.numpy as jnp
from jaxtyping import Array, Float, Int32, PyTree
from synjax._src.config import get_config
from synjax._src.constants import INF
from synjax._src.distribution import SemiringDistribution
from synjax._src.typing import Shape, Key, typed
from synjax._src.utils import semirings
class GeneralMonotoneAlignmentCRF(SemiringDistribution):
"""Distribution of monotone alignments between elements of two sequences.
It is similar to String-Edit Distance CRF from McCallum et al (2005),
but more powerful in some aspects because it can represent alignments with
larger diagonal jumps, which are needed for distributions like CTC in the
presence of blank symbols.
References:
McCallum et al, 2005: https://people.cs.umass.edu/~mccallum/papers/crfstredit-uai05.pdf
""" # pylint: disable=line-too-long
log_potentials_horizontal: Tuple[Float[Array, "*batch row col"], ...]
log_potentials_vertical: Optional[Float[Array, "*batch row col"]]
lengths_rows: Int32[Array, "*batch"]
lengths_cols: Int32[Array, "*batch"]
@typed
def __init__(
self,
log_potentials_horizontal: PyTree[Float[Array, "*batch row col"]],
# NOTE The type of log_potentials_horizontal here is PyTree instead of
# tuple because jaxtyping/typeguard sometimes fails in checking shapes
# otherwise. Tuple type is check explicitly later with isinstance.
log_potentials_vertical: Optional[Float[Array, "*batch row col"]], *,
lengths_rows: Optional[Int32[Array, "*batch"]] = None,
lengths_cols: Optional[Int32[Array, "*batch"]] = None):
"""Creates an AlignmentCRF distribution.
Args:
log_potentials_horizontal:
Tuple of jax.Arrays that specifies the log-potentials for making one
horizontal move plus i vertical moves, where i is the position of the
array in the tuple. For example, log_potentials_horizontal[0] specifies
strictly horizontal moves (1 horizontal + 0 vertical),
log_potentials_horizontal[1] specifies strictly diagonal moves
(1 horizontal + 1 vertical), log_potentials_horizontal[2] specifies even
more tilted diagonal moves (1 horizontal + 2 vertical), etc. The tuple
must contain at least one array if log_potentials_vertical is provided,
and at least two arrays if it is not.
log_potentials_vertical:
Optional jax.Array that specifies the log-potentials for moving
vertically in the alignment matrix.
lengths_rows: Optional jax.Array with the number of rows in each instance.
lengths_cols: Optional jax.Array with the number of columns in
each instance.
"""
super().__init__(log_potentials=None, struct_is_isomorphic_to_params=False)
if not isinstance(log_potentials_horizontal, tuple):
raise ValueError("log_potentials_horizontal must be a tuple.")
if len(log_potentials_horizontal)+(log_potentials_vertical is not None) < 2:
# Explicit check needed here because jaxtyping checks fail sometimes.
raise ValueError("Arguments log-potentials must have the same shape.")
rows, cols = log_potentials_horizontal[0].shape[-2:]
if (log_potentials_vertical is None and lengths_cols is None
and lengths_rows is None and rows >= cols):
raise ValueError("This is a useless distribution because there is "
"less than two alignment possible.")
batch_shape = log_potentials_horizontal[0].shape[:-2]
if lengths_rows is None:
lengths_rows = jnp.full(batch_shape, rows)
if lengths_cols is None:
lengths_cols = jnp.full(batch_shape, cols)
self.log_potentials_horizontal = log_potentials_horizontal
self.log_potentials_vertical = log_potentials_vertical
self.lengths_rows = lengths_rows
self.lengths_cols = lengths_cols
@property
def batch_shape(self) -> Shape:
return self.log_potentials_horizontal[0].shape[:-2]
@property
def event_shape(self) -> Shape:
return self.log_potentials_horizontal[0].shape[-2:]
@typed
def unnormalized_log_prob(self, event: Float[Array, "*samples_batch row col"],
**kwargs) -> Float[Array, "*samples_batch"]:
lp, lp_vert = self._masked_params()
scores = jnp.zeros(event.shape[:-2])
if lp_vert is not None:
is_vertical = (event * jnp.roll(event, 1, -2)).at[..., 0, :].set(0)
scores += jnp.sum(lp_vert * is_vertical, (-1, -2))
event_leaving = (jnp.argmax(jnp.cumsum(event, -2), -2, keepdims=True)
== jnp.arange(event.shape[-2])[:, None]).astype(jnp.int32)
event_leaving_shifted = jnp.roll(event_leaving, 1, -1).at[..., 0].set(0)
event_entering = (jnp.argmax(event, -2, keepdims=True)
== jnp.arange(event.shape[-2])[:, None]).astype(jnp.int32)
for i, lp_i in enumerate(lp):
is_active = event_entering * jnp.roll(event_leaving_shifted, i, -2)
scores += jnp.sum(lp_i * is_active, (-1, -2))
return scores
@typed
def _structure_forward(
self, base_struct: Float[Array, "row col"], semiring: semirings.Semiring,
key: Key) -> Float[Array, "s"]:
rows, cols = self.event_shape
init_state = semiring.wrap(jnp.full(rows, -INF).at[0].set(0))
keys = jax.random.split(key, cols*len(self.log_potentials_horizontal)
).reshape(cols, -1, 2)
def loop(state, inp):
scores, scores_vert, keys = inp
out_state = semiring.mul(state, scores[0])
for shift, score in enumerate(scores[1:], 1):
transition = semiring.mul(score, jnp.roll(state, shift=shift, axis=-1))
out_state = semiring.add(out_state, transition, key=keys[shift])
if scores_vert is not None:
def vertical_loop(up, inp2):
curr, weight, key = inp2
curr = semiring.add(curr, semiring.mul(weight, up), key=key)
return curr, curr
subkeys = jax.random.split(keys[0], rows)
out_state = jax.lax.scan(vertical_loop, semiring.zero(),
(out_state.T, scores_vert.T, subkeys))[1].T
return out_state, out_state
if get_config().checkpoint_loops:
loop = jax.checkpoint(loop)
lp, lp_vert = self._masked_params()
lp = [jnp.moveaxis(semiring.wrap(x+base_struct), -1, 0) for x in lp]
if lp_vert is not None:
lp_vert = jnp.moveaxis(semiring.wrap(lp_vert+base_struct), -1, 0)
_, outputs = jax.lax.scan(loop, init_state, (lp, lp_vert, keys))
return outputs[self.lengths_cols-1, :, self.lengths_rows-1]
@typed
def _masked_params(self) -> Tuple[List[Float[Array, "*batch row col"]],
Optional[Float[Array, "*batch row col"]]]:
rows, _ = self.event_shape
lp_h, lp_v = self.log_potentials_horizontal, self.log_potentials_vertical
lp_h = [x.at[..., 0].set(-INF * (jnp.arange(rows) > 0)) for x in lp_h]
lp_h = [jnp.where(jnp.arange(rows)[:, None] >= i, x, -INF)
for i, x in enumerate(lp_h)]
if lp_v is not None:
lp_v = lp_v.at[..., 0, :].set(-INF)
return lp_h, lp_v
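# Construction sketch (illustrative, not part of the original module). Shapes
# are chosen with rows < cols so the distribution is non-degenerate even
# without vertical moves. Calling `log_partition()` on the result assumes the
# SynJax Distribution interface defined elsewhere, not in this file.
def _example():  # pragma: no cover
  key_h, key_d = jax.random.split(jax.random.PRNGKey(0))
  rows, cols = 3, 5
  horizontal = jax.random.normal(key_h, (rows, cols))  # 1 right + 0 down.
  diagonal = jax.random.normal(key_d, (rows, cols))    # 1 right + 1 down.
  dist = GeneralMonotoneAlignmentCRF((horizontal, diagonal), None)
  return dist.log_partition()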
|
synjax-master
|
synjax/_src/alignment_monotone_general.py
|
# Copyright 2023 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Distribution representing projective dependency trees."""
# pylint: disable=g-multiple-import, g-importing-member
from typing import Literal, Optional, Tuple
import warnings
import equinox as eqx
import jax
import jax.numpy as jnp
from jaxtyping import Array, Float, Int32
from synjax._src.config import get_config
from synjax._src.constants import INF
from synjax._src.deptree_algorithms import deptree_padding
from synjax._src.distribution import SemiringDistribution
from synjax._src.typing import Shape, Key, typed
from synjax._src.utils import chart_struct
from synjax._src.utils.semirings import Semiring, MaxSemiring
Chart = chart_struct.Chart
class SpanningTreeProjectiveCRF(SemiringDistribution):
"""Distribution representing projective dependency trees."""
single_root_edge: bool = eqx.static_field()
lengths: Int32[Array, "*batch"]
@typed
def __init__(self, log_potentials: Float[Array, "*batch n n"],
*,
single_root_edge: bool,
lengths: Optional[Int32[Array, "*batch"]] = None):
self.single_root_edge = single_root_edge
if lengths is None:
lengths = jnp.full(log_potentials.shape[:-2], log_potentials.shape[-1])
self.lengths = lengths
super().__init__(log_potentials=deptree_padding.pad_log_potentials(
log_potentials, self.lengths))
@property
def event_shape(self) -> Shape:
return self.log_potentials.shape[-2:]
@typed
def _structure_forward(
self, base_struct: Float[Array, "n n"], semiring: Semiring, key: Key,
algorithm: Optional[Literal["Kuhlmann", "Eisner"]] = None
) -> Float[Array, "s"]:
if algorithm is None:
if isinstance(semiring, MaxSemiring):
algorithm = get_config().projective_argmax_algorithm
else:
algorithm = "Eisner"
if algorithm == "Kuhlmann" and not isinstance(semiring, MaxSemiring):
warnings.warn(
"Kuhlmann's arc-hybrid algorithm does not provide correct results"
" for any semiring except MaxSemiring due to spurious ambiguity.")
if algorithm == "Eisner":
return self._structure_forward_Eisner(base_struct, semiring, key)
elif algorithm == "Kuhlmann":
return self._structure_forward_Kuhlmann_arc_hybrid(
base_struct, semiring, key)
else:
raise ValueError(f"Unknown algorithm {algorithm}.")
@typed
def _structure_forward_Kuhlmann_arc_hybrid( # pylint: disable=invalid-name
self, base_struct: Float[Array, "n n"], semiring: Semiring, key: Key
) -> Float[Array, "s"]:
"""Kuhlmann et al (2011) arc-hybrid parsing algorithm.
Fast in practice, but should be used only with MaxSemiring because it has
multiple derivations for the same tree, causing the partition function
computation of other semirings to be incorrect. A simple visual depiction of
the algorithm is given in Shi et al (2017).
References:
Shi et al, 2017 - Figure 1b: https://aclanthology.org/D17-1002.pdf#page=5
Kuhlmann et al, 2011: https://aclanthology.org/P11-1068.pdf
Args:
base_struct: Zero tensor for tracking gradients by being glued to the
structure of the computation.
semiring: Used semiring.
key: Random key.
Returns:
Partition function with the provided semiring.
"""
# See Figure 1b in Shi et al 2017 for good illustration of the algorithm.
n = self.log_potentials.shape[-1]-1 # Number of words excluding ROOT node.
if self.single_root_edge:
# Apply Reweighting algorithm from Stanojević and Cohen 2021.
# https://aclanthology.org/2021.emnlp-main.823.pdf
lp = jnp.clip(self.log_potentials, -INF/100)
c = jax.lax.stop_gradient(n*(jnp.max(lp) - jnp.min(lp))+1)
else:
c = 0
params = base_struct+self.log_potentials.at[0].add(-c)
params_extended = jnp.full((n+2, n+2), -INF).at[:n+1, :n+1].set(params)
lr_arcs = chart_struct.from_cky_table(semiring.wrap(params_extended))
rl_arcs = chart_struct.from_cky_table(semiring.wrap(params_extended.T))
init_chart = chart_struct.from_cky_table(semiring.wrap(
INF*(jnp.eye(n+2, k=1)-1)))
keys = jax.random.split(key, 2*n)
def loop(chart: chart_struct.Chart, d):
lr = lr_arcs.left()
rl = rl_arcs.right_non_empty(d, semiring)
left = chart.left()
right = chart.right_non_empty(d, semiring)
score = semiring.add(lr, rl, key=keys[2*d])
entries = semiring.einsum("sij,sij,sij->si", left, right, score,
key=keys[2*d+1])
return chart.set_entries(d, entries), None
if get_config().checkpoint_loops:
loop = jax.checkpoint(loop)
chart, _ = jax.lax.scan(loop, init_chart, jnp.arange(3, n+3))
return chart.get_entries(n+2)[:, 0] + c
@typed
def _structure_forward_Eisner( # pylint: disable=invalid-name
self, base_struct: Float[Array, "n n"], semiring: Semiring, key: Key
) -> Float[Array, "s"]:
"""Eisner's parsing algorithm.
References:
Eisner, 2000: https://www.cs.jhu.edu/~jason/papers/eisner.iwptbook00.pdf
Chen et al, 2014 - slide 20: http://ir.hit.edu.cn/~lzh/papers/coling14-tutorial-dependency-parsing-a.pdf#page=20
Args:
base_struct: Zero tensor for tracking gradients by being glued to the
structure of the computation.
semiring: Used semiring.
key: Random key.
Returns:
Partition function with the provided semiring.
""" # pylint: disable=line-too-long
params = base_struct+self.log_potentials
lr_arcs = chart_struct.from_cky_table(semiring.wrap(params))
rl_arcs = chart_struct.from_cky_table(semiring.wrap(params.T))
chart_left_incomp = chart_struct.from_cky_table(
semiring.one(self.event_shape))
chart_right_incomp = chart_struct.from_cky_table(
semiring.one(self.event_shape))
chart_left_comp = chart_struct.from_cky_table(
semiring.one(self.event_shape))
chart_right_comp = chart_struct.from_cky_table(
semiring.one(self.event_shape))
n = base_struct.shape[-1]
keys = jax.random.split(key, n+2)
state = (chart_left_incomp, chart_right_incomp,
chart_left_comp, chart_right_comp)
def loop(state: Tuple[Chart, Chart, Chart, Chart], d):
(chart_left_incomp, chart_right_incomp,
chart_left_comp, chart_right_comp) = state
akeys = jax.random.split(keys[d], 4)
content = semiring.einsum("sij,sij->si",
chart_left_comp.left(),
chart_right_comp.right(d, semiring),
key=akeys[0])
chart_left_incomp = chart_left_incomp.set_entries(
d, semiring.mul(content, lr_arcs.get_entries(d)))
chart_right_incomp = chart_right_incomp.set_entries(
d, semiring.mul(content, rl_arcs.get_entries(d)))
content = semiring.einsum("sij,sij->si",
chart_left_incomp.left_non_empty(),
chart_left_comp.right(d, semiring),
key=akeys[1])
chart_left_comp = chart_left_comp.set_entries(d, content)
content = semiring.einsum("sij,sij->si",
chart_right_comp.left(),
chart_right_incomp.right_non_empty(d, semiring),
key=akeys[2])
chart_right_comp = chart_right_comp.set_entries(d, content)
state = (chart_left_incomp, chart_right_incomp,
chart_left_comp, chart_right_comp)
return state, None
if get_config().checkpoint_loops:
loop = jax.checkpoint(loop)
state, _ = jax.lax.scan(loop, state, jnp.arange(2, n+1))
(chart_left_incomp, chart_right_incomp,
chart_left_comp, chart_right_comp) = state
del chart_left_incomp, chart_right_incomp # These are not used later.
if self.single_root_edge:
left = chart_right_comp.left()[:, 1, :-1]
right = chart_left_comp.right_unmasked_non_empty(n-1)[:, 1, :-1]
arcs = semiring.wrap(params[0, 1:])
result = semiring.einsum("si,si,si->s", left, right, arcs, key=keys[n])
else:
result = chart_left_comp.get_entries(n)[:, 0]
return result
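# Construction sketch (illustrative, not part of the original module): a batch
# of dense arc scores over n nodes, with position 0 acting as the artificial
# ROOT and the single-root-edge constraint enabled. The call to `argmax()`
# assumes the standard SynJax Distribution interface defined elsewhere.
def _example():  # pragma: no cover
  b, n = 2, 6
  log_potentials = jax.random.normal(jax.random.PRNGKey(0), (b, n, n))
  dist = SpanningTreeProjectiveCRF(log_potentials, single_root_edge=True)
  # One highest-scoring projective tree per batch entry, as an n x n matrix.
  return dist.argmax()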
|
synjax-master
|
synjax/_src/spanning_tree_projective_crf.py
|
# Copyright 2023 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Manages global SynJax configuration."""
import contextlib
import dataclasses
import functools
from typing import Literal
from synjax._src.typing import typed # pylint: disable=g-importing-member
@typed
@functools.partial(dataclasses.dataclass, frozen=True)
class SynJaxConfig():
"""SynJax configuration."""
use_strict_max: bool = False
checkpoint_loops: bool = True
checkpoint_semiring_einsum: bool = True
# Matrix-Tree Theorem settings
mtt_shift_log_potentials: bool = True
mtt_logdet_method: Literal["lu", "qr"] = "lu"
mtt_inv_method: Literal["solve", "qr"] = "solve"
mtt_inv_matmul_precision: Literal["default", "high", "highest"] = "default"
# CTC settings
ctc_use_optax: bool = False
# Projective Spanning Trees settings
projective_argmax_algorithm: Literal["Kuhlmann", "Eisner"] = "Kuhlmann"
# Linear-Chain CRF settings
linear_chain_crf_forward_algorithm: Literal["sequential", "parallel"] = (
"sequential")
_config = SynJaxConfig()
def get_config() -> SynJaxConfig:
return _config
def set_config(**settings) -> None:
global _config
_config = dataclasses.replace(_config, **settings)
@contextlib.contextmanager
def config_context(**settings):
prior_settings = dataclasses.asdict(get_config())
set_config(**settings)
yield get_config()
set_config(**prior_settings)
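# Usage sketch (illustrative, not part of the original module): settings changed
# inside `config_context` apply only within the `with` block, and the previous
# configuration is restored on exit.
def _example_config_usage():  # pragma: no cover
  with config_context(projective_argmax_algorithm="Eisner") as cfg:
    assert cfg.projective_argmax_algorithm == "Eisner"
  # Outside the context the prior setting (by default "Kuhlmann") is back.
  return get_config().projective_argmax_algorithm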
|
synjax-master
|
synjax/_src/config.py
|
# Copyright 2023 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implementation of PCFG."""
# pylint: disable=g-multiple-import, g-importing-member
# pylint: disable=invalid-name
from typing import NamedTuple, Optional, Union
import jax
import jax.numpy as jnp
from jaxtyping import Array, Float, Int32
from synjax._src import distribution
from synjax._src.config import get_config
from synjax._src.typing import typed, Key, Shape
from synjax._src.utils import chart_struct
from synjax._src.utils.semirings import Semiring
from synjax._src.utils.special import roll
class Event(NamedTuple):
chart: Union[Float[Array, "*batch n n nt"], Shape]
tags: Union[Float[Array, "*batch n pt"], Shape]
class GeneralizedPCFG(distribution.SemiringDistribution):
"""Probabilistic Context-Free Grammar.
Note that this is a conditional PCFG, i.e. it is a distribution over trees
defined by a PCFG conditioned on a provided sentence. Because of that,
calling dist.log_prob(tree) returns log p(tree | sentence; pcfg). To get the
joint log-probability of a tree and a sentence, log p(tree, sentence; pcfg),
call dist.unnormalized_log_prob(tree). For a short description of the
normalization see Eisner (2016, Section 7.1); for a longer description see
Nederhof and Satta (2003).
References:
Eisner 2016 - Section 7.1: https://aclanthology.org/W16-5901.pdf#page=7
Nederhof and Satta 2003: https://aclanthology.org/W03-3016.pdf
"""
preterminal_scores: Float[Array, "*batch n pt"]
root: Float[Array, "*batch nt"]
rule: Float[Array, "*batch nt nt+pt nt+pt"]
lengths: Int32[Array, "*batch"]
@typed
def __init__(
self, *,
preterminal_scores: Float[Array, "*batch n pt"],
root: Float[Array, "*batch nt"],
rule: Float[Array, "*batch nt nt+pt nt+pt"],
lengths: Optional[Int32[Array, "*batch"]] = None):
super().__init__(log_potentials=None, struct_is_isomorphic_to_params=False)
self.root = jax.nn.log_softmax(root, -1)
self.rule = jax.nn.log_softmax(rule, (-1, -2))
self.preterminal_scores = preterminal_scores
if lengths is None:
lengths = jnp.full(self.batch_shape, self.size_sentence)
self.lengths = lengths
@property
def size_sentence(self) -> int:
return self.preterminal_scores.shape[-2]
@property
def size_nonterminals(self) -> int:
return self.rule.shape[-3]
@property
def size_preterminals(self) -> int:
return self.rule.shape[-2] - self.rule.shape[-3]
@property
def event_shape(self) -> Event:
chart_shape = self.size_sentence, self.size_sentence, self.size_nonterminals
preterm_shape = self.size_sentence, self.size_preterminals
return Event(chart_shape, preterm_shape)
@property
def batch_shape(self) -> Shape:
return self.rule.shape[:-3]
@typed
def _structure_forward(self, base_struct: Event, semiring: Semiring, key: Key
) -> Float[Array, "s"]:
base_chart, base_preterm = base_struct
keys = jax.random.split(key, self.size_sentence+2)
rule_x_y1_z1 = semiring.wrap(
self.rule[:, self.size_nonterminals:, self.size_nonterminals:])
rule_x_y1_z = semiring.wrap(
self.rule[:, self.size_nonterminals:, :self.size_nonterminals])
rule_x_y_z1 = semiring.wrap(
self.rule[:, :self.size_nonterminals, self.size_nonterminals:])
rule_x_y_z = semiring.wrap(
self.rule[:, :self.size_nonterminals, :self.size_nonterminals])
##########################################################################
############################ SPAN SIZE 1 ### START #######################
# This establishes the connection between the term entries and
# the diagonal in the base_struct. This info is implicitly used by
# grad to put gradients in two places.
term = semiring.wrap(self.preterminal_scores + base_preterm) # s n pt
##########################################################################
# Here a chart is constructed by cutting off the useless parts of base_struct.
chart = chart_struct.from_cky_table(semiring.wrap(base_chart))
##########################################################################
############################ SPAN SIZE 2 ### START #######################
# The binary rule that has only terminals on the RHS is used
# if and only if the span is of size 2.
x = semiring.einsum("siy,siz,sxyz->six",
term, roll(term, -1, axis=1), rule_x_y1_z1, key=keys[2])
chart = chart.set_entries(2, semiring.mul(chart.get_entries(2), x))
##########################################################################
def loop(chart: chart_struct.Chart, d: Array):
akey = jax.random.split(keys[d], 4)
############################### X -> Y Z #################################
y = chart.left() # S,N,N,NT
z = chart.right(d, semiring, exclude_word_nodes=True) # S,N,N,NT
xc = semiring.einsum("sijy,sijz,sxyz->six", y, z, rule_x_y_z, key=akey[0])
############################### X -> Y1 Z ###############################
y1 = term # S,N,PT
z = roll(chart.get_entries(d-1), -1, axis=1) # S,N,NT
xb = semiring.einsum("siy,siz,sxyz->six", y1, z, rule_x_y1_z, key=akey[1])
############################### X -> Y Z1 ###############################
y = chart.get_entries(d-1) # S,N,NT
z1 = roll(term, -d+1, axis=1) # S,N,PT
xa = semiring.einsum("siy,siz,sxyz->six", y, z1, rule_x_y_z1, key=akey[2])
######################### combine all variations #########################
x = semiring.add(xa, xb, xc, key=akey[3])
return chart.set_entries(d, semiring.mul(chart.get_entries(d), x)), None
if get_config().checkpoint_loops:
loop = jax.checkpoint(loop)
chart, _ = jax.lax.scan(loop, chart, jnp.arange(3, self.size_sentence+1))
############################ ROOT NODE ### START #######################
lengths = self.lengths if self.lengths is not None else self.size_sentence
x = chart.pick_length(lengths) # S,NT
root = semiring.wrap(self.root) # S,NT
x = semiring.sum(semiring.mul(x, root), key=keys[-1], axis=-1) # S
############################ ROOT NODE ### END #######################
return x
class PCFG(GeneralizedPCFG):
__doc__ = GeneralizedPCFG.__doc__
word_ids: Int32[Array, "*batch n"]
emission: Float[Array, "*batch pt voc"]
@typed
def __init__(self, emission: Float[Array, "*batch pt voc"],
root: Float[Array, "*batch nt"],
rule: Float[Array, "*batch nt nt+pt nt+pt"],
word_ids: Int32[Array, "*batch n"],
lengths: Optional[Int32[Array, "*batch"]] = None):
self.word_ids = word_ids
self.emission = emission
emission = jax.nn.log_softmax(emission, -1)
preterm_scores = jnp.take_along_axis(emission, word_ids[..., None, :], -1)
preterm_scores = jnp.swapaxes(preterm_scores, -1, -2)
super().__init__(root=root, rule=rule, lengths=lengths,
preterminal_scores=preterm_scores)
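# Construction sketch (illustrative, not part of the original module) with
# nt non-terminals, pt pre-terminals, a vocabulary of size voc and a batch of
# token-id sequences. The call to `log_partition()` assumes the SynJax
# Distribution interface; under the conditional reading described in the class
# docstring it corresponds to the log-marginal of the sentence, i.e. the
# normaliser relating joint and conditional tree scores.
def _example():  # pragma: no cover
  b, n, nt, pt, voc = 2, 4, 3, 2, 50
  keys = jax.random.split(jax.random.PRNGKey(0), 4)
  dist = PCFG(
      emission=jax.random.normal(keys[0], (b, pt, voc)),
      root=jax.random.normal(keys[1], (b, nt)),
      rule=jax.random.normal(keys[2], (b, nt, nt + pt, nt + pt)),
      word_ids=jax.random.randint(keys[3], (b, n), 0, voc))
  return dist.log_partition()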
|
synjax-master
|
synjax/_src/constituency_pcfg.py
|
# Copyright 2023 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for HMM."""
from absl.testing import absltest
import distrax
import jax
import jax.numpy as jnp
from synjax._src import distribution_test
from synjax._src import hmm
from tensorflow_probability.substrates import jax as tfp
def chain_is_connected(samples) -> bool:
scores = jax.lax.associative_scan(jnp.matmul, samples, axis=-3)
scores = scores[..., -1, :, :].sum((-1, -2))
return jnp.all(scores == 1)
def tfp_marginals(
init_logits: jax.Array, transition_logits: jax.Array,
emission_dist: ..., observations: jax.Array) -> jax.Array:
tfd = tfp.distributions
initial_distribution = tfd.Categorical(logits=init_logits)
transition_distribution = tfd.Categorical(logits=transition_logits)
observation_distribution = tfd.Categorical(logits=emission_dist)
model = tfd.HiddenMarkovModel(
initial_distribution=initial_distribution,
transition_distribution=transition_distribution,
observation_distribution=observation_distribution,
num_steps=observations.shape[-1])
return jnp.exp(model.posterior_marginals(observations).logits)
def distrax_marginals(
init_logits: jax.Array, transition_logits: jax.Array,
emission_dist: ..., observations: jax.Array) -> jax.Array:
observation_distribution = distrax.Categorical(logits=emission_dist)
dhmm = distrax.HMM(
trans_dist=distrax.Categorical(logits=transition_logits),
init_dist=distrax.Categorical(logits=init_logits),
obs_dist=observation_distribution)
return dhmm.forward_backward(observations)[2]
class HMMTest(distribution_test.DistributionTest):
def create_random_batched_dists(self, key):
b, n, t = 3, 6, 4
keys = jax.random.split(key, 5)
dists = []
kwargs = dict(
init_logits=jax.random.uniform(keys[0], (b, t)),
transition_logits=jax.random.uniform(keys[1], (b, t, t)))
for is_categorical in [True, False]:
kwargs = dict(
init_logits=jax.random.uniform(keys[0], (b, t)),
transition_logits=jax.random.uniform(keys[1], (b, t, t)))
if is_categorical:
v = 100
kwargs["emission_dist"] = jax.random.uniform(keys[2], (b, t, v))
kwargs["observations"] = jax.random.randint(keys[3], (b, n), 0, v)
else:
d = 100
kwargs["emission_dist"] = distrax.MultivariateNormalDiag(
jax.random.uniform(keys[2], (b, t, d)),
jax.random.uniform(keys[3], (b, t, d), maxval=10),
)
kwargs["observations"] = jax.random.uniform(keys[4], (b, n, d))
dists.append(hmm.HMM(**kwargs))
return dists
def create_symmetric_batched_dists(self):
b, n, t, v = 3, 6, 4, 100
kwargs = dict(
init_logits=jnp.zeros((b, t)),
transition_logits=jnp.zeros((b, t, t)),
emission_dist=jnp.zeros((b, t, v)),
observations=jax.random.randint(jax.random.PRNGKey(0), (b, n), 0, v),
)
return [hmm.HMM(**kwargs)]
def create_invalid_shape_distribution(self):
b, n, t, v = 3, 6, 4, 100
kwargs = dict(
init_logits=jnp.zeros((b, t)),
transition_logits=jnp.zeros((b, t, t)),
emission_dist=jnp.zeros((b, t+1, v)),
observations=jax.random.randint(jax.random.PRNGKey(0), (b, n), 0, v),
)
return hmm.HMM(**kwargs)
def analytic_log_count(self, dist) -> jax.Array:
t = dist.log_potentials.shape[-1]
return dist.lengths * jnp.log(t)
def assert_is_symmetric(self, dist, marginals) -> bool:
self.assert_allclose(marginals[..., 1:-1, :, :], marginals[..., 2:, :, :])
def assert_batch_of_valid_samples(self, dist, samples):
self.assertTrue(chain_is_connected(samples),
"The chain needs to be connected")
def assert_valid_marginals(self, dist, marginals):
self.assert_allclose(marginals.sum((-1, -2)), 1)
def test_agrees_with_tfp_and_distrax(self):
n, t, v = 6, 4, 100
keys = jax.random.split(jax.random.PRNGKey(0), 5)
kwargs = dict(
init_logits=jax.random.uniform(keys[0], (t,)),
transition_logits=jax.random.uniform(keys[1], (t, t)),
emission_dist=jax.random.uniform(keys[2], (t, v)),
observations=jax.random.randint(keys[3], (n,), 0, v))
synjax_hmm_marginals = hmm.HMM(**kwargs).marginals().sum(-2)
self.assert_allclose(synjax_hmm_marginals, distrax_marginals(**kwargs))
self.assert_allclose(synjax_hmm_marginals, tfp_marginals(**kwargs))
if __name__ == "__main__":
absltest.main()
|
synjax-master
|
synjax/_src/hmm_test.py
|
# Copyright 2023 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for PCFG."""
from absl.testing import absltest
import jax
import jax.numpy as jnp
from synjax._src import constituency_pcfg
from synjax._src import distribution_test
from synjax._src.utils import special
PCFG = constituency_pcfg.PCFG
def is_symmetric(x, axis1, axis2):
x1 = jnp.rot90(x, axes=(axis1, axis2))
x2 = jnp.swapaxes(x1, axis1, axis2)
return jnp.allclose(x2, x1)
class PcfgTest(distribution_test.DistributionTest):
def _create_dist(self, f):
b, n, nt, pt, voc = 2, 4, 2, 3, 40
log_potentials = dict(
root=f((b, nt)),
rule=f((b, nt, nt+pt, nt+pt)),
emission=f((b, pt, voc))
)
word_ids = jnp.tile(jnp.arange(n), (b, 1))
return [PCFG(**log_potentials, word_ids=word_ids)]
def create_random_batched_dists(self, key: jax.random.KeyArray):
return self._create_dist(
lambda shape: jnp.log(jax.random.uniform(key, shape)))
def create_invalid_shape_distribution(self):
b, n, nt, pt, voc = 2, 4, 2, 3, 40
f = jnp.zeros
return PCFG(root=f((b, nt)),
rule=f((b, nt, nt+pt, nt)),
emission=f((b, pt, voc)),
word_ids=jnp.tile(jnp.arange(n), (b, 1)))
def create_symmetric_batched_dists(self):
return self._create_dist(jnp.zeros)
def analytic_log_count(self, dist) -> jax.Array:
log_tree_count = special.log_catalan(dist.lengths-1)
log_nt_labeling_count = (dist.lengths-1) * jnp.log(dist.size_nonterminals)
log_t_labeling_count = dist.lengths * jnp.log(dist.size_preterminals)
return log_tree_count + log_nt_labeling_count + log_t_labeling_count
def assert_is_symmetric(self, dist, marginals) -> bool:
chart_marginals, preterm_marginals = marginals
self.assertTrue(is_symmetric(chart_marginals, 1, 2))
self.assert_allclose(preterm_marginals, preterm_marginals[..., ::-1, :])
def assert_batch_of_valid_samples(self, dist, samples):
chart_marginals, preterm_marginals = samples
self.assert_allclose(jnp.sum(chart_marginals, axis=(-1, -2, -3)),
dist.lengths-1)
self.assert_allclose(preterm_marginals.sum((-1, -2)), dist.lengths)
def assert_valid_marginals(self, dist, marginals):
chart_marginals, preterm_marginals = marginals
for i in range(dist.batch_shape[0]):
n = dist.lengths[i]
root_prob = chart_marginals[i, ..., 0, n-1, :].sum(-1)
self.assert_allclose(root_prob, 1)
self.assert_zeros_and_ones(preterm_marginals.sum(-1))
self.assert_allclose(preterm_marginals.sum((-1, -2)), dist.lengths)
if __name__ == "__main__":
absltest.main()
|
synjax-master
|
synjax/_src/constituency_pcfg_test.py
|
# Copyright 2023 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Distribution over CTC alignments."""
from typing import Optional
import jax
import jax.numpy as jnp
# pylint: disable=g-multiple-import, g-importing-member
from jaxtyping import Array, Float, Int32, Num
from synjax._src.alignment_monotone_general import GeneralMonotoneAlignmentCRF
from synjax._src.config import get_config
from synjax._src.constants import INF
from synjax._src.distribution import SemiringDistribution
from synjax._src.typing import Key, Shape, typed
from synjax._src.utils.semirings import Semiring
class CTC(SemiringDistribution):
"""Distribution over CTC alignments.
References:
Graves et al, 2006: https://www.cs.toronto.edu/~graves/icml_2006.pdf
Hannun, 2017: https://distill.pub/2017/ctc/
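Example:
A minimal illustrative sketch; the sizes and label values below are made up
and only the constructor arguments documented in __init__ are used:
logits = jax.random.normal(jax.random.PRNGKey(0), (2, 10, 5))  # (batch, n, vocab)
labels = jnp.array([[1, 2, 3], [2, 2, 4]])                     # (batch, labels)
dist = CTC(logits, labels, blank_id=0)
loss = dist.loss()        # (batch,) negative log-partition, i.e. the CTC loss
soft = dist.marginals()   # (batch, 2*labels+1, n) soft alignment table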
"""
log_potentials: Float[Array, "*batch n vocab"]
labels_extended: Int32[Array, "*batch 2*labels+1"]
label_lengths: Int32[Array, "*batch"]
input_lengths: Int32[Array, "*batch"]
@typed
def __init__(
self, log_potentials: Float[Array, "*batch n vocab"],
labels: Int32[Array, "*batch labels"], *,
label_lengths: Optional[Int32[Array, "*batch"]] = None,
input_lengths: Optional[Int32[Array, "*batch"]] = None,
blank_id: int = 0):
super().__init__(log_potentials=log_potentials,
struct_is_isomorphic_to_params=False)
# Inserts blank_id as first symbol, last symbol, and in between all words.
self.labels_extended = jnp.full(
labels.shape[:-1]+(labels.shape[-1]*2+1,), blank_id
).at[..., 1::2].set(labels.astype(jnp.int32))
self.log_potentials = jax.nn.log_softmax(log_potentials, axis=-1)
*batch_shape, cols, _ = self.log_potentials.shape
rows = self.labels_extended.shape[-1]
if label_lengths is None:
self.label_lengths = jnp.full(batch_shape, rows)
else:
self.label_lengths = 2*label_lengths+1
if input_lengths is None:
self.input_lengths = jnp.full(batch_shape, cols+2)
else:
self.input_lengths = input_lengths+2
@property
def batch_shape(self) -> Shape:
return self.log_potentials.shape[:-2]
@property
def event_shape(self) -> Shape:
return self.labels_extended.shape[-1], self.log_potentials.shape[-2]
@typed
def _structure_forward(
self, base_struct: Float[Array, "labels n"], semiring: Semiring,
key: Key) -> Float[Array, "s"]:
labels_extended = self.labels_extended
voc = self.log_potentials.shape[-1]
table = jnp.einsum("...nv,...lv->...ln", self.log_potentials,
jax.nn.one_hot(labels_extended, voc))
table += base_struct
# Insert one extra column in beginning and end to account for
# the possibility of two beginning and two ending states (see Distill blog).
extra_col = jnp.zeros(table.shape[:-1]+(1,))
table = jnp.concatenate((extra_col, table, extra_col), axis=-1)
table = table.at[1:, 0].set(-INF) # first artificial state
table = table.at[2:, 1].set(-INF) # first two states are valid
step_0 = step_1 = table
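# CTC allows a jump of two rows (skipping over a blank) only when the two
# neighbouring non-blank labels differ; repeated labels must be separated by
# an explicit blank, which is what the mask below enforces.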
non_repetitions = labels_extended != jnp.roll(labels_extended, 2, axis=-1)
step_2 = jnp.where(non_repetitions[..., None], table, -INF)
step_2 = step_2.at[..., 1].set(-INF)
step_2 = jnp.where(jnp.arange(step_2.shape[-1]) == self.input_lengths-1,
-INF, step_2)
dist = GeneralMonotoneAlignmentCRF(
(step_0, step_1, step_2), None,
lengths_rows=self.label_lengths, lengths_cols=self.input_lengths)
# pylint: disable=protected-access
return dist._structure_forward(jnp.zeros(dist.event_shape), semiring, key)
@typed
def log_partition(self, use_optax: Optional[bool] = None
) -> Float[Array, "*batch"]:
if use_optax is None:
use_optax = get_config().ctc_use_optax
if use_optax:
n = self.log_potentials.shape[-2]
l = self.labels_extended.shape[-1] // 2
logit_paddings = jnp.arange(n) >= self.input_lengths[:, None]
label_paddings = jnp.arange(l) >= (self.label_lengths[:, None]//2)
labels = self.labels_extended[..., 1::2]
logits = self.log_potentials
# pylint: disable=g-import-not-at-top
# pylint: disable=import-outside-toplevel
import optax
return -optax.ctc_loss(logits, logit_paddings, labels, label_paddings)
else:
return super().log_partition()
@typed
def marginals_for_template_variables(self, **kwargs) -> "CTC":
# This override is needed because Optax internally does normalization.
return super().marginals_for_template_variables(use_optax=False)
@typed
def log_count(self) -> Float[Array, "*batch"]:
"""Log of the count of structures in the support."""
# This override is needed because Optax internally does normalization.
return super().log_count(use_optax=False)
@typed
def loss(self, use_optax: Optional[bool] = None) -> Float[Array, "*batch"]:
return -self.log_partition(use_optax=use_optax)
@typed
def alignment_to_labels(self, alignment: Num[Array, "*batch labels n"]
) -> Num[Array, "*batch n"]:
return jnp.einsum("...ln,...l->...ln", alignment, self.labels_extended
).max(-2).astype(int)
@typed
def log_prob_labels(self, labels: Int32[Array, "*batch n"]
) -> Float[Array, "*batch"]:
n, voc = self.log_potentials.shape[-2:]
scores_per_col = jnp.einsum(
"...nv,...nv->...n", jax.nn.one_hot(labels, voc), self.log_potentials)
mask = jnp.arange(n) < self.input_lengths[..., None]
return jnp.sum(scores_per_col*mask, axis=-1)
|
synjax-master
|
synjax/_src/ctc.py
|
# Copyright 2023 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for linear_chain_crf."""
from absl.testing import absltest
import jax
import jax.numpy as jnp
from synjax._src import distribution_test
from synjax._src import linear_chain_crf
def chain_is_connected(samples) -> bool:
scores = jax.lax.associative_scan(jnp.matmul, samples, axis=-3)
scores = scores[..., -1, :, :].sum((-1, -2))
return jnp.all(scores == 1)
class LinearChainTest(distribution_test.DistributionTest):
def create_random_batched_dists(self, key):
b, n, t = 3, 6, 4
log_potentials = jnp.log(jax.random.uniform(key, (b, n, t, t)))
return [linear_chain_crf.LinearChainCRF(log_potentials)]
def create_symmetric_batched_dists(self):
b, n, t = 3, 6, 4
log_potentials = jnp.zeros((b, n, t, t))
return [linear_chain_crf.LinearChainCRF(log_potentials)]
def create_invalid_shape_distribution(self):
b, n, t = 3, 6, 4
log_potentials = jnp.zeros((b, n, t, t-1))
return linear_chain_crf.LinearChainCRF(log_potentials)
def analytic_log_count(self, dist) -> jax.Array:
t = dist.log_potentials.shape[-1]
return dist.lengths * jnp.log(t)
def assert_is_symmetric(self, dist, marginals) -> bool:
self.assert_allclose(marginals[..., 1:-1, :, :], marginals[..., 2:, :, :])
def assert_batch_of_valid_samples(self, dist, samples):
self.assertTrue(chain_is_connected(samples),
"The chain needs to be connected")
def assert_valid_marginals(self, dist, marginals):
self.assert_allclose(marginals.sum((-1, -2)), 1)
def test_algorithm_sequential_same_as_parallel(self):
dist = self.create_random_batched_dists(jax.random.PRNGKey(0))[0]
m1 = dist.marginals(forward_algorithm="sequential")
m2 = dist.marginals(forward_algorithm="parallel")
self.assert_allclose(m1, m2)
if __name__ == "__main__":
absltest.main()
|
synjax-master
|
synjax/_src/linear_chain_crf_test.py
|
# Copyright 2023 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tensor-Decomposition PCFG based on Yang et al (2022) and Cohen et al 2013.
References:
Yang et al, 2022: https://aclanthology.org/2022.naacl-main.353.pdf
Cohen et al, 2013: https://aclanthology.org/N13-1052.pdf
"""
# pylint: disable=g-multiple-import, g-importing-member
# pylint: disable=invalid-name
import functools
from typing import NamedTuple, Optional, Union
import equinox as eqx
import jax
import jax.numpy as jnp
from jaxtyping import Array, Float, Int32
from synjax._src.config import get_config
from synjax._src.constituency_tree_crf import TreeCRF
from synjax._src.distribution import SemiringDistribution
from synjax._src.typing import Shape, Key, typed
from synjax._src.utils import chart_struct
from synjax._src.utils.semirings import Semiring, LogSemiring
from synjax._src.utils.special import max_one_hot
class Event(NamedTuple):
chart: Union[Float[Array, "*batch n n rank"], Shape]
tags: Union[Float[Array, "*batch n pt"], Shape]
class GeneralizedTensorDecompositionPCFG(SemiringDistribution):
"""Tensor-Decomposition Probabilistic Context-Free Grammar.
Cohen et al (2013) showed that a PCFG with a large number of non-terminals
can be approximated using CPD tensor decomposition. Yang et al (2022) used
this to do efficient grammar induction with a large number of non-terminals
and a relatively small rank dimension. They avoid the tensor-decomposition
step by keeping all parameters in the rank space at all times and enforcing
all decomposed rules to be normalized. This is the same as the "unary trick"
decomposition of n-ary rules from Stanojević and Sima'an (2015).
Note that this is a conditional TD-PCFG, i.e. a distribution over trees
defined by a TD-PCFG conditioned on a provided sentence. Because of that,
calling dist.log_prob(tree) returns log p(tree | sentence; td-pcfg).
To get the joint log-probability of a tree and a sentence,
log p(tree, sentence; td-pcfg), call dist.unnormalized_log_prob(tree).
For a short description of normalization see Eisner (2016, S7.1); for a
longer description see Nederhof and Satta (2003).
References:
Yang et al, 2022 - Section 4.2: https://aclanthology.org/2022.naacl-main.353.pdf
Cohen et al, 2013 - Section 7.1: https://aclanthology.org/N13-1052.pdf#page=8
Stanojević and Sima'an, 2015 - Section 2: https://aclanthology.org/D15-1005.pdf#page=3
Eisner 2016 - Section 7.1: https://aclanthology.org/W16-5901.pdf#page=7
Nederhof and Satta 2003: https://aclanthology.org/W03-3016.pdf
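Example:
A minimal illustrative sketch with made-up sizes (batch=2, n=5, nt=3, pt=2,
rank=4), using only the constructor arguments documented below:
f = lambda shape: jax.random.normal(jax.random.PRNGKey(0), shape)
dist = GeneralizedTensorDecompositionPCFG(
preterminal_scores=f((2, 5, 2)), root=f((2, 3)), nt_to_rank=f((2, 3, 4)),
rank_to_left_nt=f((2, 4, 5)), rank_to_right_nt=f((2, 4, 5)))
dist.log_partition()                             # shape (2,)
chart, tags = dist.mbr(marginalize_labels=True)  # MBR-decoded structure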
""" # pylint: disable=line-too-long
size_sentence: int = eqx.static_field()
size_nonterminals: int = eqx.static_field()
size_preterminals: int = eqx.static_field()
size_rank: int = eqx.static_field()
preterminal_scores: Float[Array, "*batch n pt"]
root: Float[Array, "*batch nt"]
nt_to_rank: Float[Array, "*batch nt rank"]
rank_to_left_nt: Float[Array, "*batch rank nt+pt"]
rank_to_right_nt: Float[Array, "*batch rank nt+pt"]
lengths: Int32[Array, "*batch"]
@typed
def __init__(self,
*,
preterminal_scores: Float[Array, "*batch n pt"],
root: Float[Array, "*batch nt"],
nt_to_rank: Float[Array, "*batch nt rank"],
rank_to_left_nt: Float[Array, "*batch rank nt+pt"],
rank_to_right_nt: Float[Array, "*batch rank nt+pt"],
lengths: Optional[Int32[Array, "*batch"]] = None):
super().__init__(log_potentials=None, struct_is_isomorphic_to_params=False)
normalize = functools.partial(jax.nn.log_softmax, axis=-1)
self.preterminal_scores = preterminal_scores
self.root = normalize(root)
self.nt_to_rank = normalize(nt_to_rank)
self.rank_to_left_nt = normalize(rank_to_left_nt)
self.rank_to_right_nt = normalize(rank_to_right_nt)
self.size_sentence = preterminal_scores.shape[-2]
self.size_nonterminals = root.shape[-1]
self.size_preterminals = rank_to_left_nt.shape[-1] - self.size_nonterminals
self.size_rank = nt_to_rank.shape[-1]
if lengths is None:
lengths = jnp.full(preterminal_scores.shape[:-2], self.size_sentence)
self.lengths = lengths
@property
def event_shape(self) -> Event:
chart_shape = self.size_sentence, self.size_sentence, self.size_rank
preterm_shape = self.size_sentence, self.size_preterminals
return Event(chart_shape, preterm_shape)
@property
def batch_shape(self) -> Shape:
return self.root.shape[:-1]
@typed
def _structure_forward(self, base_struct: Event, semiring: Semiring, key: Key
) -> Float[Array, "s"]:
base_chart, base_preterm = base_struct
sr = semiring # Simple renaming because semiring is used frequently here.
if not isinstance(sr, LogSemiring):
raise NotImplementedError("This distribution supports only LogSemiring.")
n = self.size_sentence
nt = self.size_nonterminals
# These rules go bottom-up because that is the way CKY parsing works.
left_unary = sr.einsum("srx,sxf->srf", # s rank_binary(r)->rank_left(f)
sr.wrap(self.rank_to_left_nt[:, :nt]),
sr.wrap(self.nt_to_rank))
right_unary = sr.einsum("srx,sxf->srf", # s rank_binary->rank_right
sr.wrap(self.rank_to_right_nt[:, :nt]),
sr.wrap(self.nt_to_rank))
root_unary = sr.einsum("sx,sxr->sr", # s rank_of_binary_root_node
sr.wrap(self.root),
sr.wrap(self.nt_to_rank))
base_chart = chart_struct.from_cky_table(sr.wrap(base_chart))
left_chart = chart_struct.from_cky_table(sr.one((n, n, self.size_rank)))
right_chart = chart_struct.from_cky_table(sr.one((n, n, self.size_rank)))
keys = jax.random.split(key, 3*n+3)
# Span size 1
preterminal_scores = sr.wrap(self.preterminal_scores + base_preterm)
left_chart = left_chart.set_entries(
1, sr.einsum("snp,srp->snr",
preterminal_scores, sr.wrap(self.rank_to_left_nt[:, nt:]),
key=keys[2]))
right_chart = right_chart.set_entries(
1, sr.einsum("snp,srp->snr",
preterminal_scores, sr.wrap(self.rank_to_right_nt[:, nt:]),
key=keys[3]))
def loop(state, d):
left_chart, right_chart = state
rank_state = sr.einsum("sir,sijr,sijr->sir",
base_chart.get_entries(d),
left_chart.left(),
right_chart.right(d, sr), key=keys[3*d])
left_chart = left_chart.set_entries(d, sr.einsum(
"sfr,sir->sif", left_unary, rank_state, key=keys[3*d+1]))
right_chart = right_chart.set_entries(d, sr.einsum(
"sfr,sir->sif", right_unary, rank_state, key=keys[3*d+2]))
return (left_chart, right_chart), rank_state[:, 0]
if get_config().checkpoint_loops:
loop = jax.checkpoint(loop)
_, rank_states = jax.lax.scan(loop, (left_chart, right_chart),
jnp.arange(2, n+1))
rank_state = rank_states[self.lengths-2] # s r
return sr.einsum("sr,sr->s", rank_state, root_unary, key=keys[0])
@typed
def mbr(self, *, marginalize_labels: bool, **kwargs) -> Event:
"""Minimum-Bayes Risk decoding.
Args:
marginalize_labels: Flag that controls whether the metric used by MBR is
labelled or unlabelled span recall, where the labels are ranks,
not non-terminals.
**kwargs: Other optional kwargs that will be used by TreeCRF for decoding.
Returns:
The decoded structure. If marginalize_labels is True, the last axis, the
one reserved for rank, will be of size 1.
"""
chart_log_marginals, preterm_log_marginals = self.log_marginals()
chart_log_marginals *= 1 - jnp.eye(self.size_sentence)[:, :, None]
if marginalize_labels:
chart_log_marginals = jax.nn.logsumexp(
chart_log_marginals, axis=-1, keepdims=True)
tree = TreeCRF(chart_log_marginals, lengths=self.lengths).argmax(**kwargs)
tree = jnp.where(jnp.eye(self.size_sentence)[:, :, None], 0, tree)
return Event(tree, max_one_hot(preterm_log_marginals, -1))
class TensorDecompositionPCFG(GeneralizedTensorDecompositionPCFG):
__doc__ = GeneralizedTensorDecompositionPCFG.__doc__
word_ids: Int32[Array, "*batch n"]
emission: Float[Array, "*batch pt voc"]
@typed
def __init__(self, emission: Float[Array, "*batch pt voc"],
root: Float[Array, "*batch nt"],
nt_to_rank: Float[Array, "*batch nt rank"],
rank_to_left_nt: Float[Array, "*batch rank nt+pt"],
rank_to_right_nt: Float[Array, "*batch rank nt+pt"],
word_ids: Int32[Array, "*batch n"],
lengths: Optional[Int32[Array, "*batch"]] = None):
"""Constructs standard version of Tensor-Decomposition PCFG."""
self.word_ids = word_ids
self.emission = emission
emission = jax.nn.log_softmax(emission, -1)
preterm_scores = jnp.take_along_axis(emission, word_ids[..., None, :], -1)
preterm_scores = jnp.swapaxes(preterm_scores, -1, -2)
super().__init__(
root=root, nt_to_rank=nt_to_rank, rank_to_left_nt=rank_to_left_nt,
rank_to_right_nt=rank_to_right_nt, lengths=lengths,
preterminal_scores=preterm_scores)
|
synjax-master
|
synjax/_src/constituency_tensor_decomposition_pcfg.py
|
# Copyright 2023 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Distribution representing linear chain CRF."""
# pylint: disable=g-multiple-import, g-importing-member
import math
from typing import Literal, Optional
import jax
import jax.numpy as jnp
from jaxtyping import Array, Float, Int32
from synjax._src.config import get_config
from synjax._src.constants import INF
from synjax._src.distribution import SemiringDistribution
from synjax._src.typing import Key, typed
from synjax._src.utils.semirings import Semiring
@typed
class LinearChainCRF(SemiringDistribution):
"""Distribution representing linear chain CRF.
References:
Lafferty et al, 2001: https://repository.upenn.edu/cgi/viewcontent.cgi?article=1162&context=cis_papers
Sutton and McCallum, 2012: https://homepages.inf.ed.ac.uk/csutton/publications/crftutv2.pdf
Collins notes 2011 -- http://www.cs.columbia.edu/~mcollins/crf.pdf
""" # pylint: disable=line-too-long
log_potentials: Float[Array, "*batch n t t"]
lengths: Int32[Array, "*batch"]
def __init__(self, log_potentials: Float[Array, "*batch n t t"],
lengths: Optional[Int32[Array, "*batch"]] = None, **kwargs):
"""Linear Chain CRFs for a sequence of length n with t states.
References:
Lafferty et al, 2001: https://repository.upenn.edu/cgi/viewcontent.cgi?article=1162&context=cis_papers
Sutton and McCallum, 2012: https://homepages.inf.ed.ac.uk/csutton/publications/crftutv2.pdf
Collins notes 2011 -- http://www.cs.columbia.edu/~mcollins/crf.pdf
Args:
log_potentials:
For a sentence of n words log_potentials will have shape
(..., n, t, t). The entry log_potentials[i, t1, t2] represents
the log-potential of an edge (i-1, t1) -> (i, t2). In other words,
log_potentials[i] represents the edges entering the word at position i.
The zero-th transition matrix contains transitions from the initial
(non-word) state into the first word at position 0. This means that in
the 0th transition matrix all rows except for the 0th one are ignored.
lengths:
Lengths of each entry in the batch. It has the same shape as the
batch and dtype of jnp.int32. If it's not passed, the maximal length
will be assumed based on the log_potentials.shape[-3].
**kwargs: Additional optional args to pass to superclass constructors.
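Example:
A minimal illustrative sketch with made-up sizes (batch=2, n=7, t=5):
log_potentials = jax.random.normal(jax.random.PRNGKey(0), (2, 7, 5, 5))
dist = LinearChainCRF(log_potentials)
dist.log_partition()   # shape (2,)
dist.marginals()       # shape (2, 7, 5, 5), edge marginals
dist.argmax()          # 0/1 edge indicators of the best tag sequence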
"""
super().__init__(log_potentials=log_potentials, **kwargs)
if lengths is None:
lengths = jnp.full(self.batch_shape, self.event_shape[0])
self.lengths = lengths
@property
def event_shape(self):
return self.log_potentials.shape[-3:]
@typed
def _structure_forward(
self, base_struct: Float[Array, "n t t"], semiring: Semiring, key: Key,
forward_algorithm: Optional[Literal["sequential", "parallel"]] = None
) -> Float[Array, "s"]:
if forward_algorithm is None:
forward_algorithm = get_config().linear_chain_crf_forward_algorithm
if forward_algorithm == "sequential":
return self._structure_forward_sequential(base_struct, semiring, key)
elif forward_algorithm == "parallel":
return self._structure_forward_parallel(base_struct, semiring, key)
else:
raise NotImplementedError
@typed
def _structure_forward_sequential(
self, base_struct: Float[Array, "n t t"], semiring: Semiring,
key: Key) -> Float[Array, "s"]:
"""Forward algorithm with complexity O(n t^2)."""
base_struct = base_struct.at[0, 1:].set(-INF)
n, t = self.log_potentials.shape[-3:-1]
def loop(state, inp):
matrix, key = inp
state = semiring.einsum("si,sij->sj", state, matrix, key=key)
return state, state
if get_config().checkpoint_loops:
loop = jax.checkpoint(loop)
keys = jax.random.split(key, n+1) # (n+1, 2)
seq = semiring.wrap(base_struct+self.log_potentials) # (s, n, t, t)
seq = jnp.swapaxes(seq, 0, 1) # (n, s, t, t)
state = semiring.one(t)
_, states = jax.lax.scan(loop, state, (seq, keys[:-1]))
state = states[self.lengths-1]
return semiring.sum(state, axis=-1, key=keys[-1])
@typed
def _structure_forward_parallel(
self, base_struct: Float[Array, "n t t"], semiring: Semiring,
key: Key) -> Float[Array, "s"]:
"""Forward algorithm with parallel complexity O(log(n) t^3).
This is inspired by the algorithm of Hassan et al (2021) and used by
Rush (2020). This algorithm reduces parallel time with respect to length
dimension n but it increases it with respect to t. It additionally increases
sequential complexity to O(n log(n) t^3). In most cases the sequential
algorithm (the default one) should be faster, but for some extreme lengths
the parallel algorithm may be more numerically stable -- for the same reason
that pairwise summation can be more accurate than sequential summation
(Higham, 1993).
References:
Rush, 2020 - Section 6a: https://arxiv.org/pdf/2002.00876.pdf
Hassan et al, 2021: https://arxiv.org/pdf/2102.05743.pdf
Higham, 1993: https://doi.org/10.1137/0914050
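As an illustration of the reduction pattern (with plain matrix product
standing in for the semiring product): a chain of four transition matrices
A, B, C, D is combined as (A@B) and (C@D) in one parallel step and then as
(A@B)@(C@D) in the next, i.e. log2(4)=2 parallel steps instead of 3
sequential ones.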
Args:
base_struct: Dummy structure that will be used to track gradients.
semiring: Semiring used for the computation.
key: Key that will be used if semiring is a sampling semiring.
Returns:
Log-partition under a given semiring.
"""
base_struct = base_struct.at[0, 1:].set(-INF)
real_n, t = self.log_potentials.shape[-3: -1]
log_n = math.ceil(math.log(real_n, 2))
extension_shape = (int(2**log_n)-real_n, t, t)
base_struct_extended = jnp.concatenate(
(base_struct, jnp.zeros(extension_shape)), axis=-3)
base_struct_extended = _mask_out_base_struct(base_struct_extended,
self.lengths)
log_potentials = jnp.concatenate(
(self.log_potentials, jnp.zeros(extension_shape)), axis=-3)
seq = semiring.wrap(base_struct_extended+log_potentials) # (s, real_n, t, t)
keys = jax.random.split(key, log_n+1)
def loop(aseq, akey):
left = aseq[:, 0::2, :, :]
right = aseq[:, 1::2, :, :]
return semiring.einsum("snij,snjk->snik", left, right, key=akey)
if get_config().checkpoint_loops:
loop = jax.checkpoint(loop)
for step in range(log_n):
seq = loop(seq, keys[step])
matrix = seq.squeeze(-3)
return semiring.sum(matrix, axis=(-1, -2), key=keys[-1])
@typed
def _mask_out_base_struct(base_struct: Float[Array, "*batch n t t"],
lengths: Int32[Array, "*batch"]
) -> Float[Array, "*batch n t t"]:
"""Masks-out parts of base-struct that don't fit seqence length."""
n, t = base_struct.shape[-3:-1]
padding_mask = jnp.arange(0, n) >= lengths[..., None]
padding_mask = jnp.broadcast_to(padding_mask[..., None, None],
(*padding_mask.shape, t, t))
padding_mask = padding_mask != padding_mask.at[..., 0].set(False)
potentials_mask = jnp.arange(0, n) < lengths[..., None]
potentials_mask = jnp.broadcast_to(potentials_mask[..., None, None],
(*potentials_mask.shape, t, t))
return jnp.where(potentials_mask,
base_struct, jnp.where(padding_mask, 0, -INF))
|
synjax-master
|
synjax/_src/linear_chain_crf.py
|
# Copyright 2023 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Some constants used throughout SynJax."""
INF = 1e5
EPS = 1e-5
MTT_LOG_EPS = -15
TESTING_RELATIVE_TOLERANCE = 1e-4
TESTING_ABSOLUTE_TOLERANCE = 1e-4
|
synjax-master
|
synjax/_src/constants.py
|
# Copyright 2023 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Distribution of spanning trees."""
# pylint: disable=g-multiple-import,g-importing-member
from __future__ import annotations
from typing import cast, Optional, Union, Tuple
import equinox as eqx
import jax.numpy as jnp
from jaxtyping import Array, Float, Int32
from synjax._src.distribution import Distribution
from synjax._src.spanning_tree_non_projective_crf import SpanningTreeNonProjectiveCRF
from synjax._src.spanning_tree_projective_crf import SpanningTreeProjectiveCRF
from synjax._src.typing import Shape, Key, typed
class SpanningTreeCRF(Distribution):
"""Unified interface to all spanning tree distributions."""
directed: bool = eqx.static_field()
projective: bool = eqx.static_field()
single_root_edge: bool = eqx.static_field()
_dist: Union[SpanningTreeNonProjectiveCRF, SpanningTreeProjectiveCRF]
@typed
def __init__(self,
log_potentials: Float[Array, "*batch n n"],
*,
directed: bool,
projective: bool,
single_root_edge: bool,
lengths: Optional[Int32[Array, "*batch"]] = None):
"""Spanning Tree Conditional-Random Field distribution.
This distribution is used for modeling spanning trees of graphs with n
nodes. If the spanning tree is directed (i.e. an arborescence), the 0th
node is treated as the special root node by convention. If the graph
is undirected, the log-potentials will be symmetrized (this is a noop if
they are already symmetric). The implementation optionally allows for
a constraint that enforces the spanning trees to have only one edge coming
out of the root node. It also optionally allows for constraining trees to be
projective, oftentimes useful in dependency parsing of natural language.
See Koo et al (2007) for a standard version of this model.
References:
Koo et al, 2007: https://aclanthology.org/D07-1015.pdf
Stanojević, 2022: https://aclanthology.org/2022.emnlp-main.110.pdf
Stanojević and Cohen, 2021: https://aclanthology.org/2021.emnlp-main.823.pdf
Zmigrod et al, 2021: https://aclanthology.org/2021.emnlp-main.824v2.pdf
Zmigrod et al, 2021: https://arxiv.org/pdf/2008.12988.pdf
Colbourn et al, 1996: https://www.sciencedirect.com/science/article/pii/S0196677496900140
Kuhlmann et al, 2011: https://aclanthology.org/P11-1068.pdf
Eisner, 2000: https://www.cs.jhu.edu/~jason/papers/eisner.iwptbook00.pdf
Args:
log_potentials: jax.Array of shape (..., n, n). If graph is directed, 0th
column will be ignored because 0th node will be treated as
the root node. If graph is undirected log-potentials will
be symmetrized -- this is a noop if they are already
symmetric.
directed: Boolean flag signifying if the tree is directed (arborescence).
projective: Boolean flag signifying if the tree should be projective,
i.e. if there should be no crossing tree branches when nodes
are positioned on a single line with their canonical order
(see Eisner, 2000).
single_root_edge: Boolean flag signifying if the number of arcs leaving
root node (node at position 0) should be exactly 1.
lengths: Optional array providing the number of non-root nodes in each
graph. The "non-root" part is important. This array,
if provided, will be used for automatic padding.
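Example:
A minimal illustrative sketch with made-up sizes (batch=2, n=6):
scores = jax.random.normal(jax.random.PRNGKey(0), (2, 6, 6))
dist = SpanningTreeCRF(scores, directed=True, projective=False,
single_root_edge=True)
tree = dist.argmax()        # (2, 6, 6) 0/1 adjacency indicators
logp = dist.log_prob(tree)  # (2,)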
""" # pylint: disable=line-too-long
super().__init__(log_potentials=None)
self.directed = directed
self.projective = projective
self.single_root_edge = single_root_edge
if not directed:
# Symmetrize log_potentials.
log_potentials = (log_potentials + jnp.swapaxes(log_potentials, -2, -1))/2
cls = (SpanningTreeProjectiveCRF if projective
else SpanningTreeNonProjectiveCRF)
self._dist = cls(log_potentials, lengths=lengths,
single_root_edge=single_root_edge)
@property
def event_shape(self) -> Shape:
return self._dist.event_shape
@property
def batch_shape(self) -> Shape:
return self._dist.batch_shape
@property
def lengths(self) -> Array:
return self._dist.lengths
@typed
def _remove_padding(self, event: Float[Array, "*xy n n"]
) -> Float[Array, "*xy n n"]:
"""Removes padding elements introduced for computing log-partition."""
x = jnp.arange(event.shape[-1]) < self.lengths[..., None]
mask = x[..., None, :] & x[..., None]
return jnp.where(mask, event, 0)
@typed
def sample_without_replacement(self, key: Key, k: int
) -> Tuple[Float[Array, "k *batch n n"],
Float[Array, "k *batch"],
Float[Array, "k *batch"]]:
"""Sampling without replacement from Stanojević (2022).
References:
Stanojević, 2022: https://aclanthology.org/2022.emnlp-main.110.pdf
Args:
key: Sampling key.
k: The number of required samples without replacement.
Returns:
Tuple of (samples, logprobs, gumbel perturbed logprobs)
"""
if self.projective:
raise NotImplementedError("There is no implementation of sampling "
"without replacement for projective trees.")
dist = cast(SpanningTreeNonProjectiveCRF, self._dist)
samples, logprobs, gumbel_logprobs = dist.sample_without_replacement(key, k)
if not self.directed:
samples = samples + jnp.swapaxes(samples, -2, -1)
samples = self._remove_padding(samples)
return samples, logprobs, gumbel_logprobs
@typed
def sample(self, key: Key, sample_shape: Union[Shape, int] = (), **kwargs
) -> Float[Array, "... n n"]:
samples = self._dist.sample(key=key, sample_shape=sample_shape, **kwargs)
if not self.directed:
samples = samples + jnp.swapaxes(samples, -2, -1)
samples = self._remove_padding(samples)
return samples
@typed
def normalize_log_probs(self, scores: Float[Array, "*b"]
) -> Float[Array, "*b"]:
return self._dist.normalize_log_probs(scores)
@typed
def log_prob(self, event: Float[Array, "*b n n"], **kwargs
) -> Float[Array, "*b"]:
event = self._remove_padding(event)
if not self.directed:
event = jnp.triu(event)
return self._dist.log_prob(event, **kwargs)
@typed
def unnormalized_log_prob(self, event: Float[Array, "*b n n"], **kwargs
) -> Float[Array, "*b"]:
event = self._remove_padding(event)
if not self.directed:
event = jnp.triu(event)
return self._dist.unnormalized_log_prob(event, **kwargs)
@typed
def log_partition(self, **kwargs) -> Float[Array, "*batch"]:
return self._dist.log_partition(**kwargs)
@typed
def marginals_for_template_variables(self, **kwargs
) -> Float[Array, "*batch n n"]:
return self._dist.marginals_for_template_variables(**kwargs)
@typed
def marginals(self, **kwargs) -> Float[Array, "*batch n n"]:
m = self._dist.marginals(**kwargs)
if not self.directed:
m = m + jnp.swapaxes(m, -2, -1)
m = self._remove_padding(m)
return m
@typed
def argmax(self, **kwargs) -> Float[Array, "*batch n n"]:
tree = self._dist.argmax(**kwargs)
if not self.directed:
tree = tree + jnp.swapaxes(tree, -2, -1)
tree = self._remove_padding(tree)
return tree
@typed
def argmax_and_max(self, **kwargs) -> Tuple[Float[Array, "*batch n n"],
Float[Array, "*batch"]]:
tree, score = self._dist.argmax_and_max(**kwargs)
if not self.directed:
tree = tree + jnp.swapaxes(tree, -2, -1)
tree = self._remove_padding(tree)
return tree, score
@typed
def top_k(self, k: int, **kwargs) -> Tuple[Float[Array, "k *batch n n"],
Float[Array, "k *batch"]]:
trees, scores = self._dist.top_k(k, **kwargs)
if not self.directed:
trees = trees + jnp.swapaxes(trees, -2, -1)
trees = self._remove_padding(trees)
return trees, scores
@typed
def entropy(self, **kwargs) -> Float[Array, "*batch"]:
return self._dist.entropy(**kwargs)
@typed
def cross_entropy(self, other: SpanningTreeCRF, **kwargs
) -> Float[Array, "*batch"]:
if self.directed != other.directed:
raise ValueError("Cross entropy cannot be computed between directed and"
"undirected spanning tree distributions.")
# pylint: disable=protected-access
return self._dist.cross_entropy(other._dist, **kwargs)
@typed
def kl_divergence(self, other: SpanningTreeCRF, **kwargs
) -> Float[Array, "*batch"]:
if self.directed != other.directed:
raise ValueError("Cross entropy cannot be computed between directed and"
"undirected spanning tree distributions.")
# pylint: disable=protected-access
return self._dist.kl_divergence(other._dist, **kwargs)
|
synjax-master
|
synjax/_src/spanning_tree_crf.py
|
# Copyright 2023 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for spanning_tree_crf.
Here we test only undirected cases since directed cases are tested in the
spanning_tree_non_projective_crf_test and spanning_tree_projective_crf_test.
"""
from absl.testing import absltest
import jax
import jax.numpy as jnp
from synjax._src import distribution_test
from synjax._src import spanning_tree_crf
SpanningTreeCRF = spanning_tree_crf.SpanningTreeCRF
class SpanningTreeCrfTest(distribution_test.DistributionTest):
def _create_dist(self, f):
b, n = 2, 6
return [spanning_tree_crf.SpanningTreeCRF(
log_potentials=f((b, n, n)), directed=False, single_root_edge=True,
projective=projective) for projective in [True, False]]
def create_random_batched_dists(self, key: jax.random.KeyArray):
return self._create_dist(lambda shape: jax.random.uniform(key, shape))
def create_symmetric_batched_dists(self):
return self._create_dist(jnp.zeros)
def create_invalid_shape_distribution(self):
return spanning_tree_crf.SpanningTreeCRF(
log_potentials=jnp.zeros((2, 6, 6-1)), directed=False,
single_root_edge=True, projective=True)
def test_log_count(self):
# Skips testing for log-count since there is no simple unified formula for
# all supported sub-types of distributions.
pass
def assert_is_symmetric(self, dist, marginals) -> bool:
del dist
self.assert_allclose(marginals, jnp.swapaxes(marginals, -1, -2))
def assert_batch_of_valid_samples(self, dist, samples):
_, n = dist.event_shape
l = dist.lengths[..., None, None]
mask = (jnp.arange(n) < l) & (jnp.arange(n)[:, None] < l)
self.assert_allclose(jnp.where(mask, 0, samples), 0)
if not dist.directed:
self.assert_is_symmetric(dist, samples)
if dist.single_root_edge:
self.assert_allclose(jnp.sum(samples[..., 0, :], -1), 1)
def assert_valid_marginals(self, dist, marginals):
if not dist.directed:
self.assert_is_symmetric(dist, marginals)
def test_top_k(self):
for dist in self.create_random_batched_dists(jax.random.PRNGKey(0)):
self.check_top_k_single_dist(dist, check_prefix_condition=dist.projective)
def test_sample_without_replacement(self):
args = [jax.random.PRNGKey(0), 3]
for dist in self.create_random_batched_dists(jax.random.PRNGKey(0)):
if dist.projective:
self.assertRaises(NotImplementedError,
dist.sample_without_replacement, *args)
else:
dist.sample_without_replacement(*args) # Should not crash.
if __name__ == "__main__":
absltest.main()
|
synjax-master
|
synjax/_src/spanning_tree_crf_test.py
|
# Copyright 2023 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Definition of the interface of all SynJax distributions."""
from __future__ import annotations
# pylint: disable=g-multiple-import
# pylint: disable=g-long-lambda
# pylint: disable=g-importing-member
# pylint: disable=protected-access
import functools
from typing import TypeVar, cast, Optional, Union, Tuple
import equinox as eqx
import jax
import jax.numpy as jnp
from jaxtyping import Array, Float, Num, PyTree
from synjax._src.constants import INF
from synjax._src.typing import Key, Shape, typed
from synjax._src.utils import semirings
from synjax._src.utils import special
Self = TypeVar("Self")
Event = PyTree[Float[Array, "..."]]
SoftEvent = PyTree[Float[Array, "..."]]
vmap_ndim = special.vmap_ndim
grad_ndim = special.grad_ndim
partial = functools.partial
tree_leaves = jax.tree_util.tree_leaves
prob_clip = partial(jax.tree_map, lambda x: jnp.clip(jnp.nan_to_num(x), 0))
tlog = partial(jax.tree_map, special.safe_log)
tmul = partial(jax.tree_map, jnp.multiply)
tadd = partial(jax.tree_map, jnp.add)
tsub = partial(jax.tree_map, jnp.subtract)
tsum_all = lambda x: functools.reduce(jnp.add, map(jnp.sum, tree_leaves(x)))
is_shape = lambda x: isinstance(x, tuple) and all(isinstance(y, int) for y in x)
@typed
class Distribution(eqx.Module):
"""Abstract base class for all distributions."""
log_potentials: Optional[PyTree[Float[Array, "..."]]]
@typed
def log_count(self, **kwargs) -> Float[Array, "*batch"]:
"""Log of the count of structures in the support."""
replace_fn = lambda x: jnp.where(x <= -INF, -INF, 0)
safe_replace_fn = lambda x: replace_fn(x) if eqx.is_inexact_array(x) else x
return jax.tree_map(safe_replace_fn, self).log_partition(**kwargs)
@property
def event_shape(self) -> PyTree[int]:
"""PyTree of shapes of the event arrays."""
raise NotImplementedError
@property
def batch_shape(self):
"""Shape of the batch."""
if isinstance(self.log_potentials, Array) and is_shape(self.event_shape):
return cast(Array, self.log_potentials).shape[:-len(self.event_shape)]
else:
raise NotImplementedError
@property
def batch_ndim(self):
return len(self.batch_shape)
@typed
def _single_sample(self, key: Key, **kwargs) -> Event:
raise NotImplementedError
@typed
def sample(self, key: Key, sample_shape: Union[int, Shape] = (), **kwargs
) -> Event:
"""Samples an event.
Args:
key: KeyArray key or integer seed.
sample_shape: Additional leading dimensions for sample.
**kwargs: Additional distribution specific kwargs.
Returns:
A sample of shape `sample_shape` + `batch_shape` + `event_shape`.
"""
sample_shape = special.asshape(sample_shape)
keys = special.split_key_for_shape(key, sample_shape)
fn = lambda key: self._single_sample(key=key, **kwargs)
return vmap_ndim(fn, len(sample_shape))(keys)
@typed
def log_prob(self, event: Event, **kwargs) -> Float[Array, "*batch"]:
"""Normalized Log probability of an event."""
scores = self.unnormalized_log_prob(event, **kwargs)
return scores - self.log_partition(**kwargs)
@typed
def unnormalized_log_prob(self, event: Event) -> Float[Array, "..."]:
r"""Unnormalized probability of an event."""
bcast_ndim = self._bcast_ndim(event)
f = lambda a, b: jnp.sum(a*b, range(bcast_ndim+self.batch_ndim, a.ndim))
leaf_sums = jax.tree_map(f, event, self.log_potentials)
return vmap_ndim(tsum_all, bcast_ndim+self.batch_ndim)(leaf_sums)
def _bcast_ndim(self, event: Event) -> int:
leaf0 = lambda x: tree_leaves(x, is_leaf=is_shape)[0]
return leaf0(event).ndim - len(leaf0(self.event_shape)) - self.batch_ndim
@typed
def log_partition(self, **kwargs) -> Float[Array, "*batch"]:
"""Log-partition function."""
raise NotImplementedError
@typed
def marginals_for_template_variables(self: Self, **kwargs) -> Self:
"""Marginal prob. of template parts (e.g. PCFG rules instead tree nodes)."""
grad_f = grad_ndim(lambda x: x.log_partition(**kwargs), self.batch_ndim)
return prob_clip(grad_f(self))
@typed
def marginals(self, **kwargs) -> SoftEvent:
"""Marginal probability of structure's parts."""
return self.marginals_for_template_variables(**kwargs).log_potentials
@typed
def log_marginals(self, **kwargs) -> SoftEvent:
"""Logs of marginal probability of structure's parts."""
return tlog(self.marginals(**kwargs))
@typed
def argmax(self, **kwargs) -> Event:
"""Finds the highest scoring structure.
Args:
**kwargs: Keyword arguments for the underlying distribution.
Returns:
The highest scoring structure. In case of ties some
distributions return fractional structures (i.e. edges may not be only 0
and 1 but any number in between). Those distributions support a strict_max
parameter that will arbitrarily break the ties and remove fractional
structures at the price of needing more compute.
"""
return self.argmax_and_max(**kwargs)[0]
@typed
def argmax_and_max(self, **kwargs) -> Tuple[Event, Float[Array, "*batch"]]:
"""Finds the highest scoring structure and its unnormalized score.
Args:
**kwargs: Keyword arguments for the underlying distribution.
Returns:
The highest scoring structure and its score. In case of ties some
distributions return fractional structures (i.e. edges may not be only 0
and 1 but any number in between). Those distributions support strict_max
parameter that will arbitrarily break the ties and remove fractional
structures at a price of needing more compute.
"""
raise NotImplementedError
@typed
def top_k(self, k: int, approximate: bool = False, **kwargs
) -> Tuple[PyTree[Num[Array, "k ..."]], Float[Array, "k ..."]]:
"""Finds top-k structures and their scores."""
raise NotImplementedError
@typed
def entropy(self, **kwargs) -> Float[Array, "*batch"]:
"""Calculates the Shannon entropy (in nats).
Based on Li and Eisner (2009). Similar statements appear in
Martins et al (2010) and Zmigrod et al (2021).
References:
Li and Eisner, 2009 - Section 6.1: https://aclanthology.org/D09-1005.pdf#page=9
Martins et al, 2010 - Equation 9: https://aclanthology.org/D10-1004.pdf#page=4
Zmigrod et al, 2021 - Section 6.2: https://aclanthology.org/2021.tacl-1.41.pdf#page=10
Args:
**kwargs: Additional arguments for computation of marginals.
Returns:
Entropy value.
""" # pylint: disable=line-too-long
return self.cross_entropy(self, **kwargs)
@typed
def cross_entropy(self: Self, other: Self, **kwargs
) -> Float[Array, "*batch"]:
"""Calculates the cross entropy to another distribution (in nats).
References:
Li and Eisner, 2009 - Section 6.1: https://aclanthology.org/D09-1005.pdf#page=9
Args:
other: A compatible distribution.
**kwargs: Additional arguments for computation of marginals.
Returns:
The cross entropy `H(self || other_dist)`.
""" # pylint: disable=line-too-long
def param_leaves(x):
return [y for y in tree_leaves(x) if eqx.is_inexact_array(y)]
p_marginals = param_leaves(self.marginals_for_template_variables(**kwargs))
q_log_potentials = param_leaves(other)
q_log_z = other.log_partition(**kwargs)
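# H(p, q) = log Z_q - E_p[score_q(x)], where the expectation is the dot
# product of p's marginals with q's log-potentials (Li and Eisner, 2009).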
return q_log_z - vmap_ndim(tsum_all, self.batch_ndim
)(tmul(p_marginals, q_log_potentials))
@typed
def kl_divergence(self: Self, other: Self, **kwargs
) -> Float[Array, "*batch"]:
"""Calculates the KL divergence to another distribution (in nats).
References:
Li and Eisner, 2009 - Section 6.1: https://aclanthology.org/D09-1005.pdf#page=9
Args:
other: A compatible distribution
**kwargs: Additional arguments for computation of marginals.
Returns:
The KL divergence `KL(self || other)`.
""" # pylint: disable=line-too-long
return self.cross_entropy(other, **kwargs) - self.entropy(**kwargs)
def __getitem__(self: Self, i) -> Self:
"""If distribution is batched, indexes sub-distribution from the batch."""
return jax.tree_map(lambda x: x[i], self)
class SemiringDistribution(Distribution):
"""Abstract class representing structured distributions based on semirings."""
struct_is_isomorphic_to_params: bool = eqx.static_field(default=True)
@typed
def unnormalized_log_prob(self: Self, event: Event, **kwargs
) -> Float[Array, "..."]:
r"""Unnormalized score of an event.
Args:
event: Structures that distribution can broadcast over.
**kwargs: Additional keyword arguments that are passed to
log-partition function.
Returns:
Unnormalized log-probs for each sample.
"""
if self.struct_is_isomorphic_to_params:
return super().unnormalized_log_prob(event)
else:
# This is useful mostly for distributions like PCFG where the parameters
# (in the PCFG case that is the grammar) are not of the same form as the
# marginals (in the PCFG case that is a chart).
sr = semirings.LogSemiring()
key = jax.random.PRNGKey(0)
def f_single_sample_single_batch(
base_struct: SoftEvent, dist: Self) -> Float[Array, ""]:
return sr.unwrap(dist._structure_forward(
jax.tree_map(lambda x: jnp.where(x, 0, -INF), base_struct),
sr, key=key, **kwargs))
def f_single_sample_multi_batch(base_struct: SoftEvent
) -> Float[Array, "*batch"]:
return vmap_ndim(f_single_sample_single_batch, self.batch_ndim
)(base_struct, self)
def f_multi_sample_multi_batch(base_struct: SoftEvent
) -> Float[Array, "*sample_batch"]:
return vmap_ndim(f_single_sample_multi_batch, self._bcast_ndim(event)
)(base_struct)
log_probs = f_multi_sample_multi_batch(event)
return log_probs
@typed
def log_partition(self, **kwargs) -> Float[Array, "*batch"]:
"""Compute the log-partition function."""
sr = semirings.LogSemiring()
def f(dist, base):
return dist._structure_forward(base, sr, jax.random.PRNGKey(0), **kwargs)
result = vmap_ndim(f, self.batch_ndim)(self, self._batched_base_structure())
return sr.unwrap(jnp.moveaxis(result, -1, 0))
@typed
def argmax_and_max(self, strict_max: Optional[bool] = None, **kwargs
) -> Tuple[Event, Float[Array, "*batch"]]:
"""Calculates the argmax and max."""
sr = semirings.MaxSemiring(strict_max=strict_max)
def f(base_struct, dist):
max_score = dist._structure_forward(
base_struct, sr, key=jax.random.PRNGKey(0), **kwargs)
max_score = sr.unwrap(max_score)
return max_score, max_score
max_structs, max_scores = grad_ndim(f, self.batch_ndim, has_aux=True
)(self._batched_base_structure(), self)
return max_structs, max_scores
@typed
def marginals(self: Self, **kwargs) -> SoftEvent:
"""Marginal probability of structure's parts."""
sr = semirings.LogSemiring()
def f(base_struct: SoftEvent, dist: Self) -> Float[Array, "*batch"]:
return sr.unwrap(dist._structure_forward(
base_struct, sr, key=jax.random.PRNGKey(0), **kwargs))
m = grad_ndim(f, self.batch_ndim)(self._batched_base_structure(), self)
return prob_clip(m)
@typed
def _single_sample(self, key: Key, **kwargs) -> Event:
"""Finds a single sample per each batched distribution.
Args:
key: KeyArray to use for sampling. It is a single key that will be
split for each batch element.
**kwargs: Any additional arguments needed for forward pass
Returns:
Single sample for each distribution in the batch.
"""
keys = special.split_key_for_shape(key, self.batch_shape)
sr = semirings.SamplingSemiring()
def f(base_struct, dist, akey):
return sr.unwrap(dist._structure_forward(base_struct, sr, akey, **kwargs))
samples = grad_ndim(f, self.batch_ndim
)(self._batched_base_structure(), self, keys)
return samples
@typed
def top_k(self, k: int, approximate: bool = False, **kwargs
) -> Tuple[PyTree[Num[Array, "k ..."]], Float[Array, "k ..."]]:
"""Finds top_k structures.
Args:
k: Number of top elements.
approximate: Should k-best be approximate.
**kwargs: Additional kwargs for the distribution specific forward method.
Returns:
A tuple where first element is an array of top k structures and second
element is an array of their scores that are unnormalized.
"""
if k <= 0:
raise ValueError("k must be a strictly positive integer")
if k == 1:
# This is a shortcut optimization for a special case.
best, score = self.argmax_and_max()
expand = partial(jax.tree_map, lambda x: x[None])
return expand(best), expand(score)
def kbest_forward(base_struct, dist):
kbest_scores = dist._structure_forward(
base_struct, semirings.KBestSemiring(k, approximate=approximate),
key=jax.random.PRNGKey(0), **kwargs)
return kbest_scores, kbest_scores
def kbest_per_dist(base_struct, dist):
return jax.jacrev(kbest_forward, has_aux=True)(base_struct, dist)
kbest_structs, kbest_scores = vmap_ndim(kbest_per_dist, self.batch_ndim)(
self._batched_base_structure(), self)
move = lambda x: jnp.moveaxis(x, self.batch_ndim, 0)
kbest_structs = jax.tree_map(move, kbest_structs)
kbest_scores = move(kbest_scores)
return kbest_structs, kbest_scores
@typed
def _batched_base_structure(self) -> SoftEvent:
leaves_shapes, defs = jax.tree_util.tree_flatten(self.event_shape, is_shape)
leaves = [jnp.zeros(self.batch_shape+shape) for shape in leaves_shapes]
return jax.tree_util.tree_unflatten(defs, leaves)
@typed
def _structure_forward(
self, base_struct: SoftEvent,
semiring: semirings.Semiring, key: Key, **kwargs) -> Float[Array, "s"]:
"""Computes partition under a semiring for single instance."""
raise NotImplementedError
|
synjax-master
|
synjax/_src/distribution.py
|
# Copyright 2023 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for alignment_simple."""
from absl.testing import absltest
import jax
import jax.numpy as jnp
from synjax._src import alignment_simple
from synjax._src import distribution_test
from synjax._src.utils import special
AlignmentCRF = alignment_simple.AlignmentCRF
class AlignmentCrfTest(distribution_test.DistributionTest):
def analytic_log_count(self, dist: distribution_test.Distribution
) -> jax.Array:
if dist.alignment_type == "non_monotone_one_to_one":
return jax.scipy.special.gammaln(dist._lengths)
elif dist.alignment_type == "monotone_many_to_many":
return special.log_delannoy(
dist._dist.lengths_rows-1, dist._dist.lengths_cols-1,
max_input_value=min(*dist.event_shape))
else:
raise NotImplementedError
def test_argmax(self):
key = jax.random.PRNGKey(0)
b, n = 3, 5
dists = self.create_random_batched_dists(key)
dists.append(AlignmentCRF(jax.random.normal(key, (b, n, n)),
alignment_type="non_monotone_one_to_one"))
for dist in dists:
assert dist.batch_shape
best = dist.argmax()
self.assert_zeros_and_ones(best)
self.assert_batch_of_valid_samples(dist, best)
self.assert_valid_marginals(dist, best)
struct_potential = jnp.exp(dist.unnormalized_log_prob(best))
self.assertEqual(struct_potential.shape, dist.batch_shape)
self.assert_all(struct_potential > 0)
def create_random_batched_dists(self, key: jax.random.KeyArray):
b, n, m = 3, 5, 6
log_potentials = jax.random.normal(key, (b, n, m))
return [AlignmentCRF(log_potentials, alignment_type=ttype)
for ttype in ("monotone_one_to_many", "monotone_many_to_many")]
def create_symmetric_batched_dists(self):
b, n = 3, 5
return [AlignmentCRF(jnp.zeros((b, n, n)),
alignment_type="monotone_many_to_many")]
def test_crash_on_invalid_shapes(self):
b = 3
m = 5
# pylint: disable=g-long-lambda
self.assertRaises(
ValueError, lambda: AlignmentCRF(
log_potentials=jnp.zeros((b, m, m-1)),
alignment_type="non_monotone_one_to_one"))
def assert_is_symmetric(self, dist, marginals) -> bool:
self.assert_allclose(marginals, jnp.swapaxes(marginals, -1, -2))
self.assert_allclose(marginals,
jnp.rot90(jnp.swapaxes(marginals, -1, -2),
k=2, axes=(-1, -2)))
if dist.alignment_type == "monotone_many_to_many":
self.assert_all(marginals > 0)
def assert_batch_of_valid_samples(self, dist, samples):
transitions_count = jnp.sum(samples, (-1, -2))
self.assert_all(transitions_count >= max(*dist.event_shape))
self.assert_all(transitions_count <= sum(dist.event_shape)-1)
def assert_valid_marginals(self, dist, marginals):
if dist.alignment_type != "non_monotone_one_to_one":
self.assert_allclose(marginals[..., -1, -1], 1)
self.assert_allclose(marginals[..., 0, 0], 1)
self.assert_all(marginals.sum(-2) >= 0.98)
if __name__ == "__main__":
absltest.main()
|
synjax-master
|
synjax/_src/alignment_simple_test.py
|
# Copyright 2023 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Distribution representing non-projective dependency trees."""
from __future__ import annotations
# pylint: disable=g-long-lambda
# pylint: disable=g-multiple-import, g-importing-member
from functools import partial
from typing import Literal, Optional, Tuple
import equinox as eqx
import jax
import jax.numpy as jnp
from jaxtyping import Array, Float, Int32
from synjax._src.config import get_config
from synjax._src.constants import EPS, MTT_LOG_EPS
from synjax._src.deptree_algorithms import deptree_padding
from synjax._src.distribution import Distribution
from synjax._src.typing import Shape, Key, typed
from synjax._src.utils import autoregressive_decoding
from synjax._src.utils import special
SamplingAlgorithmName = Literal["colbourn", "wilson"]
@typed
def _optionally_shift_log_potentials(
log_potentials: Float[Array, "*batch n n"], single_root_edge: bool
) -> Tuple[Float[Array, "*batch n n"], Float[Array, "*batch"]]:
"""Makes log-potentials numerically more stable.
Modifies log_potentials to be more numerically stable without having
  any impact on the tree distribution. Inspired by Section D.2 from
  Paulus et al (2020). This implementation is more stable than
  Paulus et al because max normalization is applied column-wise, which
  guarantees that the maximum score of a tree is not bigger than 0, but it
breaks the symmetry if there was any.
References:
Paulus et al 2020 - Section D2: https://arxiv.org/pdf/2006.08063.pdf#page=26
Args:
log_potentials: Log-potentials of the graph.
single_root_edge: Whether to renormalize the root outgoing edges which is
valid only if single-root constraint is used.
Returns:
New log potentials with correction for log-partition.
"""
cfg = get_config()
if cfg.mtt_shift_log_potentials:
c_matrix = jnp.max(log_potentials, axis=-2, keepdims=True)
correction = jnp.sum(c_matrix[..., 0, 1:], -1)
if single_root_edge:
c_root = jnp.max(
log_potentials*jax.nn.one_hot(0, log_potentials.shape[-1])[:, None],
axis=-1, keepdims=True)
c_matrix += c_root
correction += c_root[..., 0, -1]
log_potentials -= jax.lax.stop_gradient(c_matrix.at[..., :, 0].set(0))
else:
correction = jnp.zeros(log_potentials.shape[:-2])
return log_potentials, jax.lax.stop_gradient(correction)
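# --- Added sketch (illustrative, hypothetical helper name): every non-ROOT
# node has exactly one incoming edge in a spanning tree, so subtracting the
# column maxima shifts the score of *every* tree by the same `correction`
# constant and leaves the distribution itself unchanged.
def _example_shift_preserves_tree_scores():
  """Checks original_tree_score == shifted_tree_score + correction."""
  lp = jnp.log(jnp.arange(1.0, 10.0).reshape(1, 3, 3))
  shifted, correction = _optionally_shift_log_potentials(
      lp, single_root_edge=False)
  # Score of the spanning tree with edges 0->1 and 1->2.
  original_score = lp[0, 0, 1] + lp[0, 1, 2]
  shifted_score = shifted[0, 0, 1] + shifted[0, 1, 2]
  return original_score, shifted_score + correction[0]  # Approximately equal.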
@typed
def _custom_slog_det(
x: Float[Array, "*batch n n"]
) -> Tuple[Float[Array, "*batch"], Float[Array, "*batch"]]:
cfg = get_config()
return special.safe_slogdet(x, logdet_method=cfg.mtt_logdet_method,
inv_method=cfg.mtt_inv_method,
matmul_precision=cfg.mtt_inv_matmul_precision,
test_invertability=False)
class SpanningTreeNonProjectiveCRF(Distribution):
"""Distribution representing non-projective dependency trees."""
single_root_edge: bool = eqx.static_field()
lengths: Int32[Array, "*batch"]
@typed
def __init__(self,
log_potentials: Float[Array, "*batch n n"],
*,
single_root_edge: bool,
lengths: Optional[Int32[Array, "*batch"]] = None):
self.single_root_edge = single_root_edge
if lengths is None:
batch_shape = log_potentials.shape[:-2]
lengths = jnp.full(batch_shape, log_potentials.shape[-1])
self.lengths = lengths
super().__init__(log_potentials=deptree_padding.pad_log_potentials(
log_potentials, self.lengths))
@property
def event_shape(self) -> Shape:
return self.log_potentials.shape[-2:]
@property
def max_nodes(self) -> int:
# Maximal number of nodes including ROOT node at position 0.
return self.log_potentials.shape[-1]
@typed
def argmax(self, **ignored_args) -> Float[Array, "*batch n n"]:
return mst_numpy_callback(self.log_potentials, self.lengths,
self.single_root_edge).astype(jnp.float32)
@typed
def argmax_and_max(self, **kwargs) -> Tuple[Float[Array, "*batch n n"],
Float[Array, "*batch"]]:
best = self.argmax(**kwargs)
score = self.unnormalized_log_prob(best)
return best, score
@typed
def top_k(self, k: int, approximate: bool = True
) -> Tuple[Float[Array, "k *batch n n"], Float[Array, "k *batch"]]:
"""This is an approximate top-k by using beam search over marginals.
Args:
k: The number of trees to return.
approximate: Use the approximate top-k algorithm.
Returns:
      A tuple of trees (represented as adjacency matrices) and their logprobs.
"""
if k <= 0:
raise ValueError("k must be a strictly positive integer")
elif k == 1:
# This is a shortcut optimization for a special case.
best, score = self.argmax_and_max()
return best[None], score[None]
else:
if not approximate:
        raise NotImplementedError("Non-Projective trees distribution supports "
                                  "only 'approximate' top_k_algorithm.")
beam_state, _ = special.vmap_ndim(
lambda lp: autoregressive_decoding.beam_search(
init_state=State.initial(lp,
single_root_edge=self.single_root_edge),
max_length=self.max_nodes-1,
k=k), self.batch_ndim)(self.log_potentials)
trees = beam_state.sample
matrices = _to_adjacency_matrix(trees)
matrices = jnp.moveaxis(matrices, len(self.batch_shape), 0)
return matrices, self.unnormalized_log_prob(matrices)
@typed
def log_partition(self) -> Float[Array, "*batch"]:
log_potentials, correction = _optionally_shift_log_potentials(
self.log_potentials, self.single_root_edge)
laplacian_hat = _construct_laplacian_hat(log_potentials,
self.single_root_edge)
return correction + _custom_slog_det(laplacian_hat)[1]
@typed
def sample_without_replacement(
self, key: Key, k: int) -> Tuple[Float[Array, "k *batch n n"],
Float[Array, "k *batch"],
Float[Array, "k *batch"]]:
"""Sampling without replacement from Stanojević (2022).
References:
Stanojević, 2022: https://aclanthology.org/2022.emnlp-main.110.pdf
Args:
key: Sampling key.
k: Number of swor samples.
Returns:
Tuple of (samples, logprobs, gumbel perturbed logprobs)
"""
beam_state, logprobs, gumbels = special.vmap_ndim(
lambda rng, lp: autoregressive_decoding.stochastic_beam_search(
key=rng,
init_state=State.initial(lp,
single_root_edge=self.single_root_edge),
max_length=self.max_nodes-1,
k=k), self.batch_ndim
)(special.split_key_for_shape(key, self.batch_shape),
self.log_potentials)
sampled_trees = beam_state.sample
sampled_matrices = _to_adjacency_matrix(sampled_trees)
move = lambda x: jnp.moveaxis(x, len(self.batch_shape), 0)
return move(sampled_matrices), move(logprobs), move(gumbels)
@typed
def _single_sample(self, key: Key,
algorithm: SamplingAlgorithmName = "colbourn"
) -> Float[Array, "*batch n n"]:
if algorithm == "colbourn":
final_states: State
final_states, _, _ = special.vmap_ndim(
lambda rng, lp: autoregressive_decoding.single_ancestral_sample(
init_state=State.initial(lp,
single_root_edge=self.single_root_edge),
key=rng, max_length=self.max_nodes-1, unroll=1),
self.batch_ndim
)(special.split_key_for_shape(key, self.batch_shape),
self.log_potentials)
sampled_trees = final_states.sample
sampled_matrices = _to_adjacency_matrix(sampled_trees)
elif algorithm == "wilson":
sampled_matrices = sample_wilson_numpy_callback(
self.log_potentials, self.lengths, self.single_root_edge
).astype(jnp.float32)
else:
raise NotImplementedError(
f"sampling_algorithm {algorithm:r} not supported")
return sampled_matrices
@typed
def _to_adjacency_matrix(tree: Int32[Array, "*batch n"]
) -> Float[Array, "*batch n n"]:
return jax.nn.one_hot(tree, tree.shape[-1], dtype=jnp.float32, axis=-2
).at[..., 0].set(0)
@typed
class State(autoregressive_decoding.State):
"""Implements a state of a Colbourn sampler for spanning trees.
Original algorithm presented in Colbourn et al (1996) for spanning trees.
Zmigrod et al (2021) adapt the algorithm to single-root dependency trees.
Here we use presentation from Stanojević (2022) that is easier to adapt for
  more complex use cases such as sampling without replacement.
References:
Stanojević, 2022: https://aclanthology.org/2022.emnlp-main.110.pdf
Zmigrod et al, 2021: https://aclanthology.org/2021.emnlp-main.824v2.pdf
Colbourn et al, 1996: https://www.sciencedirect.com/science/article/pii/S0196677496900140
""" # pylint: disable=line-too-long
potentials: Float[Array, "n n"]
laplacian: Float[Array, "n-1 n-1"]
laplacian_invt: Float[Array, "n-1 n-1"]
j: Int32[Array, ""]
sample: Int32[Array, "n"]
single_root_edge: bool = eqx.static_field()
@typed
def logprobs(self) -> Float[Array, "n"]:
marginals = _marginals_with_given_laplacian_invt(
jnp.log(self.potentials), self.laplacian_invt,
single_root_edge=self.single_root_edge)
return special.safe_log(marginals[:, self.j])
@typed
def apply_transition(self, a: Int32[Array, ""]) -> State:
potentials, laplacian, laplacian_invt = self._constrain_graph(a)
sample = self.sample.at[self.j].set(a)
state = State(potentials=potentials, laplacian=laplacian,
laplacian_invt=laplacian_invt, j=self.j + 1, sample=sample,
single_root_edge=self.single_root_edge)
return state
@typed
def _constrain_graph(self, i: Int32[Array, ""]
) -> Tuple[Float[Array, "n n"], Float[Array, "n-1 n-1"],
Float[Array, "n-1 n-1"]]:
potentials_old, laplacian_old, laplacian_invt_old, j = (
self.potentials, self.laplacian, self.laplacian_invt, self.j)
constrained_incoming = jax.nn.one_hot(i, potentials_old.shape[-1])
potentials = potentials_old.at[..., j].set(constrained_incoming)
laplacian = _construct_laplacian_hat(jnp.log(potentials), self.single_root_edge)
uj = laplacian[:, j - 1] - laplacian_old[:, j - 1]
bj = laplacian_invt_old[:, j - 1]
den = 1 + uj.T @ bj
# Application of Sherman-Morrison formula.
update = jnp.outer(bj, uj.T @ laplacian_invt_old)/jnp.where(den, den, EPS)
laplacian_invt = laplacian_invt_old - update
return potentials, laplacian, laplacian_invt
@staticmethod
@typed
def initial(log_potentials: Float[Array, "n n"], single_root_edge: bool) -> State:
empty_sample = jnp.empty(log_potentials.shape[-1], dtype=jnp.int32)
laplacian = _construct_laplacian_hat(log_potentials,
single_root_edge=single_root_edge)
laplacian_invt = jnp.linalg.inv(laplacian).T
return State(potentials=jnp.exp(log_potentials), laplacian=laplacian,
laplacian_invt=laplacian_invt, j=jnp.int32(1),
single_root_edge=single_root_edge, sample=empty_sample)
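# --- Added sketch: _constrain_graph above refreshes the inverse-transpose of
# the Laplacian-hat with the Sherman-Morrison formula (pinning a column changes
# the matrix by a rank-one update), avoiding a full O(n^3) inversion at every
# step. A hypothetical standalone check of that identity:
def _example_sherman_morrison_identity():
  """Verifies the Sherman-Morrison rank-one inverse update numerically."""
  # (A + u v^T)^-1 == A^-1 - (A^-1 u)(v^T A^-1) / (1 + v^T A^-1 u)
  k1, k2, k3 = jax.random.split(jax.random.PRNGKey(0), 3)
  a = jnp.eye(4) + 0.1 * jax.random.normal(k1, (4, 4))
  u = jax.random.normal(k2, (4,))
  v = jax.random.normal(k3, (4,))
  a_inv = jnp.linalg.inv(a)
  updated = a_inv - jnp.outer(a_inv @ u, v @ a_inv) / (1.0 + v @ a_inv @ u)
  return jnp.allclose(updated, jnp.linalg.inv(a + jnp.outer(u, v)), atol=1e-4)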
@typed
def _construct_laplacian_hat(
log_potentials: Float[Array, "*batch n n"], single_root_edge: bool
) -> Float[Array, "*batch n-1 n-1"]:
"""Computes a graph Laplacian-hat matrix as in Koo et al (2007).
  This is not a Laplacian matrix but a Laplacian-hat matrix. It is constructed
  by applying the right modification to a regular Laplacian matrix so that the
  determinant of the Laplacian-hat gives the partition function over all
  spanning trees.
References:
Koo et al, 2007: https://aclanthology.org/D07-1015.pdf
Args:
log_potentials: Weight matrix with log-potential entries.
single_root_edge: Whether to use a single-root constraint
Returns:
    Laplacian-hat matrix.
"""
potentials = jnp.exp(jnp.logaddexp(log_potentials, MTT_LOG_EPS))
potentials *= 1-jnp.eye(potentials.shape[-1]) # Removing self-edges
laplacian = lambda x: x.sum(axis=-2, keepdims=True) * jnp.eye(x.shape[-1]) - x
cut = lambda x: x[..., 1:, 1:]
if single_root_edge:
return laplacian(cut(potentials)).at[..., 0, :].set(potentials[..., 0, 1:])
else:
return cut(laplacian(potentials))
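# --- Added sketch (hypothetical helper): a tiny Matrix-Tree-theorem check.
# For ROOT=0 plus nodes 1 and 2 in the multi-root case, the spanning
# arborescences are {0->1, 0->2}, {0->1, 1->2} and {0->2, 2->1}, so the
# partition function is w01*w02 + w01*w12 + w02*w21. The determinant of the
# Laplacian-hat recovers it (up to the MTT_LOG_EPS smoothing of zero edges).
def _example_matrix_tree_theorem_check():
  """Compares det(Laplacian-hat) against a brute-force sum over trees."""
  w = jnp.array([[0.0, 2.0, 3.0],
                 [0.0, 0.0, 5.0],
                 [0.0, 7.0, 0.0]])
  laplacian_hat = _construct_laplacian_hat(
      jnp.log(jnp.where(w > 0, w, 1e-9)), single_root_edge=False)
  brute_force = w[0, 1] * w[0, 2] + w[0, 1] * w[1, 2] + w[0, 2] * w[2, 1]
  return jnp.linalg.det(laplacian_hat), brute_force  # Both approximately 37.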
@typed
def _marginals_with_given_laplacian_invt(
log_potentials: Float[Array, "*batch n n"],
laplacian_invt: Float[Array, "*batch n-1 n-1"], single_root_edge: bool
) -> Float[Array, "*batch n n"]:
"""Computes marginals in cases where the inverse of the Laplacian is provided.
  This implementation exploits automatic differentiation to keep the code
  concise. This function is the vector-Jacobian product of a function that
  constructs the Laplacian-hat matrix, where primals are log-potentials and
  tangents come from the inverse-transpose of the Laplacian-hat.
  For the explicit definition that doesn't use automatic differentiation see
  Section 3.2 of Koo et al (2007) or the NumPy implementation of this function
within SynJax.
References:
Koo et al, 2007: https://aclanthology.org/D07-1015.pdf#page=5
Args:
log_potentials: Weight matrix with log-potential entries.
laplacian_invt: Inverse-transpose of the Laplacian-hat matrix.
single_root_edge: Whether to use a single-root constraint.
Returns:
Matrix of marginals.
"""
_, vjp = jax.vjp(partial(_construct_laplacian_hat,
single_root_edge=single_root_edge), log_potentials)
return vjp(laplacian_invt)[0]
@jax.custom_gradient
def sample_wilson_numpy_callback(
log_potentials: jax.Array, lengths: jax.Array, single_root_edge: bool
) -> jax.Array:
"""JAX-to-Numba callback for vectorized sampling of spanning trees."""
# The import is located here so that if users do not
# call Numba code the Numba compilation won't be triggered and potential
# irrelevant compilation errors won't appear.
# pylint: disable=g-import-not-at-top
# pylint: disable=import-outside-toplevel
from synjax._src.deptree_algorithms import deptree_non_proj_wilson_sampling
result_shape = jax.ShapeDtypeStruct(log_potentials.shape[:-1], jnp.int32)
# pylint: disable=g-long-lambda
f = lambda *x: deptree_non_proj_wilson_sampling.vectorized_sample_wilson(
*x).astype(jnp.int32)
trees = jax.pure_callback(f, result_shape, log_potentials, lengths,
single_root_edge, vectorized=True)
# pytype: disable=bad-return-type
return (_to_adjacency_matrix(trees),
lambda g: (jnp.zeros_like(log_potentials), None, None, None))
# pytype: enable=bad-return-type
@jax.custom_gradient
def mst_numpy_callback(log_potentials: jax.Array, lengths: jax.Array,
single_root_edge: bool) -> jax.Array:
"""JAX-to-Numba callback for vectorized Tarjan's maximum spanning tree."""
# The import is located here so that if users do not call Numba code the
# Numba compilation won't be triggered and potential irrelevant
# compilation errors won't appear.
# pylint: disable=g-import-not-at-top
# pylint: disable=import-outside-toplevel
from synjax._src.deptree_algorithms import deptree_non_proj_argmax
result_shape = jax.ShapeDtypeStruct(log_potentials.shape[:-1], jnp.int32)
trees = jax.pure_callback(
lambda *x: deptree_non_proj_argmax.vectorized_mst(*x).astype(jnp.int32),
result_shape, log_potentials, lengths, single_root_edge, vectorized=True)
# pytype: disable=bad-return-type
return (_to_adjacency_matrix(trees),
lambda g: (jnp.zeros_like(log_potentials), None, None))
# pytype: enable=bad-return-type
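# --- Illustrative usage sketch (added example; the helper name is
# hypothetical, the class and its methods are the ones defined above).
def _example_non_projective_usage():
  """Builds a batched distribution and queries standard quantities."""
  key = jax.random.PRNGKey(0)
  b, n = 2, 5  # 2 sentences, each with 4 words plus the ROOT node at index 0.
  dist = SpanningTreeNonProjectiveCRF(
      jax.random.normal(key, (b, n, n)), single_root_edge=True,
      lengths=jnp.array([n, n - 1], dtype=jnp.int32))
  trees = dist.argmax()                    # (b, n, n) adjacency matrices.
  log_z = dist.log_partition()             # (b,) via the Matrix-Tree theorem.
  scores = dist.unnormalized_log_prob(trees)
  return trees, scores - log_z             # Log-probabilities of best trees.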
|
synjax-master
|
synjax/_src/spanning_tree_non_projective_crf.py
|
# Copyright 2023 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for semi_markov_crf."""
from absl.testing import absltest
import jax
import jax.numpy as jnp
from synjax._src import distribution_test
from synjax._src import linear_chain_crf
from synjax._src import semi_markov_crf
def chain_is_connected(samples) -> bool:
scores = jax.lax.associative_scan(jnp.matmul, samples, axis=-3)
scores = scores[..., -1, :, :].sum((-1, -2))
return jnp.all(scores == 1)
class SemiMarkovCRFTest(distribution_test.DistributionTest):
def create_random_batched_dists(self, key):
b, n, m, t = 3, 6, 3, 4
log_potentials = jnp.log(jax.random.uniform(key, (b, n, m, t, t)))
return [semi_markov_crf.SemiMarkovCRF(log_potentials)]
def create_symmetric_batched_dists(self):
b, n, m, t = 3, 6, 3, 4
log_potentials = jnp.zeros((b, n, m, t, t))
return [semi_markov_crf.SemiMarkovCRF(log_potentials)]
def create_invalid_shape_distribution(self):
b, n, m, t = 3, 6, 3, 4
log_potentials = jnp.zeros((b, n, m, t+1, t))
return semi_markov_crf.SemiMarkovCRF(log_potentials)
def assert_is_symmetric(self, dist, marginals) -> bool:
# There is no simple symmetric constraint to test against.
pass
def assert_batch_of_valid_samples(self, dist, samples):
labels = semi_markov_crf.SemiMarkovCRF.convert_sample_to_element_labels(
samples)
n = labels.shape[-2]
self.assert_allclose(jnp.cumsum(labels.sum(-1), -1), jnp.arange(1, n+1))
def assert_valid_marginals(self, dist, marginals):
active_edges_weight = marginals.sum((-1, -2, -3))
self.assert_allclose(active_edges_weight[..., -1], 1)
def test_semi_markov_simple_agrees_with_linear_chain_crf(self):
b, n, t = 3, 6, 4
key = jax.random.PRNGKey(0)
log_potentials = jnp.log(jax.random.uniform(key, (b, n, t, t)))
dist1 = linear_chain_crf.LinearChainCRF(log_potentials)
dist2 = semi_markov_crf.SemiMarkovCRF(
jnp.concatenate([log_potentials[..., None, :, :],
jnp.full((b, n, 3, t, t), -1e5)], axis=-3))
m1 = dist1.marginals()
m2 = dist2.marginals().max(-3)
self.assert_allclose(m1, m2)
if __name__ == "__main__":
absltest.main()
|
synjax-master
|
synjax/_src/semi_markov_crf_test.py
|
# Copyright 2023 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for spanning_tree_projective_crf."""
from absl.testing import absltest
import jax
import jax.numpy as jnp
import numpy as np
from synjax._src import constants
from synjax._src import distribution_test
from synjax._src import spanning_tree_projective_crf
from synjax._src.deptree_algorithms import deptree_non_proj_argmax
from synjax._src.utils import special
SpanningTreeProjectiveCRF = (
spanning_tree_projective_crf.SpanningTreeProjectiveCRF)
class SpanningTreeProjectiveTest(distribution_test.DistributionTest):
def create_random_batched_dists(self, key: jax.random.KeyArray):
b = 3
n_words = 5
log_potentials = jax.random.normal(key, (b, n_words+1, n_words+1))
dists = [SpanningTreeProjectiveCRF(
log_potentials=log_potentials, lengths=None,
single_root_edge=single_root_edge)
for single_root_edge in [True, False]]
return dists
def create_invalid_shape_distribution(self):
return SpanningTreeProjectiveCRF(
log_potentials=jnp.zeros((3, 6, 5)), lengths=None,
single_root_edge=True)
def test_Eisner_and_Kuhlmann_argmax_agree(self):
for dist in self.create_random_batched_dists(jax.random.PRNGKey(0)):
kuhlmann_argmax, kuhlmann_max = dist.argmax_and_max(algorithm="Kuhlmann")
eisner_argmax, eisner_max = dist.argmax_and_max(algorithm="Eisner")
self.assert_allclose(kuhlmann_argmax, eisner_argmax)
self.assert_allclose(kuhlmann_max, eisner_max)
def create_symmetric_batched_dists(self):
b = 3
n_words = 5
log_potentials = jnp.zeros((b, n_words+1, n_words+1))
dists = [SpanningTreeProjectiveCRF(
log_potentials=log_potentials, lengths=None,
single_root_edge=single_root_edge)
for single_root_edge in [True, False]]
return dists
def analytic_log_count(self, dist) -> jax.Array:
"""Computes the log of the number of the projective trees in the support.
The number of projective trees in multi-root case is computed using
Theorem 2 from Yuret (1998, page 29).
https://arxiv.org/pdf/cmp-lg/9805009.pdf
    For the single-root case a custom adaptation of the multi-root formula is
    used; it will be explained in the technical report.
Args:
dist: Projective trees distribution object.
Returns:
The log of the number of the projective trees.
"""
def multi_root_projective_log_count(n_words):
return special.log_comb(3*n_words, n_words) - jnp.log(2*n_words+1)
if dist.single_root_edge:
max_n = dist.log_potentials.shape[-1]-1
first_term = multi_root_projective_log_count(jnp.arange(max_n))
second_term = multi_root_projective_log_count(
dist.lengths[..., None]-jnp.arange(max_n)-2)
mask = jnp.arange(max_n) < dist.lengths[..., None]-1
to_sum = jnp.where(mask, first_term + second_term, -constants.INF)
return jax.scipy.special.logsumexp(to_sum, axis=-1)
else:
return multi_root_projective_log_count(dist.lengths-1)
def assert_is_symmetric(self, dist, marginals) -> bool:
sub_matrix = marginals[..., 1:, 1:]
self.assert_allclose(sub_matrix, jnp.rot90(sub_matrix, 2, (-1, -2)))
root_marginals = marginals[..., 0, 1:]
self.assert_allclose(root_marginals, root_marginals[..., ::-1])
def assert_batch_of_valid_samples(self, dist, samples):
trees = np.asarray(jnp.argmax(samples, -2).reshape(-1, samples.shape[-1]))
for tree in trees:
self.assertTrue(deptree_non_proj_argmax.is_projective_tree(tree))
n_words = trees.shape[-1]-1
self.assert_allclose(jnp.diagonal(samples, axis1=-2, axis2=-1), 0)
self.assert_allclose(samples[..., 0], 0)
if dist.single_root_edge:
self.assert_allclose(jnp.count_nonzero(trees[..., 1:], axis=-1),
n_words - 1)
def assert_valid_marginals(self, dist, marginals):
self.assert_allclose(
jnp.sum(jnp.sum(marginals, -2)[..., 1:], -1),
marginals.shape[-1]-1)
self.assert_allclose(jnp.diagonal(marginals, axis1=-2, axis2=-1), 0)
self.assert_allclose(marginals[..., 0], 0)
if dist.single_root_edge:
self.assert_allclose(jnp.sum(marginals[:, 0, 1:], axis=-1), 1)
if __name__ == "__main__":
absltest.main()
|
synjax-master
|
synjax/_src/spanning_tree_projective_crf_test.py
|
# Copyright 2023 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Distribution representing Semi-Markov CRF for linear chains."""
from typing import Optional
import jax
import jax.numpy as jnp
# pylint: disable=g-multiple-import, g-importing-member
from jaxtyping import Array, Float, Int32, Num
from synjax._src.config import get_config
from synjax._src.constants import INF
from synjax._src.distribution import SemiringDistribution
from synjax._src.typing import Shape, Key, typed
from synjax._src.utils import special
from synjax._src.utils.semirings import Semiring
class SemiMarkovCRF(SemiringDistribution):
"""Distribution representing semi-Markov CRFs.
Semi-Markov CRF was defined by Sarawagi and Cohen (2004). Similar model was
used in speech recognition under the name of Segmental CRF
(Abdel-Hamid et al, 2013; Lu et al, 2016). The main difference is that
Segmental CRF predicts label independently of each other which is a special
case of Semi-Markov CRF.
References:
Sarawagi and Cohen, 2004: https://proceedings.neurips.cc/paper/2004/file/eb06b9db06012a7a4179b8f3cb5384d3-Paper.pdf
Abdel-Hamid et al, 2013: https://www.microsoft.com/en-us/research/wp-content/uploads/2016/02/SegmentalNN.pdf
Lu et al, 2016: https://www.isca-speech.org/archive_v0/Interspeech_2016/pdfs/0040.PDF
""" # pylint: disable=line-too-long
log_potentials: Float[Array, "*batch n skip state state"]
lengths: Int32[Array, "*batch"]
@typed
def __init__(self, log_potentials: Float[Array, "*batch n skip state state"],
*, lengths: Optional[Int32[Array, "*batch"]] = None):
"""Constructs Semi-Markov CRF distribution.
References:
Sarawagi and Cohen, 2004: https://proceedings.neurips.cc/paper/2004/file/eb06b9db06012a7a4179b8f3cb5384d3-Paper.pdf
Abdel-Hamid et al, 2013: https://www.microsoft.com/en-us/research/wp-content/uploads/2016/02/SegmentalNN.pdf
Lu et al, 2016: https://www.isca-speech.org/archive_v0/Interspeech_2016/pdfs/0040.PDF
Args:
log_potentials:
For a sentence of n words log_potentials will have shape
(..., n, m, t, t). The entry of log_potentials[i, j, t1, t2] represents
        the log_potential of an edge (i-j, t1) -> (i, t2). In other words,
        log_potentials[i, j] represents edges entering the word at position i
        by jumping from the word at position i-j-1.
Zero-th transition matrix shows transitions from initial state
(non-word state) into the first word at position 0.
lengths:
Lengths of each entry in the batch. It has the same shape as the batch
and dtype of jnp.int32. If it's not passed, the maximal length will be
assumed based on the log_potentials.shape[-4].
""" # pylint: disable=line-too-long
super().__init__(log_potentials=log_potentials)
if lengths is None:
lengths = jnp.full(self.batch_shape, self.event_shape[0])
self.lengths = lengths
@property
def event_shape(self) -> Shape:
return self.log_potentials.shape[-4:]
@typed
def _structure_forward(
self, base_struct: Float[Array, "n skip state state"],
semiring: Semiring, key: Key) -> Float[Array, "s"]:
"""Forward algorithm with complexity O(n m t^2)."""
n, m, t = self.log_potentials.shape[-4:-1]
def loop(state, inp):
transitions, key = inp
out = semiring.einsum("smi,smij->sj", state, transitions, key=key)
state = jnp.roll(state, 1, axis=-2).at[:, 0].set(out)
return state, out
if get_config().checkpoint_loops:
loop = jax.checkpoint(loop)
keys = jax.random.split(key, n+1) # (n+1, 2)
seq = semiring.wrap(base_struct + self.log_potentials) # (s, n, m, t, t)
seq = jnp.swapaxes(seq, 0, 1) # (n, s, m, t, t)
state = semiring.wrap(jnp.full((m, t), -INF).at[0, 0].set(0))
_, states = jax.lax.scan(loop, state, (seq, keys[:-1]))
state = states[self.lengths-1]
return semiring.sum(state, axis=-1, key=keys[-1])
@classmethod
@typed
def convert_sample_to_element_labels(
cls, sample: Num[Array, "*xs n skip label label"]
) -> Num[Array, "*xs n label"]:
"""Converts samples from standard edge shape to a sequence of labels.
Args:
sample: Array of shape (..., n, m, t) where n is the sequence length,
m is the skip size and t is the label.
Returns:
Array of shape (..., n, t) where each element (..., a, b) is 1 if
sample has an arc covering position a with label b, otherwise it is 0.
"""
# This function is exposed as a classmethod in order for it to be accessible
# from the public interface of SynJax.
n, m = sample.shape[-4:-2]
labels = sample.sum(-2) # (..., n, m, t)
labels2 = jax.lax.cumsum(labels, -2 % labels.ndim, reverse=True)
def roll_and_mask(x, step):
mask = jnp.arange(n)[:, None] < n-step
return jnp.where(mask, special.roll(x, -step, -2), 0)
labels3 = jax.vmap(roll_and_mask, in_axes=(-2, 0), out_axes=-2
)(labels2, jnp.arange(m))
labels4 = labels3.sum(-2) # (..., n, t)
return labels4
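# --- Illustrative usage sketch (added example; the helper name is
# hypothetical, and sample() follows the generic Distribution interface).
def _example_semi_markov_usage():
  """Builds a small batched SemiMarkovCRF and queries standard quantities."""
  b, n, m, t = 2, 6, 3, 4  # Batch, sentence length, max skip, labels.
  log_potentials = jax.random.normal(jax.random.PRNGKey(0), (b, n, m, t, t))
  dist = SemiMarkovCRF(log_potentials, lengths=jnp.full((b,), n))
  marginals = dist.marginals()                    # (b, n, m, t, t)
  sample = dist.sample(jax.random.PRNGKey(1))     # One sample per batch entry.
  labels = SemiMarkovCRF.convert_sample_to_element_labels(sample)  # (b, n, t)
  return marginals, labels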
|
synjax-master
|
synjax/_src/semi_markov_crf.py
|
# Copyright 2023 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for CTC distribution."""
from absl.testing import absltest
import jax
import jax.numpy as jnp
from synjax._src import ctc
from synjax._src import distribution_test
class CtcTest(distribution_test.DistributionTest):
def create_random_batched_dists(self, key: jax.random.KeyArray):
b, n, v, l = 2, 12, 400, 6
key1, key2 = jax.random.split(key, 2)
log_potentials = jax.random.uniform(key1, (b, n, v))
labels = jax.random.randint(key2, (b, l), 1, v)
label_lengths = jnp.full(b, l)
input_lengths = jnp.full(b, n)
blank_id = 0
dists = [ctc.CTC(log_potentials, labels, label_lengths=label_lengths,
input_lengths=input_lengths, blank_id=blank_id)]
return dists
def create_symmetric_batched_dists(self):
b, n, l = 2, 6, 6
v = n
log_potentials = jnp.zeros((b, n, v))
labels = jnp.arange(1, n+1) * jnp.ones((b, 1), dtype=jnp.int32)
label_lengths = jnp.full(b, l)
input_lengths = jnp.full(b, n)
blank_id = 0
dists = [ctc.CTC(log_potentials, labels, label_lengths=label_lengths,
input_lengths=input_lengths, blank_id=blank_id)]
return dists
def create_invalid_shape_distribution(self):
b, n, l = 2, 6, 6
v = n
log_potentials = jnp.zeros((b, 1, n, v))
labels = jnp.arange(1, n+1) * jnp.ones((b, 1), dtype=jnp.int32)
label_lengths = jnp.full(b, l)
input_lengths = jnp.full(b, n)
blank_id = 0
dists = [ctc.CTC(log_potentials, labels, label_lengths=label_lengths,
input_lengths=input_lengths, blank_id=blank_id)]
return dists
def assert_is_symmetric(self, dist, marginals) -> bool:
self.assert_all(marginals >= 0)
def assert_batch_of_valid_samples(self, dist, samples):
transitions_count = jnp.sum(samples, (-1, -2))
self.assert_all(transitions_count >= min(*dist.event_shape))
def assert_valid_marginals(self, dist, marginals):
self.assert_allclose(marginals[..., -2:, -1].sum(-1), 1)
self.assert_allclose(marginals[..., :2, 0].sum(-1), 1)
def test_CTC_loss_against_optax(self):
for dist in self.create_random_batched_dists(jax.random.PRNGKey(0)):
self.assert_allclose(dist.loss(use_optax=True),
dist.loss(use_optax=False))
self.assert_allclose(dist.log_partition(use_optax=True),
dist.log_partition(use_optax=False))
self.assert_all(dist.loss(use_optax=True) > 0)
def test_alignment_to_labels(self):
for dist in self.create_random_batched_dists(jax.random.PRNGKey(0)):
kbest_alignments = dist.top_k(3)[0]
kbest_labelings = dist.alignment_to_labels(kbest_alignments)
dist.log_prob_labels(kbest_labelings)
if __name__ == "__main__":
absltest.main()
|
synjax-master
|
synjax/_src/ctc_test.py
|
# Copyright 2023 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for constituency_tensor_decomposition_pcfg."""
from absl.testing import absltest
import jax
import jax.numpy as jnp
from synjax._src import constituency_tensor_decomposition_pcfg as td
from synjax._src import distribution_test
from synjax._src.utils import special
def is_symmetric(x, axis1, axis2):
x1 = jnp.rot90(x, axes=(axis1, axis2))
x2 = jnp.swapaxes(x1, axis1, axis2)
return jnp.allclose(x2, x1)
class TensorDecompositionPCFGTest(distribution_test.DistributionTest):
def test_argmax(self):
for dist in self.create_random_batched_dists(jax.random.PRNGKey(0)):
self.assertRaises(NotImplementedError, dist.argmax)
def test_sampling(self):
# Sampling is not supported.
for dist in self.create_random_batched_dists(jax.random.PRNGKey(0)):
self.assertRaises(NotImplementedError, dist.sample, jax.random.PRNGKey(0))
def test_top_k(self):
# top_k is not supported.
for dist in self.create_random_batched_dists(jax.random.PRNGKey(0)):
self.assertRaises(NotImplementedError, dist.top_k, 2)
def create_invalid_shape_distribution(self):
b, n, voc, nt, pt, r = 2, 6, 10, 2, 3, 2
f = jnp.zeros
log_potentials = dict(
root=f((b, nt)),
nt_to_rank=f((b, nt, r)),
rank_to_left_nt=f((b, r, nt)),
rank_to_right_nt=f((b, r, nt+pt)),
emission=f((b, pt, voc)),
)
word_ids = jax.random.randint(jax.random.PRNGKey(0), (b, n), 0, voc)
return td.TensorDecompositionPCFG(**log_potentials, word_ids=word_ids)
def analytic_log_count(self, dist) -> jax.Array:
log_rank_combs = (dist.lengths-1)*jnp.log(dist.size_rank)
log_nt_combs = (dist.lengths-1) * jnp.log(
dist.size_nonterminals)
log_pt_combs = dist.lengths * jnp.log(dist.size_preterminals)
return (special.log_catalan(dist.lengths-1) + log_rank_combs
+ log_nt_combs + log_pt_combs)
def create_random_batched_dists(self, key: jax.random.KeyArray):
f = jax.random.uniform
keys = jax.random.split(key, 6)
b, n, voc, nt, pt, r = 2, 6, 10, 2, 3, 2
log_potentials = dict(
root=f(keys[0], (b, nt)),
nt_to_rank=f(keys[1], (b, nt, r)),
rank_to_left_nt=f(keys[2], (b, r, nt+pt)),
rank_to_right_nt=f(keys[3], (b, r, nt+pt)),
emission=f(keys[4], (b, pt, voc)),
)
word_ids = jax.random.randint(keys[5], (b, n), 0, voc)
return [td.TensorDecompositionPCFG(**log_potentials, word_ids=word_ids)]
def create_symmetric_batched_dists(self):
f = jnp.zeros
b, n, voc, nt, pt, r = 2, 6, 10, 2, 3, 2
log_potentials = dict(
root=f((b, nt)),
nt_to_rank=f((b, nt, r)),
rank_to_left_nt=f((b, r, nt+pt)),
rank_to_right_nt=f((b, r, nt+pt)),
emission=f((b, pt, voc)),
)
word_ids = jax.random.randint(jax.random.PRNGKey(0), (b, n), 0, voc)
return [td.TensorDecompositionPCFG(**log_potentials, word_ids=word_ids)]
def assert_is_symmetric(self, dist, marginals) -> bool:
del dist
chart_marginals, preterm_marginals = marginals
self.assertTrue(is_symmetric(chart_marginals, 1, 2))
self.assert_allclose(preterm_marginals, preterm_marginals[..., ::-1, :])
def assert_batch_of_valid_samples(self, dist, samples):
chart, preterms = samples
self.assert_allclose(chart.sum((-1, -2, -3)), dist.lengths-1)
self.assert_allclose(preterms.sum((-1, -2)), dist.lengths)
def assert_valid_marginals(self, dist, marginals):
chart_marginals, preterminal_marginals = marginals
span_marginals = chart_marginals.sum(-1)
for i in range(dist.batch_shape[0]):
n = dist.lengths[i]
root_prob = span_marginals[i, 0, n-1]
self.assert_allclose(root_prob, 1)
self.assert_zeros_and_ones(preterminal_marginals.sum(-1))
self.assert_allclose(preterminal_marginals.sum((-1, -2)), dist.lengths)
def test_argmax_can_be_jitted(self):
pass
def test_sampling_can_be_jitted(self):
pass
def test_mbr(self):
for dist in self.create_random_batched_dists(jax.random.PRNGKey(0)):
assert dist.batch_shape
best = dist.mbr(marginalize_labels=False)
self.assert_zeros_and_ones(best)
self.assert_batch_of_valid_samples(dist, best)
self.assert_valid_marginals(dist, best)
def test_entropy_cross_entropy(self):
for dist in self.create_random_batched_dists(jax.random.PRNGKey(0)):
assert dist.batch_shape
self.assert_allclose(dist.entropy(), dist.cross_entropy(dist))
self.assert_allclose(dist.kl_divergence(dist), 0)
if __name__ == "__main__":
absltest.main()
|
synjax-master
|
synjax/_src/constituency_tensor_decomposition_pcfg_test.py
|
# Copyright 2023 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for constituency_tree_crf."""
# pylint: disable=g-importing-member
from absl.testing import absltest
import jax
import jax.numpy as jnp
from synjax._src import distribution_test
from synjax._src.constituency_tree_crf import TreeCRF
from synjax._src.utils import special
def is_symmetric(x, axis1, axis2):
x1 = jnp.rot90(x, axes=(axis1, axis2))
x2 = jnp.swapaxes(x1, axis1, axis2)
return jnp.allclose(x2, x1)
class TreeCrfTest(distribution_test.DistributionTest):
def create_random_batched_dists(self, key: jax.random.KeyArray):
b, n, t = 3, 5, 2
log_potentials = jnp.log(jax.random.uniform(key, (b, n, n, t)))
lengths = jnp.array(list(range(n-b+1, n+1)))
return [TreeCRF(log_potentials, lengths=lengths)]
def create_symmetric_batched_dists(self):
b, n, t = 1, 5, 4
log_potentials = jnp.zeros((b, n, n, t))
return [TreeCRF(log_potentials, lengths=None)]
def create_invalid_shape_distribution(self):
b, n, t = 1, 5, 4
log_potentials = jnp.zeros((b, n, n-1, t))
return TreeCRF(log_potentials, lengths=None)
def analytic_log_count(self, dist) -> jax.Array:
# Note: terminal labels are included as part of the combinatorial structure.
log_tree_count = special.log_catalan(dist.lengths-1)
t = dist.log_potentials.shape[-1]
log_nt_labeling_count = (2*dist.lengths-1) * jnp.log(t)
return log_tree_count + log_nt_labeling_count
def assert_is_symmetric(self, dist, marginals) -> bool:
self.assertTrue(is_symmetric(marginals, 1, 2))
def assert_batch_of_valid_samples(self, dist, samples):
self.assert_allclose(jnp.sum(samples, axis=(-1, -2, -3)), dist.lengths*2-1)
def assert_valid_marginals(self, dist, marginals):
for i in range(dist.batch_shape[0]):
n = dist.lengths[i]
root_prob = marginals[i, 0, n-1, :].sum(-1)
self.assert_allclose(root_prob, 1)
terminals = jnp.diagonal(marginals.sum(-1), axis1=1, axis2=2) # b, n
self.assert_all(jnp.isclose(terminals, 1) | jnp.isclose(terminals, 0))
self.assert_allclose(terminals.sum(-1), dist.lengths)
if __name__ == "__main__":
absltest.main()
|
synjax-master
|
synjax/_src/constituency_tree_crf_test.py
|
# Copyright 2023 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implementation of Tree CRF that models constituency trees.
References:
Stern et al 2017 -- https://aclanthology.org/P17-1076.pdf
"""
from typing import Optional
import jax
import jax.numpy as jnp
# pylint: disable=g-multiple-import, g-importing-member
from jaxtyping import Array, Float, Int32
from synjax._src.config import get_config
from synjax._src.distribution import SemiringDistribution
from synjax._src.typing import Shape, Key, typed
from synjax._src.utils import chart_struct
from synjax._src.utils.semirings import Semiring
class TreeCRF(SemiringDistribution):
"""Globally normalized istribution over binary constituency trees.
The model structure is very similar to Stern et al (2017) except SynJax
additionally supports properly normalizing the distribution.
References:
Stern et al 2017 -- https://aclanthology.org/P17-1076.pdf
"""
log_potentials: Float[Array, "*batch n n label"]
lengths: Int32[Array, "*batch"]
@typed
def __init__(self,
log_potentials: Float[Array, "*batch n n label"],
*,
lengths: Optional[Int32[Array, "*batch"]] = None):
super().__init__(log_potentials=log_potentials)
if lengths is None:
*batch_shape, n = log_potentials.shape[:-2]
lengths = jnp.full(batch_shape, n)
self.lengths = lengths
@property
def event_shape(self) -> Shape:
return self.log_potentials.shape[-3:]
@typed
def _structure_forward(self, base_struct: Float[Array, "n n label"],
semiring: Semiring, key: Key) -> Float[Array, "s"]:
n = self.event_shape[0]
keys = jax.random.split(key, n+1)
param = semiring.wrap(base_struct+self.log_potentials)
chart = chart_struct.from_cky_table(semiring.sum(param, axis=3,
key=keys[1]))
def loop(chart: chart_struct.Chart, d: Array):
new = semiring.einsum("sij,sij,si->si", chart.left(),
chart.right(d, semiring), chart.get_entries(d),
key=keys[d])
return chart.set_entries(d, new), None
if get_config().checkpoint_loops:
loop = jax.checkpoint(loop)
chart, _ = jax.lax.scan(loop, chart, jnp.arange(2, n+1))
return chart.pick_length(self.lengths)
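# --- Illustrative usage sketch (added example; the helper name is
# hypothetical, the class and the generic Distribution methods are above).
def _example_tree_crf_usage():
  """Builds a batched TreeCRF over labeled spans and reads off marginals."""
  b, n, labels = 2, 5, 3
  log_potentials = jnp.log(jax.random.uniform(jax.random.PRNGKey(0),
                                              (b, n, n, labels)))
  dist = TreeCRF(log_potentials, lengths=jnp.array([n, n - 1]))
  span_marginals = dist.marginals()   # (b, n, n, labels); root span sums to 1.
  best_tree = dist.argmax()           # Highest-scoring labeled binary tree.
  return span_marginals, best_tree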
|
synjax-master
|
synjax/_src/constituency_tree_crf.py
|
# Copyright 2023 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Common types used in SynJax."""
from typing import Tuple
import jaxtyping
import typeguard
Shape = Tuple[int, ...]
Key = jaxtyping.PRNGKeyArray
typed = lambda fn: jaxtyping.jaxtyped(typeguard.typechecked(fn))
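# --- Added note (sketch): `typed` composes jaxtyping's shape checking with
# typeguard's runtime type checking, so a function such as
#   @typed
#   def f(x: jaxtyping.Float[jaxtyping.Array, "n"]
#         ) -> jaxtyping.Float[jaxtyping.Array, "n"]:
#     return x
# raises at call time if `x` is not a floating-point rank-1 array.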
|
synjax-master
|
synjax/_src/typing.py
|
# Copyright 2023 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Abstract test template for all distributions."""
import functools
from typing import List
from absl.testing import parameterized
# pylint: disable=g-importing-member
import jax
import jax.numpy as jnp
import numpy as np
from synjax._src import constants
from synjax._src.distribution import Distribution
class DistributionTest(parameterized.TestCase):
def assert_zeros_and_ones(self, x):
leaves = jax.tree_util.tree_flatten(x)[0]
is_close = lambda a, b: jnp.isclose( # pylint: disable=g-long-lambda
a, b, rtol=constants.TESTING_RELATIVE_TOLERANCE,
atol=constants.TESTING_ABSOLUTE_TOLERANCE)
self.assertTrue(
all(map(lambda x: jnp.all(is_close(x, 0) | is_close(x, 1)), leaves)),
msg="Edges must be 0s and 1s only.")
def assert_all(self, x, *, msg=""):
self.assertTrue(all(map(jnp.all, jax.tree_util.tree_flatten(x)[0])),
msg=msg)
def assert_no_duplicates_in_first_axis(self, x):
def tree_equal(a, b):
a_leaves, _ = jax.tree_util.tree_flatten(a)
b_leaves, _ = jax.tree_util.tree_flatten(b)
leaf_matches = [jnp.allclose(a_leaf, b_leaf)
for a_leaf, b_leaf in zip(a_leaves, b_leaves)]
return functools.reduce(jnp.logical_and, leaf_matches)
def vector_match(y, ys):
return jax.vmap(tree_equal, in_axes=(None, 0))(y, ys)
matrix_match = jax.vmap(vector_match, in_axes=(0, None))(x, x)
self.assert_allclose(matrix_match,
jnp.eye(matrix_match.shape[-1], dtype=bool))
def assert_allclose(self, x, y, *, msg="",
rtol=constants.TESTING_RELATIVE_TOLERANCE,
atol=constants.TESTING_ABSOLUTE_TOLERANCE):
# This is different from standard np.test.assert_allclose in that
# it allows for lower precision by default and it allows pytrees and
# different shapes as long as they are broadcastable.
def array_all_close(a, b):
a = np.asarray(a)
b = np.asarray(b)
broadcasting_shape = np.broadcast_shapes(a.shape, b.shape)
a = np.broadcast_to(a, broadcasting_shape)
b = np.broadcast_to(b, broadcasting_shape)
np.testing.assert_allclose(a, b, rtol=rtol, atol=atol, err_msg=msg)
jax.tree_map(array_all_close, x, y)
def create_random_batched_dists(self, key: jax.random.KeyArray
) -> List[Distribution]:
raise NotImplementedError
def create_symmetric_batched_dists(self) -> List[Distribution]:
raise NotImplementedError
def assert_is_symmetric(self, dist: Distribution, marginals: jax.Array
) -> bool:
raise NotImplementedError
def assert_batch_of_valid_samples(self, dist: Distribution, samples: jax.Array
):
raise NotImplementedError
def assert_valid_marginals(self, dist: Distribution, marginals: jax.Array):
raise NotImplementedError
def analytic_log_count(self, dist: Distribution) -> jax.Array:
"""Computes log count of structrues analytically."""
raise NotImplementedError
def create_invalid_shape_distribution(self) -> Distribution:
raise NotImplementedError
def test_crash_on_invalid_shapes(self):
self.assertRaises(Exception, self.create_invalid_shape_distribution)
def test_symmetric(self):
for dist in self.create_symmetric_batched_dists():
assert dist.batch_shape
self.assert_is_symmetric(dist, dist.marginals())
def test_log_count(self):
for dist in self.create_random_batched_dists(jax.random.PRNGKey(0)):
log_predicted = dist.log_count()
self.assert_all(log_predicted >= 0)
predicted = jnp.exp(log_predicted)
self.assert_all(jnp.round(predicted)-predicted < 0.1)
try:
self.assert_allclose(log_predicted, self.analytic_log_count(dist))
except NotImplementedError:
pass
def test_marginals(self):
for dist in self.create_random_batched_dists(jax.random.PRNGKey(0)):
assert dist.batch_shape
m = dist.marginals()
self.assert_all(jax.tree_map(lambda x: (0 <= x) & (x <= 1.0001), m),
msg="Marginals must be between 0 and 1")
self.assert_all(
jax.tree_map(lambda x: jax.vmap(jnp.any)(0 < x), m),
msg="Some marginals must be > 0")
self.assert_valid_marginals(dist, m)
def test_argmax(self):
for dist in self.create_random_batched_dists(jax.random.PRNGKey(0)):
assert dist.batch_shape
best = dist.argmax()
self.assert_zeros_and_ones(best)
self.assert_batch_of_valid_samples(dist, best)
self.assert_valid_marginals(dist, best)
probs = jnp.exp(dist.log_prob(best))
self.assertEqual(probs.shape, dist.batch_shape)
self.assert_all((probs > 0) & (probs <= 1))
def test_sampling(self):
for dist in self.create_random_batched_dists(jax.random.PRNGKey(0)):
assert dist.batch_shape
k = 5
samples = dist.sample(jax.random.PRNGKey(0), k)
self.assert_zeros_and_ones(samples)
self.assert_batch_of_valid_samples(dist, samples)
# pylint: disable=cell-var-from-loop
self.assert_all(jax.tree_map(lambda x: x.shape[0] == k, samples))
prob = jnp.exp(dist.log_prob(samples))
self.assert_all((0 < prob) & (prob <= 1))
def test_entropy_cross_entropy(self):
for dist, dist2 in zip(
self.create_random_batched_dists(jax.random.PRNGKey(0)),
self.create_random_batched_dists(jax.random.PRNGKey(1))):
assert dist.batch_shape
assert dist2.batch_shape
entropy = dist.entropy()
self_cross_entropy = dist.cross_entropy(dist)
self_kl_divergence = dist.kl_divergence(dist)
self.assert_allclose(entropy, self_cross_entropy)
self.assert_all(entropy > 0)
self.assert_all(self_cross_entropy > 0)
self.assert_all(self_kl_divergence == 0)
self.assert_all(dist2.kl_divergence(dist) > 0)
def check_top_k_single_dist(self, dist: Distribution,
check_prefix_condition: bool = True):
# pylint: disable=cell-var-from-loop
assert len(dist.batch_shape) == 1
k = 4
best_k, best_k_scores_direct = dist.top_k(k)
self.assert_zeros_and_ones(best_k)
self.assert_all(jax.tree_map(lambda x: x.shape[0] == k, best_k))
best_k_scores = dist.unnormalized_log_prob(best_k)
self.assert_allclose(best_k_scores, best_k_scores_direct)
self.assert_all(best_k_scores > -1e5)
self.assert_all(best_k_scores[:-1] >= best_k_scores[1:])
# All trees are valid.
for i in range(k):
best_i = jax.tree_map(lambda x: x[i], best_k)
self.assert_valid_marginals(dist, best_i)
self.assert_batch_of_valid_samples(dist, best_i)
# All structs are different from each other.
for i in range(dist.batch_shape[0]):
self.assert_no_duplicates_in_first_axis(
jax.tree_map(lambda x: x[:, i], best_k))
top_1 = jax.tree_map(lambda x: x[None], dist.argmax())
self.assert_allclose(dist.top_k(1)[0], top_1)
if check_prefix_condition:
# Top k-1 is a prefix of top k.
self.assert_allclose(jax.tree_map(lambda x: x[:k-1], best_k),
dist.top_k(k-1)[0])
self.assert_allclose(jax.tree_map(lambda x: x[:1], best_k), top_1)
def test_top_k(self):
for dist in self.create_random_batched_dists(jax.random.PRNGKey(0)):
self.check_top_k_single_dist(dist)
def test_argmax_can_be_jitted(self):
f = jax.jit(lambda x: x.argmax())
for dist in self.create_random_batched_dists(jax.random.PRNGKey(0)):
jax.block_until_ready(f(dist))
jax.block_until_ready(f(dist))
def test_sampling_can_be_jitted(self):
key = jax.random.PRNGKey(0)
f = jax.jit(lambda x: x.sample(key))
for dist in self.create_random_batched_dists(jax.random.PRNGKey(0)):
jax.block_until_ready(f(dist))
jax.block_until_ready(f(dist))
|
synjax-master
|
synjax/_src/distribution_test.py
|
# Copyright 2023 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Distribution of alignments between two sequences."""
from __future__ import annotations
# pylint: disable=g-multiple-import, g-importing-member
from functools import partial
from typing import Optional, Tuple, Literal, Union
import equinox as eqx
import jax
import jax.numpy as jnp
from jaxtyping import Array, Float, Int32
import numpy as np
import scipy
from synjax._src.alignment_monotone_general import GeneralMonotoneAlignmentCRF
from synjax._src.constants import INF
from synjax._src.distribution import Distribution
from synjax._src.typing import Shape, Key, typed
def _numpy_non_monotone_align(log_potentials):
vectorized_linear_sum_assignment = np.vectorize(
partial(scipy.optimize.linear_sum_assignment, maximize=True),
signature="(n, n)->(n),(n)")
i, j = vectorized_linear_sum_assignment(log_potentials)
one_hot = lambda x: np.arange(log_potentials.shape[-1]) == x[..., None]
return np.sum(one_hot(i)[..., :, None]*one_hot(j)[..., None, :], axis=-3,
dtype=np.float32)
@jax.custom_gradient
@typed
def _jax_non_monotone_align_callback(log_potentials: Float[Array, "*b n n"],
                                     lengths: Int32[Array, "*b"]):
  """Computes non-monotone alignment using JAX pure callback to NumPy."""
  n = log_potentials.shape[-1]
  mask = jnp.arange(n) < lengths[..., None]
  mask = mask[..., None] * mask[..., None, :]
  log_potentials = jnp.where(mask, log_potentials, -INF)
  diag_mask = (jnp.arange(n) >= lengths[..., None, None]) * jnp.eye(n)
log_potentials = jnp.where(diag_mask, INF, log_potentials)
alignments = jax.pure_callback(
_numpy_non_monotone_align, jax.ShapeDtypeStruct(log_potentials.shape,
jnp.float32),
log_potentials)
return alignments*mask, lambda g: (jnp.zeros_like(log_potentials), None)
class AlignmentCRF(Distribution):
"""Simple alignment CRF that covers most use-cases.
This alignment class provides both monotone and non-monotone alignment, but
restricts all alignment scores to be defined per cell, i.e. the score does not
  depend on the direction of the path through the alignment table but only on
  the actual cells it visits. For a more general type of monotone alignment see
  the GeneralMonotoneAlignmentCRF class.
"""
_log_potentials: Float[Array, "*batch row col"]
_lengths: Int32[Array, "*batch"]
_dist: Optional[GeneralMonotoneAlignmentCRF]
alignment_type: str = eqx.static_field()
@typed
def __init__(self, log_potentials: Float[Array, "*batch row col"], *,
lengths_rows: Optional[Int32[Array, "*batch"]] = None,
lengths_cols: Optional[Int32[Array, "*batch"]] = None,
alignment_type: Literal["monotone_one_to_many",
"monotone_many_to_many",
"non_monotone_one_to_one"]):
super().__init__(log_potentials=None)
self._log_potentials = log_potentials
self.alignment_type = alignment_type
if (lengths_cols is None and lengths_rows is None
and alignment_type == "monotone_one_to_many"
and log_potentials.shape[-2] >= log_potentials.shape[-1]):
raise ValueError("This is a useless distribution because there is "
"less than two alignment possible.")
if lengths_rows is not None:
self._lengths = lengths_rows
else:
self._lengths = jnp.full(log_potentials.shape[:-2],
log_potentials.shape[-1])
if alignment_type == "monotone_one_to_many":
self._dist = GeneralMonotoneAlignmentCRF(
log_potentials_horizontal=(log_potentials, log_potentials),
log_potentials_vertical=None,
lengths_rows=lengths_rows, lengths_cols=lengths_cols)
elif alignment_type == "monotone_many_to_many":
self._dist = GeneralMonotoneAlignmentCRF(
log_potentials_horizontal=(log_potentials, log_potentials),
log_potentials_vertical=log_potentials,
lengths_rows=lengths_rows, lengths_cols=lengths_cols)
elif alignment_type == "non_monotone_one_to_one":
self._dist = None
if lengths_cols is not None:
raise ValueError("Non-monotone alignment requires only lengths_rows.")
if log_potentials.shape[-1] != log_potentials.shape[-2]:
raise ValueError("Non-monotone alignment requires square matrix.")
else:
raise ValueError(f"Unknown alignment type: {alignment_type}")
@property
def event_shape(self) -> Shape:
return self._log_potentials.shape[-2:]
@property
def batch_shape(self) -> Shape:
return self._log_potentials.shape[:-2]
@typed
def sample(self, key: Key, sample_shape: Union[Shape, int] = (), **kwargs
) -> Float[Array, "... n m"]:
if self.alignment_type == "non_monotone_one_to_one":
raise NotImplementedError(
"Non-monotone distribution doesn't support sampling.\n"
"Instead, you can try perturb-and-map by injecting the noise.")
else:
return self._dist.sample(key=key, sample_shape=sample_shape, **kwargs)
@typed
def normalize_log_probs(self, scores: Float[Array, "*b"]
) -> Float[Array, "*b"]:
if self.alignment_type == "non_monotone_one_to_one":
raise NotImplementedError(
"Non-monotone distribution doesn't support normalization.")
else:
return self._dist.normalize_log_probs(scores)
@typed
def log_prob(self, event: Float[Array, "*b n m"], **kwargs
) -> Float[Array, "*b"]:
if self.alignment_type == "non_monotone_one_to_one":
raise NotImplementedError(
"Non-monotone alignment doesn't support normalized log-probs.")
else:
return self._dist.log_prob(event, **kwargs)
@typed
def unnormalized_log_prob(self, event: Float[Array, "*b n m"], **kwargs
) -> Float[Array, "*b"]:
if self.alignment_type == "non_monotone_one_to_one":
return jnp.einsum("...ij,...ij->...", event, self._log_potentials)
else:
return self._dist.unnormalized_log_prob(event, **kwargs)
@typed
def log_partition(self, **kwargs) -> Float[Array, "*batch"]:
if self.alignment_type == "non_monotone_one_to_one":
raise NotImplementedError(
"Non-monotone alignment doesn't support log-partition.")
else:
return self._dist.log_partition(**kwargs)
@typed
def marginals_for_template_variables(self, **kwargs
) -> Float[Array, "*batch n m"]:
if self.alignment_type == "non_monotone_one_to_one":
raise NotImplementedError(
"Non-monotone alignment doesn't support marginals.")
else:
return self._dist.marginals_for_template_variables(**kwargs)
@typed
def marginals(self, **kwargs) -> Float[Array, "*batch n m"]:
if self.alignment_type == "non_monotone_one_to_one":
raise NotImplementedError(
"Non-monotone alignment doesn't support marginals.")
else:
return self._dist.marginals(**kwargs)
@typed
def argmax(self, **kwargs) -> Float[Array, "*batch n m"]:
if self.alignment_type == "non_monotone_one_to_one":
return _jax_non_monotone_align_callback(self._log_potentials,
self._lengths)
else:
return self._dist.argmax(**kwargs)
@typed
def argmax_and_max(self, **kwargs) -> Tuple[Float[Array, "*batch n m"],
Float[Array, "*batch"]]:
    event = self.argmax(**kwargs)
return event, self.unnormalized_log_prob(event, **kwargs)
@typed
def top_k(self, k: int, **kwargs) -> Tuple[Float[Array, "k *batch n m"],
Float[Array, "k *batch"]]:
if self.alignment_type == "non_monotone_one_to_one":
raise NotImplementedError(
"Non-monotone alignment doesn't support top-k.")
else:
return self._dist.top_k(k, **kwargs)
@typed
def entropy(self, **kwargs) -> Float[Array, "*batch"]:
if self.alignment_type == "non_monotone_one_to_one":
raise NotImplementedError(
"Non-monotone alignment doesn't support entropy.")
else:
return self._dist.entropy(**kwargs)
@typed
def cross_entropy(self, other: AlignmentCRF, **kwargs
) -> Float[Array, "*batch"]:
# pylint: disable=protected-access
if self.alignment_type == "non_monotone_one_to_one":
raise NotImplementedError(
"Non-monotone alignment doesn't support cross-entropy.")
else:
return self._dist.cross_entropy(other._dist, **kwargs)
@typed
def kl_divergence(self, other: AlignmentCRF, **kwargs
) -> Float[Array, "*batch"]:
# pylint: disable=protected-access
if self.alignment_type == "non_monotone_one_to_one":
raise NotImplementedError(
"Non-monotone alignment doesn't support KL divergence.")
else:
return self._dist.kl_divergence(other._dist, **kwargs)
|
synjax-master
|
synjax/_src/alignment_simple.py
|
# Copyright 2023 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Distribution representing Hidden Markov Models."""
from typing import Optional, Union
import jax
import jax.numpy as jnp
# pylint: disable=g-multiple-import, g-importing-member
from jaxtyping import Array, Float, Int32
from synjax._src import linear_chain_crf
from synjax._src.typing import typed
@typed
def _expand_simple_sequence_model(
init_score: Float[Array, "*batch state"],
transition_score: Float[Array, "*batch state state"],
emission_score: Float[Array, "*batch n state"]
) -> Float[Array, "*batch n state state"]:
"""Takes simple HMM-like input and expands it into generalized input for CRF.
Args:
init_score: Array of shape (..., t) where t is the number of states,
transition_score: Array of shape (..., t, t),
emission_score:
Array of shape (..., n, t) where n is the length of sequence.
Returns:
Expanded Array of shape (..., n, t, t)
"""
scores = transition_score[..., None, :, :] + emission_score[..., None, :]
scores = scores.at[..., 0, 0, :].set(init_score + emission_score[..., 0, :])
return scores
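# Shape illustration: with init_score of shape (t,), transition_score of
# shape (t, t) and emission_score of shape (n, t), the result has shape
# (n, t, t) where entry [i, a, b] scores being in state a at position i-1
# and in state b at position i; position 0 uses init_score instead of a
# transition score.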
# pylint: disable=invalid-name
@typed
def HMM(init_logits: Float[Array, "*batch state"],
transition_logits: Float[Array, "*batch state state"],
emission_dist,
observations: Union[Int32[Array, "*batch n"],
Float[Array, "*batch n d"]],
*,
lengths: Optional[Int32[Array, "*batch"]] = None
) -> linear_chain_crf.LinearChainCRF:
"""Builds HMM distribution with t states over n observations.
Note that this is a conditional HMM, i.e. it is a distribution over state
sequences provided by the HMM conditioned on the provided input observations.
Because of that, calling dist.log_probability(state_sequence) returns
p(state_sequence | input_sequence; hmm). To get the joint probability of a
state sequence and an input sequence, p(state_sequence, input_sequence; hmm),
call dist.unnormalized_log_probability(state_sequence).
Args:
init_logits: Array of shape (..., t)
transition_logits: Array of shape (..., t, t)
emission_dist:
Array of shape (..., t, v) in case of categorical output or a
continuous distribution with Distrax or TensorFlow Probability interface
that has batch of shape (..., t).
observations: Array of shape (..., n) of type jnp.int32 in case of
categorical output or (..., n, d) in case of d-dimensional
vector output.
lengths:
Lengths of each entry in the batch. It has the same shape as the batch
and dtype of jnp.int32. If it's not passed, the maximal length will be
assumed based on the log_potentials.shape[-3].
Returns:
Distribution that is in fact LinearChainCRF but is parametrized in such a
way so that it behaves just the same as if it was an HMM.
"""
if isinstance(observations, Float[Array, "*batch n d"]) and (
hasattr(emission_dist, "log_prob")):
# This is a distrax continuous distribution.
x = jnp.moveaxis(observations, -2, 0)[..., None, :] # (n, *batch_shape,1,d)
lp = emission_dist.log_prob(x) # (n, *batch_shape, state)
emission_scores = jnp.moveaxis(lp, 0, -2) # (*batch_shape, n, state)
elif isinstance(observations, Int32[Array, "*batch n"]) and (
isinstance(emission_dist, Float[Array, "*batch state voc"])):
# This is a categorical distribution.
emission_dist = jax.nn.log_softmax(emission_dist, -1)
x = jnp.take_along_axis(emission_dist, observations[..., None, :], -1)
emission_scores = jnp.swapaxes(x, -1, -2) # (*batch_shape, n, state)
else:
raise ValueError("Arguments for emission_dist and observations do not fit.")
return linear_chain_crf.LinearChainCRF(
log_potentials=_expand_simple_sequence_model(
jax.nn.log_softmax(init_logits),
jax.nn.log_softmax(transition_logits),
emission_scores),
lengths=lengths)
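# Example usage (illustrative sketch with made-up sizes: 3 states, a
# vocabulary of 7 symbols, a sequence of length 5 and no batch axis):
#
#   key = jax.random.PRNGKey(0)
#   init_logits = jax.random.normal(key, (3,))
#   transition_logits = jax.random.normal(key, (3, 3))
#   emission_logits = jax.random.normal(key, (3, 7))            # categorical
#   observations = jnp.array([0, 4, 2, 6, 1], dtype=jnp.int32)
#   dist = HMM(init_logits, transition_logits, emission_logits, observations)
#   best = dist.argmax()          # highest-scoring state sequence as an event
#   marginals = dist.marginals()  # posterior marginals given the observations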
|
synjax-master
|
synjax/_src/hmm.py
|
# Copyright 2023 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for spanning_tree_non_projective_crf."""
from absl.testing import absltest
from absl.testing import parameterized
import jax
import jax.numpy as jnp
import numpy as np
from synjax._src import distribution_test
from synjax._src import spanning_tree_non_projective_crf
from synjax._src.deptree_algorithms import deptree_non_proj_argmax
from synjax._src.deptree_algorithms import deptree_non_proj_wilson_sampling
SpanningTreeNonProjectiveCRF = (
spanning_tree_non_projective_crf.SpanningTreeNonProjectiveCRF)
class SpanningTreeNonProjectiveCRFTest(
distribution_test.DistributionTest):
def create_random_batched_dists(self, key: jax.random.KeyArray):
b = 3
n_words = 5
log_potentials = jax.random.normal(key, (b, n_words+1, n_words+1))
dists = []
for single_root_edge in [True, False]:
dists.append(SpanningTreeNonProjectiveCRF(
log_potentials=log_potentials, lengths=None,
single_root_edge=single_root_edge))
return dists
def create_symmetric_batched_dists(self):
b = 3
n_words = 5
log_potentials = jnp.zeros((b, n_words+1, n_words+1))
dists = []
for single_root_edge in [True, False]:
dists.append(SpanningTreeNonProjectiveCRF(
log_potentials=log_potentials, lengths=None,
single_root_edge=single_root_edge))
return dists
def create_invalid_shape_distribution(self):
return SpanningTreeNonProjectiveCRF(
log_potentials=jnp.zeros((3, 6, 5)), lengths=None,
single_root_edge=True)
def analytic_log_count(self, dist) -> jax.Array:
"""Computes the log of the number of the spanning trees in the support.
Computes the log of the number of spanning trees in the
support of the distribution by using Cayle's formula with the
modification for single-root trees from
Stanojević (2022) https://aclanthology.org/2021.emnlp-main.823.pdf
Args:
dist: Non-Projective distribution object.
Returns:
The log of the number of the spanning trees.
"""
if dist.single_root_edge:
return (dist.lengths-2)*jnp.log(dist.lengths-1)
else:
return (dist.lengths-2)*jnp.log(dist.lengths)
def assert_is_symmetric(self, dist, marginals) -> bool:
sub_matrix = marginals[..., 1:, 1:]
self.assert_allclose(sub_matrix, jnp.swapaxes(sub_matrix, -1, -2))
self.assert_allclose(marginals[..., 0, 1:-1], marginals[..., 0, 2:])
def test_sampling_can_be_jitted(self):
key = jax.random.PRNGKey(0)
for algorithm in ["wilson", "colbourn"]:
f = jax.jit(lambda x: x.sample(key, algorithm=algorithm)) # pylint: disable=cell-var-from-loop
for dist in self.create_random_batched_dists(jax.random.PRNGKey(0)):
jax.block_until_ready(f(dist))
jax.block_until_ready(f(dist))
def assert_batch_of_valid_samples(self, dist, samples):
trees = np.asarray(jnp.argmax(samples, -2).reshape(-1, samples.shape[-1]))
for tree in trees:
self.assertTrue(deptree_non_proj_argmax.is_tree(tree))
n_words = trees.shape[-1]-1
self.assert_allclose(jnp.diagonal(samples, axis1=-2, axis2=-1), 0)
self.assert_allclose(samples[..., 0], 0)
if dist.single_root_edge:
self.assert_allclose(jnp.count_nonzero(trees[..., 1:], axis=-1),
n_words - 1)
def assert_valid_marginals(self, dist, marginals):
self.assert_allclose(
jnp.sum(jnp.sum(marginals, -2)[..., 1:], -1),
marginals.shape[-1]-1)
self.assert_allclose(jnp.diagonal(marginals, axis1=-2, axis2=-1), 0)
self.assert_allclose(marginals[..., 0], 0)
if dist.single_root_edge:
self.assert_allclose(jnp.sum(marginals[:, 0, 1:], axis=-1), 1)
def test_top_k(self):
"""This method overrides the test of the superclass.
Currently the non-projective trees top_k implementation supports only
approximate decoding, so using the superclass implementation would fail on
the subtests 'Top k-1 is a prefix of top k' and
'Two exact algorithms (top_k and sort) give the same result'.
The rest of this test is the same.
"""
for dist in self.create_random_batched_dists(jax.random.PRNGKey(0)):
self.check_top_k_single_dist(dist, check_prefix_condition=False)
def test_sample_without_replacement(self):
k = 4
for dist in self.create_random_batched_dists(jax.random.PRNGKey(0)):
k_samples, k_logprobs_direct, _ = dist.sample_without_replacement(
jax.random.PRNGKey(0), k=k)
k_logprobs = dist.log_prob(k_samples)
self.assert_allclose(k_logprobs, k_logprobs_direct)
self.assert_all(k_logprobs <= 0)
self.assert_all(k_logprobs > -1e5)
# All trees are valid.
for i in range(k):
self.assert_valid_marginals(dist, k_samples[i])
self.assert_batch_of_valid_samples(dist, k_samples[i])
# All structs are different from each other.
def single_instance_has_duplicates(instance_k_samples):
k_flattened = instance_k_samples.reshape(
instance_k_samples.shape[0], -1)
comparison_matrix = jnp.all(k_flattened[:, None] == k_flattened, -1)
comparison_matrix = jnp.where(jnp.eye(comparison_matrix.shape[-1]),
False, comparison_matrix)
return jnp.any(comparison_matrix)
self.assert_all(
~jax.vmap(single_instance_has_duplicates, in_axes=1)(k_samples),
msg="All top-k structures should be unique.")
def test_wilson_and_colbourn_do_not_crash(self):
for dist in self.create_random_batched_dists(jax.random.PRNGKey(0)):
with self.subTest("testing Wilson"):
sample = dist.sample(jax.random.PRNGKey(0), algorithm="wilson")
self.assert_valid_marginals(dist, sample)
with self.subTest("testing Colbourn"):
sample = dist.sample(jax.random.PRNGKey(0), algorithm="colbourn")
self.assert_valid_marginals(dist, sample)
@parameterized.parameters([dict(single_root_edge=True),
dict(single_root_edge=False)])
def test_marginals_with_given_laplacian_invt(self, single_root_edge: bool):
n = 5
potentials = jax.random.uniform(jax.random.PRNGKey(0), (n, n))
potentials = potentials.at[:, 0].set(0) # Nothing enters root node.
potentials = potentials * (1-jnp.eye(n)) # No self-loops.
log_potentials = jnp.log(potentials)
laplacian = spanning_tree_non_projective_crf._construct_laplacian_hat(
log_potentials, single_root_edge=single_root_edge)
laplacian_invt = jnp.linalg.inv(laplacian).T
marginals_a = (
spanning_tree_non_projective_crf._marginals_with_given_laplacian_invt(
log_potentials, laplacian_invt, single_root_edge=single_root_edge))
marginals_b = jnp.asarray(
deptree_non_proj_wilson_sampling._marginals_with_given_laplacian_invt(
np.asarray(log_potentials), np.asarray(laplacian_invt),
single_root_edge=single_root_edge))
marginals_c = SpanningTreeNonProjectiveCRF(
log_potentials=log_potentials,
single_root_edge=single_root_edge).marginals()
# pylint: disable=g-long-lambda
marginals_d = jax.grad( # This should in principle be the same
lambda x: jnp.linalg.slogdet( # as marginals_a but without API fluff.
spanning_tree_non_projective_crf._construct_laplacian_hat(
x, single_root_edge=single_root_edge))[1])(log_potentials)
self.assert_allclose(marginals_a, marginals_b)
self.assert_allclose(marginals_a, marginals_c)
self.assert_allclose(marginals_a, marginals_d)
if __name__ == "__main__":
absltest.main()
|
synjax-master
|
synjax/_src/spanning_tree_non_projective_crf_test.py
|
# Copyright 2023 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for alignment_monotone_general."""
# pylint: disable=g-importing-member
from absl.testing import absltest
import jax
import jax.numpy as jnp
from synjax._src import distribution_test
from synjax._src.alignment_monotone_general import GeneralMonotoneAlignmentCRF
from synjax._src.utils import special
class GeneralMonotoneAlignmentCrfTest(distribution_test.DistributionTest):
def analytic_log_count(self, dist: distribution_test.Distribution
) -> jax.Array:
if dist.log_potentials_vertical is not None and (
len(dist.log_potentials_horizontal) == 2):
return special.log_delannoy(
dist.lengths_rows-1, dist.lengths_cols-1,
max_input_value=min(*dist.event_shape))
else:
raise NotImplementedError
def create_random_batched_dists(self, key: jax.random.KeyArray):
b, m, n = 3, 5, 6
step_0 = step_1 = jax.random.normal(key, (b, m, n))
dists = [GeneralMonotoneAlignmentCRF((step_0, step_1), step_0),
GeneralMonotoneAlignmentCRF((step_0, step_1), None)]
return dists
def create_symmetric_batched_dists(self):
b, m = 3, 5
step_0 = step_1 = jnp.zeros((b, m, m))
dists = [GeneralMonotoneAlignmentCRF((step_0, step_1), step_0)]
return dists
def test_crash_on_invalid_shapes(self):
b = 3
m = 5
step_0 = jnp.zeros((b, m, m))
step_1 = jnp.zeros((b, m, m-1))
e = Exception
self.assertRaises(
e, lambda: GeneralMonotoneAlignmentCRF((step_0, step_1), step_0))
self.assertRaises(
e, lambda: GeneralMonotoneAlignmentCRF((step_0, step_1), None))
self.assertRaises(
e, lambda: GeneralMonotoneAlignmentCRF((step_0,), step_1))
def assert_is_symmetric(self, dist, marginals) -> bool:
self.assert_allclose(marginals, jnp.swapaxes(marginals, -1, -2))
self.assert_allclose(marginals,
jnp.rot90(jnp.swapaxes(marginals, -1, -2),
k=2, axes=(-1, -2)))
if dist.log_potentials_vertical is not None:
self.assert_all(marginals > 0)
def assert_batch_of_valid_samples(self, dist, samples):
transitions_count = jnp.sum(samples, (-1, -2))
self.assert_all(transitions_count >= max(*dist.event_shape))
self.assert_all(transitions_count <= sum(dist.event_shape)-1)
def assert_valid_marginals(self, dist, marginals):
self.assert_allclose(marginals[..., -1, -1], 1)
self.assert_allclose(marginals[..., 0, 0], 1)
self.assert_all(marginals.sum(-2) >= 0.98)
if __name__ == "__main__":
absltest.main()
|
synjax-master
|
synjax/_src/alignment_monotone_general_test.py
|
# Copyright 2023 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for deptree_utils."""
from absl.testing import absltest
from absl.testing import parameterized
import jax.numpy as jnp
import numpy as np
from synjax._src import constants
from synjax._src.deptree_algorithms import deptree_padding
class DepTreeUtilsTest(parameterized.TestCase):
def assert_allclose(self, x, y):
np.testing.assert_allclose(x, y, rtol=constants.TESTING_RELATIVE_TOLERANCE,
atol=constants.TESTING_ABSOLUTE_TOLERANCE)
def test_mask_for_padding(self):
mask = deptree_padding._mask_for_padding(max_nodes=6,
lengths=jnp.array([4]))
self.assertTrue(jnp.allclose(
mask.astype(jnp.int32),
jnp.array(
[[[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 1],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0]]])))
def test_mask_for_potentials(self):
mask = deptree_padding._mask_for_potentials(max_nodes=6,
lengths=jnp.array([4]))
self.assertTrue(jnp.allclose(
mask.astype(jnp.int32),
jnp.array(
[[[0, 1, 1, 1, 0, 0],
[0, 0, 1, 1, 0, 0],
[0, 1, 0, 1, 0, 0],
[0, 1, 1, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0]]])))
def test_pad_log_potentials(self):
log_potentials = jnp.full((6, 6), 2)
mask = deptree_padding.pad_log_potentials(log_potentials, jnp.array([4]))
ninf = -constants.INF
self.assertTrue(jnp.allclose(
mask.astype(jnp.int32),
jnp.array(
[[[ninf, 2, 2, 2, ninf, ninf],
[ninf, ninf, 2, 2, ninf, ninf],
[ninf, 2, ninf, 2, ninf, ninf],
[ninf, 2, 2, ninf, 0, 0],
[ninf, ninf, ninf, ninf, ninf, ninf],
[ninf, ninf, ninf, ninf, ninf, ninf]]])))
if __name__ == "__main__":
absltest.main()
|
synjax-master
|
synjax/_src/deptree_algorithms/deptree_padding_test.py
|
# Copyright 2023 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Algorithms for random walk sampling of dependency trees from Stanojević 2022.
Stanojević, 2022: https://aclanthology.org/2022.emnlp-main.110.pdf
"""
import numba
import numpy as np
from synjax._src import constants
EPS = constants.EPS
MTT_LOG_EPS = constants.MTT_LOG_EPS
@numba.njit
def _construct_laplacian_hat(log_potentials: np.ndarray, single_root_edge: bool
) -> np.ndarray:
"""Computes a graph laplacian matrix.
Args:
log_potentials: Weight matrix with log-potential entries.
single_root_edge: Whether to use a single-root constraint
Returns:
Laplacian matrix.
"""
potentials = np.exp(np.logaddexp(log_potentials, MTT_LOG_EPS))
potentials[..., 0] = 0 # Removing root-entering edges.
potentials *= (1-np.eye(potentials.shape[-1])) # Removing self-edges
def laplacian(x): # Standard complete Laplacian matrix.
return np.expand_dims(np.sum(x, axis=-2), axis=-2) * np.eye(x.shape[-1]) - x
def cut(x): # Removes 0th row and 0th column.
return x[..., 1:, 1:]
if single_root_edge:
l = laplacian(cut(potentials)) # (..., n-1, n-1)
l[..., 0, :] = potentials[..., 0, 1:]
else:
l = cut(laplacian(potentials)) # (..., n-1, n-1)
return l
@numba.njit
def _marginals_with_given_laplacian_invt(
log_potentials: np.ndarray, laplacian_invt: np.ndarray,
single_root_edge: bool) -> np.ndarray:
"""Computes marginals in cases where the inverse of the Laplacian is provided.
Based on the presentation in Koo et al 2007
https://aclanthology.org/D07-1015.pdf
Args:
log_potentials: Weight matrix with log-potential entries.
laplacian_invt: Inverse-transpose of the Laplacian-hat matrix.
single_root_edge: Whether to use a single-root constraint.
Returns:
Matrix of marginals.
"""
potentials = np.exp(np.logaddexp(log_potentials, MTT_LOG_EPS))
marginals = np.zeros(potentials.shape)
x = np.diag(laplacian_invt).copy() # Extract diagonal of laplacian inverse.
if single_root_edge:
x[0] = 0
x_matrix = x.reshape(1, -1) # (1, n)
y_matrix = laplacian_invt.copy()
if single_root_edge:
y_matrix[0] = 0
marginals[1:, 1:] = potentials[1:, 1:] * (x_matrix - y_matrix)
if single_root_edge:
marginals[0, 1:] = potentials[0, 1:] * laplacian_invt[0]
else:
marginals[0, 1:] = potentials[0, 1:] * np.diag(laplacian_invt)
marginals = np.where(np.isnan(marginals) | (marginals < 0), 0, marginals)
return marginals
@numba.njit
def _marginals(log_potentials: np.ndarray, single_root_edge):
laplacian = _construct_laplacian_hat(log_potentials, single_root_edge)
return _marginals_with_given_laplacian_invt(
log_potentials, np.linalg.inv(laplacian).T, single_root_edge)
@numba.njit
def _sample_wilson_multi_root(log_potentials: np.ndarray):
"""Sampling rooted spanning trees from directed graphs using Wilson algorithm.
Args:
log_potentials: Log-potentials from which to get a sample.
Returns:
Single sample that may contain multiple root edges.
"""
n = log_potentials.shape[0]-1
t = np.zeros(n+1, dtype=np.int64)
visited = np.zeros(n+1, dtype=np.int64)
visited[0] = 1
for i in range(1, n+1):
u: int = i
loop_count = 0
max_loop_count = n * 100 # Needed to prevent infinite loops in some graphs.
while not visited[u] and loop_count < max_loop_count:
loop_count += 1
noise = np.random.gumbel(0, 1, n+1)
v = np.argmax(log_potentials[:, u] + noise)
t[u] = v
u = v
u = i
while not visited[u]:
visited[u] = 1
u = t[u]
return t
@numba.njit
def _sample_generalized_wilson(
log_potentials: np.ndarray, single_root_edge: bool) -> np.ndarray:
"""Returns only a single sample spanning tree."""
if single_root_edge:
log_potentials = log_potentials.copy()
marginals = _marginals(log_potentials, single_root_edge)
root_log_marginals = np.log(np.maximum(marginals, 0.0001))[0]
root_node = np.argmax(root_log_marginals +
np.random.gumbel(0, 1, log_potentials.shape[-1]))
log_potentials[0] = -np.inf
log_potentials[0, root_node] = 0
return _sample_wilson_multi_root(log_potentials)
@numba.guvectorize("(n,n),(),()->(n)", nopython=True)
def _vectorized_sample_wilson(log_potentials, length, single_root_edge, res):
res[:length] = _sample_generalized_wilson(
log_potentials[:length, :length], single_root_edge)
res[length:] = length
def vectorized_sample_wilson(log_potentials, lengths, single_root_edge):
"""Vectorized version of wilson algorithm that returns a single sample."""
single_root_edge_extended = np.full(
log_potentials.shape[:-2], single_root_edge, dtype=np.int64)
if lengths is None:
lengths = np.full(log_potentials.shape[:-2], log_potentials.shape[-1])
out = np.zeros(log_potentials.shape[:-1], dtype=np.int64)
log_potentials = log_potentials.astype(np.float64)
lengths = lengths.astype(np.int64)
_vectorized_sample_wilson(log_potentials, lengths,
single_root_edge_extended, out)
return out
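# Example usage (illustrative sketch): sample one spanning tree per batch
# element from random log-potentials over 5 words plus the root node.
#
#   rng = np.random.default_rng(0)
#   log_potentials = rng.normal(size=(2, 6, 6))   # batch of 2, 6 nodes each
#   lengths = np.array([6, 6])
#   heads = vectorized_sample_wilson(log_potentials, lengths,
#                                    single_root_edge=True)
#   # heads[b, i] is the sampled head of node i; entry 0 corresponds to the
#   # root and carries no head.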
|
synjax-master
|
synjax/_src/deptree_algorithms/deptree_non_proj_wilson_sampling.py
|
# Copyright 2023 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""General utils for processing dependency trees."""
# pylint: disable=g-long-lambda
import jax
import jax.numpy as jnp
from synjax._src import constants
Array = jax.Array
INF = constants.INF
def pad_log_potentials(log_potentials: Array, length: Array) -> Array:
"""Pads adjecancy matrix of log_potentials so that it has same log_partition.
Args:
log_potentials: Log-potentials of shape (..., n, n) for n nodes.
length: Number of nodes (including ROOT) of each element in a batch.
Returns:
Padded log_potentials so that log-partition function value is preserved.
"""
max_nodes = log_potentials.shape[-1]
padding_mask = _mask_for_padding(max_nodes, length)
potentials_mask = _mask_for_potentials(max_nodes, length)
# Set padded elems to 0.
log_potentials = jnp.where(padding_mask, 0, log_potentials)
# Ignore everything else except padding and selected potentials.
log_potentials = jnp.where(potentials_mask|padding_mask, log_potentials, -INF)
return log_potentials
def _mask_for_padding(max_nodes: int, lengths: Array) -> Array:
horizontal = jnp.arange(max_nodes) >= lengths[..., None]
vertical = jnp.arange(max_nodes) == lengths[..., None]-1
return vertical[..., None] & horizontal[..., None, :]
def _mask_for_potentials(max_nodes: int, lengths: Array) -> Array:
horizontal = jnp.arange(max_nodes) < lengths[..., None]
vertical = jnp.arange(max_nodes) < lengths[..., None]
matrix = vertical[..., None] & horizontal[..., None, :]
return matrix.at[..., 0].set(False) & ~jnp.eye(max_nodes, dtype=bool)
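# Example usage (illustrative sketch): pad a batch with one sentence of 4
# nodes (root + 3 words) inside a maximum size of 6 nodes while keeping the
# log-partition function unchanged.
#
#   log_potentials = jnp.zeros((1, 6, 6))
#   padded = pad_log_potentials(log_potentials, jnp.array([4]))
#   # Real arcs keep their values, arcs from the last real node onto the
#   # padding nodes get weight 0, and everything else is set to -INF.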
|
synjax-master
|
synjax/_src/deptree_algorithms/deptree_padding.py
|
# Copyright 2023 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implements Maximum Spanning Tree algorithm for directed graphs.
Based on Reweighting+Tarjan algorithm from
Stanojević and Cohen (2021): https://aclanthology.org/2021.emnlp-main.823.pdf
"""
from __future__ import annotations
from typing import Optional, Any
import numba
import numpy as np
NPArray = Any
# pylint: disable=g-explicit-length-test
@numba.njit
def is_tree(proposal: np.ndarray) -> bool:
"""Checks if proposal forms a valid spanning tree.
Linear time algorithm from Stanojević and Cohen (2021).
References:
Stanojević and Cohen, 2021 - Figure 9: https://aclanthology.org/2021.emnlp-main.823.pdf#page=16
Args:
proposal: Numpy array in which element at position i specifies arc
proposal[i] -> i.
Returns:
Boolean for the condition of tree connectedness.
""" # pylint: disable=line-too-long
n = proposal.shape[0]
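# The comprehension below builds n empty lists; it is written this way so
# that Numba can infer the element type as a list of integers.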
children = [[1 for _ in range(0)] for _ in range(n)]
for i in range(1, n):
children[proposal[i]].append(i)
is_visited = np.zeros(n, dtype=np.int64)
stack = [0]
while len(stack) != 0:
i = stack.pop()
is_visited[i] = True
stack.extend(children[i])
return is_visited.all()
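# Example encoding (illustrative): proposal = np.array([0, 0, 1, 1]) encodes
# arcs 0->1, 1->2 and 1->3 with node 0 as the root, so is_tree(proposal)
# returns True; proposal = np.array([0, 2, 1, 0]) contains the cycle 1<->2
# unreachable from the root and returns False.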
@numba.njit
def is_projective_tree(proposal):
"""Checks if proposal forms a valid projective spanning tree.
Linear time algorithm from Stanojević and Cohen (2021).
References:
Stanojević and Cohen, 2021 - Figure 10: https://aclanthology.org/2021.emnlp-main.823.pdf#page=17
Args:
proposal: Numpy array in which element at position i specifies arc
proposal[i] -> i.
Returns:
Boolean for the condition of projectivity.
""" # pylint: disable=line-too-long
n = proposal.shape[0]
deps_count = np.zeros(n, dtype=np.int64)
for i in range(1, n):
deps_count[proposal[i]] += 1
stack = [0]
for i in range(1, n):
stack.append(i)
while len(stack) > 1:
right = stack.pop()
left = stack.pop()
if proposal[left] == right:
# Exists left arc.
stack.append(right)
deps_count[right] -= 1
elif proposal[right] == left and deps_count[right] == 0:
# Exists right arc.
stack.append(left)
deps_count[left] -= 1
else:
# No attachments possible.
# Restore stack and move to next word.
stack.append(left)
stack.append(right)
break
return stack == [0]
@numba.njit
def _reweighting(log_potentials: NPArray) -> NPArray:
weights_no_inf = np.where(np.isinf(log_potentials), np.nan, log_potentials)
log_potentials = log_potentials.copy()
n = log_potentials.shape[0]-1
correction = n*(np.nanmax(weights_no_inf)-np.nanmin(weights_no_inf))+1
log_potentials[0] -= correction
log_potentials[0, 0] = -np.inf
return log_potentials
@numba.njit
def _nanargmax(x: numba.float64[:]):
max_i = 0
max_val = -np.inf
for i in range(len(x)):
if not np.isnan(x[i]) and x[i] >= max_val:
max_val = x[i]
max_i = i
return max_i
@numba.experimental.jitclass([
("_target", numba.int64[:]),
("_entering_log_potentials", numba.float64[:]),
])
class _EdgePriorityQueue:
"""This is a lossy priority queue used for an efficient MST implementation.
See appendix A in (Stanojević and Cohen, 2021) for more details.
"""
def __init__(self, node_id: int, edge_weights: np.ndarray):
self._target = np.full(edge_weights.shape, node_id)
self._entering_log_potentials = edge_weights
self._entering_log_potentials[node_id] = np.nan
def len(self):
# Counts anything that is not nan.
return np.count_nonzero(~np.isnan(self._entering_log_potentials))
def extract_max(self):
i: int = _nanargmax(self._entering_log_potentials)
w = self._entering_log_potentials[i]
self._entering_log_potentials[i] = np.nan
return i, self._target[i], w
def meld_inplace(self, other: _EdgePriorityQueue) -> None:
# pylint: disable=protected-access
to_replace = (
self._entering_log_potentials < other._entering_log_potentials)
self._target[to_replace] = other._target[to_replace]
self._entering_log_potentials[to_replace] = (
other._entering_log_potentials[to_replace])
self._entering_log_potentials[np.isnan(other._entering_log_potentials)
] = np.nan
def add_const(self, const: float):
self._entering_log_potentials[~np.isinf(self._entering_log_potentials)
] += const
@numba.njit
def _tarjan(log_potentials: np.ndarray) -> np.ndarray:
"""Computes unconstrained Tarjan's (1977) algorithm."""
null_edge = (-1, -1, -np.inf)
log_potentials = log_potentials.copy() # Just in case.
log_potentials[:, 0] = -np.inf
n = log_potentials.shape[0]
max_vertices = n*2-1
vertices_in = [null_edge for _ in range(max_vertices)]
vertices_prev = np.zeros(max_vertices, dtype=np.int64)-1
vertices_children = [[1 for _ in range(0)] for _ in range(max_vertices)]
vertices_queues = (
[_EdgePriorityQueue(dep, log_potentials[:, dep]) for dep in range(n)] +
[None for _ in range(max_vertices-n)])
vertices_parent = np.arange(max_vertices)
vertices_highway = np.arange(max_vertices)
next_free = n
######### Compression phase ########
a = n-1
while vertices_queues[a].len() != 0:
u, v, w = vertices_queues[a].extract_max()
b = vertices_highway[u] # find
assert a != b, "there should be no self-loop in this implementation"
vertices_in[a] = (u, v, w)
vertices_prev[a] = b
if vertices_in[u] == null_edge:
# path extended
a = b
else:
# new cycle formed, collapse
c = next_free
next_free += 1
i = a
while True:
i = vertices_highway[i] # find
vertices_children[c].append(i)
i = vertices_prev[i]
if vertices_highway[i] == a: # find
break
for i in vertices_children[c]:
vertices_parent[i] = c
# union by collapsing
vertices_highway[vertices_highway == vertices_highway[i]] = c
vertices_queues[i].add_const(-vertices_in[i][2])
if vertices_queues[c] is None:
vertices_queues[c] = vertices_queues[i]
else:
vertices_queues[c].meld_inplace(vertices_queues[i])
a = c
######### Expansion phase ########
# Next line is just supervertices = [] but is written as a weird comprehension
# so that Numba infers the correct type List[int].
supervertices = [1 for _ in range(0)]
_dismantle(0, vertices_parent, vertices_children, supervertices)
# pylint: disable=g-explicit-length-test
while len(supervertices) > 0:
c = supervertices.pop()
u, v, w = vertices_in[c]
vertices_in[v] = (u, v, w)
_dismantle(v, vertices_parent, vertices_children, supervertices)
output = np.zeros(n, dtype=np.int64)
for u in range(1, n):
output[u] = vertices_in[u][0]
return output
@numba.njit
def _dismantle(u: int,
vertices_parent: numba.int64[:],
vertices_children: numba.typeof([[1]]),
supervertices: numba.typeof([1])):
"""Dismantles a cycle that was constructed in Tarjan phase 1."""
while vertices_parent[u] != u:
for v in vertices_children[vertices_parent[u]]:
if v == u:
continue
vertices_parent[v] = v
# pylint: disable=g-explicit-length-test
if len(vertices_children[v]) > 0:
supervertices.append(v)
u = vertices_parent[u]
@numba.njit
def _arcmax(log_potentials: NPArray) -> NPArray:
n = log_potentials.shape[-1]-1
proposal = np.zeros(n+1, dtype=np.int64)
for i in range(1, n+1):
proposal[i] = np.argmax(log_potentials[:, i])
return proposal
@numba.njit
def _parse(log_potentials: NPArray, single_root_edge: bool) -> NPArray:
"""Applies ArcMax and Reweighting tricks before calling Tarjan's algorithm."""
proposal = _arcmax(log_potentials)
root_count = np.count_nonzero(proposal[1:] == 0)
if is_tree(proposal) and (not single_root_edge or root_count == 1):
result = proposal
else:
if single_root_edge:
log_potentials = _reweighting(log_potentials)
result = _tarjan(log_potentials)
return result
@numba.guvectorize("(n,n),(),()->(n)", nopython=True)
def _vectorized_mst(log_potentials, length, single_root_edge, res):
res[:length] = _parse(log_potentials[:length, :length], single_root_edge)
res[length:] = length
def vectorized_mst(log_potentials: NPArray, lengths: Optional[NPArray],
single_root_edge: bool) -> NPArray:
"""Numpy implementation of MST that supports batch dimension."""
if lengths is None:
lengths = np.full(log_potentials.shape[:-2], log_potentials.shape[-1])
single_root_edge_expanded = np.full(
log_potentials.shape[:-2], single_root_edge, dtype=np.int64)
assert log_potentials.shape[:-2] == lengths.shape
out = np.full(log_potentials.shape[:-1], -2, dtype=np.int64)
log_potentials = log_potentials.astype(np.float64)
lengths = lengths.astype(np.int64)
with np.errstate(invalid="ignore"):
_vectorized_mst(log_potentials, lengths, single_root_edge_expanded, out)
return out
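# Example usage (illustrative sketch): find the maximum spanning tree for a
# batch of two sentences with 5 and 4 nodes (root included) padded to 6.
#
#   rng = np.random.default_rng(0)
#   log_potentials = rng.normal(size=(2, 6, 6))
#   lengths = np.array([5, 4])
#   heads = vectorized_mst(log_potentials, lengths, single_root_edge=True)
#   # heads[b, i] is the predicted head of node i; positions beyond
#   # lengths[b] are filled with the value lengths[b].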
|
synjax-master
|
synjax/_src/deptree_algorithms/deptree_non_proj_argmax.py
|
# Copyright 2023 DeepMind Technologies Limited.
# Copyright 2018 The JAX Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Generalized einsum that works over any semi-ring structure.
Big part of this code is borrowed from internal JAX codebase (version 0.3.15)
(mostly from jax._src.numpy.lax_numpy) and modified so that instead of calling
standard summation and multiplication operations, it calls the ones provided by
the user. Some code that is not part of the JAX interface was included so that
this module continues working even if internals of JAX change in the future.
"""
import collections
import functools
import inspect
import operator
from typing import Sequence, Tuple, FrozenSet, List, Optional, Any, cast
import jax
from jax import core
import jax.numpy as jnp
import numpy as np
import opt_einsum
__all__ = ["einsum_generalized"]
# Taken from jax._src.lax.lax
def _delta(dtype, shape, axes):
"""This utility function exists for creating Kronecker delta arrays."""
axes = jax.util.safe_map(int, axes)
dtype = jax.dtypes.canonicalize_dtype(dtype)
base_shape = tuple(np.take(shape, axes)) # type: ignore[arg-type]
iotas = [jax.lax.broadcasted_iota(jnp.uint32, base_shape, i)
for i in range(len(base_shape))]
eyes = [jax.lax.eq(i1, i2) for i1, i2 in zip(iotas[:-1], iotas[1:])]
result = jax.lax.convert_element_type_p.bind(
functools.reduce(operator.and_, eyes), new_dtype=dtype, weak_type=False)
return jax.lax.broadcast_in_dim(result, shape, axes)
# Taken from jax._src.numpy.lax_numpy
def _removechars(s, chars):
return s.translate(str.maketrans(dict.fromkeys(chars)))
# Taken and modified from jax._src.numpy.lax_numpy
def _einsum(operands: List[jnp.ndarray],
contractions: Sequence[Tuple[Tuple[int, ...], FrozenSet[str], str]],
precision, sum_fn, mul_op, dot_general, key: jax.random.KeyArray):
"""This function executes tensor contraction operations.
It is taken from jax._src.numpy.lax_numpy and modified so that it allows for
arbitrary summation (not just jnp.sum) and arbitrary multiplication operations
(not just jnp.multiply).
Args:
operands: Tensors that need to be contracted.
contractions: Sequence of contractions specified by opt_einsum.
precision: Desired precision if matrix multiplication is used internally.
sum_fn: Function that does summation and has the same interface as jnp.sum.
mul_op: Function that does multiplication and has the same interface
as jnp.multiply.
dot_general: Function that optimizes sum_fn and mul_op in case of
generalized dot product, similar to what jax.lax.dot_general
does for jnp.sum and jnp.multiply. If this argument is None,
dot_general will be constructed automatically, but it won't be
optimized for operations that can use cores dedicated for
matmul.
key: Is a jax.random.KeyArray that is split into sub-keys that are passed to
sum_fn each time it is called. This is useful for semi-rings that
require randomness.
Returns:
The result of a semi-ring einsum.
"""
unzip2 = jax.util.unzip2
# NOTE: In the original implementation from jax._src.numpy.lax_numpy the types
# are promoted using operands = list(_promote_dtypes(*operands)) where
# _promote_dtypes comes from jax._src.numpy.util. It is removed here because
# we do not want to depend on the private parts of JAX.
# Instead we add the condition below that requires
# all inputs to have the same dtype so no promotion is required.
operand_types = [x.dtype for x in operands]
if any(x != operand_types[0] for x in operand_types[1:]):
raise NotImplementedError(
"in generalized einsum all operands have to have the same dtype")
def sum_uniques(operand, names, uniques, *, key):
if uniques:
axes = [names.index(name) for name in uniques]
operand = sum_fn(operand, axis=axes, key=key)
names = _removechars(names, uniques)
return operand, names
def sum_repeats(operand, names, counts, keep_names, *,
key: jax.random.KeyArray):
for name, count in counts.items():
if count > 1:
axes = [i for i, n in enumerate(names) if n == name]
eye = _delta(operand.dtype, operand.shape, axes)
if name not in keep_names:
operand = sum_fn(mul_op(operand, eye), axis=axes, key=key)
names = names.replace(name, "")
else:
operand = sum_fn(mul_op(operand, eye), axis=axes[:-1], key=key)
names = names.replace(name, "", count - 1)
return operand, names
def filter_singleton_dims(operand, names, other_shape, other_names):
s = jnp.shape(operand)
new_shape = []
new_names = []
for i, d in enumerate(names):
other_i = other_names.find(d)
if not core.symbolic_equal_dim(s[i], 1) or other_i == -1 or (
core.symbolic_equal_dim(other_shape[other_i], 1)):
new_shape.append(s[i])
new_names.append(d)
return jnp.reshape(operand, tuple(new_shape)), "".join(new_names)
keys = jax.random.split(key, 5*len(contractions))
for contraction_i, (operand_indices, contracted_names_set, einstr
) in enumerate(contractions):
contracted_names = sorted(contracted_names_set)
input_str, result_names = einstr.split("->")
input_names = input_str.split(",")
# switch on the number of operands to be processed in this loop iteration.
# every case here sets 'operand' and 'names'.
if len(operand_indices) == 1:
operand = operands.pop(operand_indices[0])
names, = input_names
counts = collections.Counter(names)
# sum out unique contracted indices with a single reduce-sum
uniques = [name for name in contracted_names if counts[name] == 1]
operand, names = sum_uniques(operand, names, uniques,
key=keys[contraction_i*5])
# for every repeated index, do a contraction against an identity matrix
operand, names = sum_repeats(operand, names, counts, result_names,
key=keys[contraction_i*5+1])
elif len(operand_indices) == 2:
lhs, rhs = map(operands.pop, operand_indices)
lhs_names, rhs_names = input_names
# handle cases where one side of a contracting or batch dimension is 1
# but its counterpart is not.
lhs, lhs_names = filter_singleton_dims(lhs, lhs_names, jnp.shape(rhs),
rhs_names)
rhs, rhs_names = filter_singleton_dims(rhs, rhs_names, jnp.shape(lhs),
lhs_names)
lhs_counts = collections.Counter(lhs_names)
rhs_counts = collections.Counter(rhs_names)
# sum out unique contracted indices in lhs and rhs
lhs_uniques = [name for name in contracted_names
if lhs_counts[name] == 1 and rhs_counts[name] == 0]
lhs, lhs_names = sum_uniques(lhs, lhs_names, lhs_uniques,
key=keys[contraction_i*5])
rhs_uniques = [name for name in contracted_names
if rhs_counts[name] == 1 and lhs_counts[name] == 0]
rhs, rhs_names = sum_uniques(rhs, rhs_names, rhs_uniques,
key=keys[contraction_i*5+1])
# for every repeated index, contract against an identity matrix
lhs, lhs_names = sum_repeats(lhs, lhs_names, lhs_counts,
result_names + rhs_names,
key=keys[contraction_i*5+2])
rhs, rhs_names = sum_repeats(rhs, rhs_names, rhs_counts,
result_names + lhs_names,
key=keys[contraction_i*5+3])
lhs_or_rhs_names = set(lhs_names) | set(rhs_names)
contracted_names = [x for x in contracted_names if x in lhs_or_rhs_names]
lhs_and_rhs_names = set(lhs_names) & set(rhs_names)
batch_names = [x for x in result_names if x in lhs_and_rhs_names]
lhs_batch, rhs_batch = unzip2((lhs_names.find(n), rhs_names.find(n))
for n in batch_names)
# NOTE(mattjj): this can fail non-deterministically in python3, maybe
# due to opt_einsum
assert all(
name in lhs_names and name in rhs_names and
lhs.shape[lhs_names.index(name)] == rhs.shape[rhs_names.index(name)]
for name in contracted_names)
# contract using dot_general
batch_names_str = "".join(batch_names)
lhs_cont, rhs_cont = unzip2((lhs_names.index(n), rhs_names.index(n))
for n in contracted_names)
deleted_names = batch_names_str + "".join(contracted_names)
remaining_lhs_names = _removechars(lhs_names, deleted_names)
remaining_rhs_names = _removechars(rhs_names, deleted_names)
# Try both orders of lhs and rhs, in the hope that one of them means we
# don't need an explicit transpose. opt_einsum likes to contract from
# right to left, so we expect (rhs,lhs) to have the best chance of not
# needing a transpose.
names = batch_names_str + remaining_rhs_names + remaining_lhs_names
if names == result_names:
dimension_numbers = ((rhs_cont, lhs_cont), (rhs_batch, lhs_batch))
dot_general_args = (rhs, lhs)
else:
names = batch_names_str + remaining_lhs_names + remaining_rhs_names
dimension_numbers = ((lhs_cont, rhs_cont), (lhs_batch, rhs_batch))
dot_general_args = (lhs, rhs)
operand = dot_general(*dot_general_args,
dimension_numbers=dimension_numbers,
key=keys[contraction_i*5+4],
precision=precision)
else:
raise NotImplementedError # if this is actually reachable, open an issue!
# the resulting 'operand' with axis labels 'names' should be a permutation
# of the desired result
assert len(names) == len(result_names) == len(set(names))
assert set(names) == set(result_names)
if names != result_names:
perm = tuple(names.index(name) for name in result_names)
operand = jax.lax.transpose(operand, perm)
operands.append(operand) # used in next iteration
return operands[0]
# Taken and modified from jax._src.numpy.lax_numpy
def einsum_generalized(*operands, optimize="optimal", precision=None,
sum_fn, mul_op, dot_general,
key: Optional[jax.random.KeyArray] = None):
"""Generalized version of einsum that works with arbitrary sum and mul ops.
It is taken from jax._src.numpy.lax_numpy and modified minimally so that it
allows for arbitrary summation (not just jnp.sum) and arbitrary multiplication
(not just jnp.multiply) operations.
Args:
*operands: Tensors that need to be contracted.
optimize: Level of opt_einsum optimization to use.
precision: Desired precision if matrix multiplication is used internally.
sum_fn: Function that does summation and has the same interface as jnp.sum.
mul_op: Function that does multiplication and has the same interface
as jnp.multiply.
dot_general: Function that optimizes sum_fn and mul_op in case of
generalized dot product, similar to what jax.lax.dot_general
does for jnp.sum and jnp.multiply. If this argument is None,
dot_general will be constructed automatically, but it won't be
optimized for operations that can use cores dedicated for
matmul.
key: Is a jax.random.KeyArray that is split into sub-keys that are passed to
sum_fn each time it is called. This is useful for semi-rings that
require randomness.
Returns:
The result of a semi-ring einsum.
"""
def add_key_wrap(fn):
def fn2(*args, key=None, **kwargs):
del key
return fn(*args, **kwargs)
return fn2
spec = inspect.getfullargspec(sum_fn)
if "key" not in (spec.args + spec.kwonlyargs):
sum_fn = add_key_wrap(sum_fn)
if key is None:
key = jax.random.PRNGKey(0)
# pylint: disable=g-bool-id-comparison
optimize = "optimal" if optimize is True else optimize
# using einsum_call=True here is an internal api for opt_einsum
# Allow handling of shape polymorphism
# pylint: disable=g-complex-comprehension
non_constant_dim_types = {
type(d) for op in operands if not isinstance(op, str)
for d in jnp.shape(op) if not core.is_constant_dim(d)
}
if not non_constant_dim_types:
einsum_contract_path_fn = opt_einsum.contract_path
else:
# NOTE: This branch in the original implementation from
# jax._src.numpy.lax_numpy calls internal function
# `_polymorphic_einsum_contract_path_handlers` but it's excluded here
# because it seems useful only for jax2tf and we want not to depend on the
# private functions of JAX.
raise NotImplementedError("generalized version of einsum doesn't support "
"polymorphic contract path handlers")
operands, contractions = einsum_contract_path_fn(
*operands, einsum_call=True, use_blas=True, optimize=optimize)
# The line below fixes the wrong return type of opt_einsum.contract_path.
contractions = cast(List[Tuple[Any, ...]], contractions)
contractions = tuple((a, frozenset(b), c) for a, b, c, *_ in contractions)
return _einsum(operands, contractions, precision, sum_fn, mul_op, dot_general,
key=key)
|
synjax-master
|
synjax/_src/utils/semirings_einsum.py
|
# Copyright 2023 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for semirings_einsum."""
from absl.testing import absltest
from absl.testing import parameterized
import jax
import jax.numpy as jnp
import numpy as np
from synjax._src import constants
from synjax._src.utils import semirings_dot_general
from synjax._src.utils import semirings_einsum
def einsum_log(*operands, **kwargs):
sum_fn = jax.nn.logsumexp
mul_op = jnp.add
dot_general = semirings_dot_general.build_dot_general(sum_fn, mul_op)
return semirings_einsum.einsum_generalized(
*operands, **kwargs,
sum_fn=sum_fn, mul_op=mul_op, dot_general=dot_general)
def einsum_real(*operands, **kwargs):
sum_fn = jnp.sum
mul_op = jnp.multiply
dot_general = semirings_dot_general.build_dot_general(sum_fn, mul_op)
return semirings_einsum.einsum_generalized(
*operands, **kwargs,
sum_fn=sum_fn, mul_op=mul_op, dot_general=dot_general)
def einsum_tropical(*operands, **kwargs):
sum_fn = jnp.max
mul_op = jnp.add
dot_general = semirings_dot_general.build_dot_general(sum_fn, mul_op)
return semirings_einsum.einsum_generalized(
*operands, **kwargs,
sum_fn=sum_fn, mul_op=mul_op, dot_general=dot_general)
class SemiringsEinsumTest(parameterized.TestCase):
def assert_allclose(self, x, y):
np.testing.assert_allclose(x, y, rtol=constants.TESTING_RELATIVE_TOLERANCE,
atol=constants.TESTING_ABSOLUTE_TOLERANCE)
def test_einsum_generalized(self):
bs = (11, 3)
x = jax.random.uniform(jax.random.PRNGKey(0), (2, *bs, 5, 4))
y = jax.random.uniform(jax.random.PRNGKey(0), (2, 4, 7, *bs))
expression = "s...ab,sbc...->s...ac"
self.assert_allclose(
einsum_real(expression, x, y),
jnp.einsum(expression, x, y))
def test_einsum_tropical_semiring(self):
x = jax.random.uniform(jax.random.PRNGKey(0), (2, 4, 3, 5, 4))
self.assert_allclose(
jnp.max(x, (0, -2, -1)),
einsum_tropical("a...bc->...", x))
def test_einsum_log_semiring(self):
bs = (11, 3)
x = jax.random.uniform(jax.random.PRNGKey(0), (2, *bs, 5, 4))
y = jax.random.uniform(jax.random.PRNGKey(0), (2, 4, 7, *bs))
expression = "s...ab,sbc...->s...ac"
self.assert_allclose(
jnp.log(jnp.einsum(expression, jnp.exp(x), jnp.exp(y))),
einsum_log(expression, x, y))
if __name__ == "__main__":
absltest.main()
|
synjax-master
|
synjax/_src/utils/semirings_einsum_test.py
|
# Copyright 2023 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utils for efficient chart manipulation."""
from __future__ import annotations
# pylint: disable=g-multiple-import
# pylint: disable=g-importing-member
from typing import Union
import equinox as eqx
import jax
import jax.numpy as jnp
from jaxtyping import Int, Float, Array
from synjax._src import constants
from synjax._src.typing import typed
from synjax._src.utils import semirings
from synjax._src.utils import special
Semiring = semirings.Semiring
roll = special.roll
SpanSize = Union[int, Int[Array, ""]]
@typed
def from_cky_table(cky_table: Float[Array, "s n n ..."]) -> "Chart":
"""Creates a chart from a table of shape (s, n, n, ...).
Args:
cky_table: Entries with shape (s, n, n, ...) where first axis is
dedicated for semiring's usage, and second and third index
a constituent that spans from (i, j) inclusive on both sides.
Returns:
Initialized Chart instance.
"""
n = cky_table.shape[1]
f = lambda c, i: roll(c, -i, 1)
table_left_child = jax.vmap(f, in_axes=(1, 0), out_axes=1
)(cky_table, jnp.arange(n))
f = lambda c, i: roll(c, n-i-1, 1)
cky_table_transposed = jnp.swapaxes(cky_table, 2, 1)
table_right_child = jax.vmap(f, in_axes=(1, 0), out_axes=1
)(cky_table_transposed, jnp.arange(n))
return Chart(table_left_child, table_right_child)
@typed
class Chart(eqx.Module):
"""Vectorized chart methods described by Rush (2020).
References:
Rush, 2020 - Section 6b: https://arxiv.org/pdf/2002.00876.pdf#page=5
"""
_table_left_child: Float[Array, "s n n ..."] # Cr in Rush (2020).
_table_right_child: Float[Array, "s n n ..."] # Cl in Rush (2020).
@typed
def __init__(self,
table_left_child: Float[Array, "s n n ..."],
table_right_child: Float[Array, "s n n ..."]):
self._table_left_child = table_left_child
self._table_right_child = table_right_child
@typed
def left(self) -> Float[Array, "s n n ..."]:
return self._table_left_child
@typed
def right_unmasked(self, d: SpanSize) -> Float[Array, "s n n ..."]:
a = roll(self._table_right_child, -d+1, axis=1)
b = roll(a, d-1, axis=2)
return b
@typed
def get_entries(self, d: SpanSize) -> Float[Array, "s n ..."]:
return self._table_left_child[:, :, d-1]
@typed
def set_entries(self, d: SpanSize, entries) -> "Chart":
new_table_left_child = self._table_left_child.at[:, :, d-1].set(entries)
new_table_right_child = self._table_right_child.at[:, :, -d].set(
roll(entries, d-1, axis=1))
return Chart(new_table_left_child, new_table_right_child)
def __repr__(self):
s = f"Chart[{self._table_left_child.shape}](\n"
s += f" Cr:\n{self._table_left_child}\n"
s += f" Cl:\n{self._table_right_child}\n"
s += ")"
return s
@typed
def left_non_empty(self) -> Float[Array, "s n n ..."]:
return roll(self.left(), -1, axis=2)
@typed
def right(self, d: SpanSize, sr: Semiring,
exclude_word_nodes: bool = False) -> Float[Array, "s n n ..."]:
return sr.mul(self.mask(d, sr, exclude_word_nodes), self.right_unmasked(d))
@typed
def right_non_empty(self, d: SpanSize, sr: Semiring
) -> Float[Array, "s n n ..."]:
return sr.mul(self.mask(d, sr, exclude_word_nodes=False),
self.right_unmasked_non_empty(d))
@typed
def right_unmasked_non_empty(self, d: SpanSize) -> Float[Array, "s n n ..."]:
return roll(self.right_unmasked(d), 1, axis=2)
@typed
def mask(self, d: SpanSize, sr: Semiring, exclude_word_nodes: bool
) -> Float[Array, "s n n ..."]:
n = self._table_left_child.shape[1]
vertical = jnp.arange(n) < n-d+1
if exclude_word_nodes:
horizontal = (jnp.arange(n) < d-2).at[0].set(False)
else:
horizontal = jnp.arange(n) < d-1
mask = vertical[:, None] & horizontal
mask = sr.wrap(jnp.where(mask, 0., -constants.INF))
mask = jnp.expand_dims(mask, range(3, self._table_left_child.ndim))
return mask
@typed
def pick_length(self, length: Int[Array, ""]) -> Float[Array, "s ..."]:
return self._table_left_child[:, 0, length-1]
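# Example usage (illustrative sketch): build a chart from a CKY table with a
# single semiring dimension (s=1) and read/write entries for spans of width d.
#
#   cky_table = jnp.zeros((1, 4, 4))       # (s, n, n) log-space entries
#   chart = from_cky_table(cky_table)
#   entries = chart.get_entries(2)         # spans of width 2, by start index
#   chart = chart.set_entries(2, entries)  # write back; returns a new Chart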
|
synjax-master
|
synjax/_src/utils/chart_struct.py
|
# Copyright 2023 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Definitions of semirings."""
import abc
import functools
import operator
from typing import Sequence, Optional, Union
import jax
import jax.numpy as jnp
# pylint: disable=g-importing-member
from synjax._src.config import get_config
from synjax._src.constants import INF
from synjax._src.utils import semirings_dot_general
from synjax._src.utils import semirings_einsum
from synjax._src.utils import special
Array = jax.Array
KeyArray = jax.random.KeyArray
Axis = Union[int, Sequence[int]]
def einsum_builder(sum_fn, mul_op):
dot_general = semirings_dot_general.build_dot_general(sum_fn, mul_op)
def einsum_fn(subscripts, *operands, key=None, **kwargs):
return semirings_einsum.einsum_generalized(
subscripts, *operands, key=key, sum_fn=sum_fn, mul_op=mul_op,
dot_general=dot_general, **kwargs)
if get_config().checkpoint_semiring_einsum:
einsum_fn = jax.checkpoint(einsum_fn, static_argnums=(0,))
return einsum_fn
def _wrap_fn_multi_axis_reduce(fn):
"""Extends support from single to multiple axes reduce."""
def fn2(a, axis, *, key):
if isinstance(axis, int):
return fn(a, key=key, axis=axis)
elif isinstance(axis, Sequence):
reduce_axes = tuple(x%a.ndim for x in axis)
other_axes = tuple(x for x in range(a.ndim) if x not in reduce_axes)
a = jnp.transpose(a, other_axes+reduce_axes)
a = jnp.reshape(a, a.shape[:len(other_axes)]+(-1,))
return fn(a, key=key, axis=-1)
else:
raise ValueError(f"Axis cannot be of type {type(axis)}.")
return fn2
class Semiring(metaclass=abc.ABCMeta):
"""Semiring interface."""
def wrap(self, log_potentials: Array) -> Array:
"""Wraps raw log-potentials into their semi-ring form.
For most semirings that form will be the same as in the standard
log-potentials with only the difference in having an additional axis in
the beginning. In top-k semiring this axis will have size k. In other
semirings the size is 1. In effect, other semirings do not need this special
axis but we keep it for the sake of having consistent shapes across
semirings. The default implementation below covers all semirings except
top-k semiring.
Args:
log_potentials: jnp.ndarray with log potentials.
Returns:
Log-potentials adapted to a particular semiring.
"""
return jax.tree_map(lambda x: x[None], log_potentials)
def unwrap(self, wrapped):
"""Reverses the effect of Semiring.wrap()."""
return jax.tree_map(lambda x: x.squeeze(0), wrapped)
def one(self, shape=()) -> Array:
return self.wrap(jnp.zeros(shape))
def zero(self, shape=()) -> Array:
return self.wrap(jnp.full(shape, -INF))
def mul(self, a: Array, b: Array, *cs: Array) -> Array:
return functools.reduce(operator.add, [a, b, *cs])
def add(self, a: Array, b: Array, *cs: Array, key: Optional[KeyArray] = None
) -> Array:
return self.sum(jnp.stack((a, b, *cs), axis=1), axis=1, key=key)
def sum(self, a: Array, axis: Axis, *, key: Optional[KeyArray] = None
) -> Array:
raise NotImplementedError
def einsum(self, subscripts: str, *operands, key: Optional[KeyArray] = None,
**kwargs) -> Array:
fn = einsum_builder(self.sum, self.mul)
return fn(subscripts, *operands, key=key, **kwargs)
class LogSemiring(Semiring):
"""Implements the log-space semiring (logsumexp, +, -inf, 0).
Gradients give marginals.
"""
def sum(self, a: Array, axis: Axis, *, key: Optional[KeyArray] = None
) -> Array:
return jax.nn.logsumexp(a, axis=axis)
def add(self, a: Array, b: Array, *cs: Array, key: Optional[KeyArray] = None
) -> Array:
return functools.reduce(jnp.logaddexp, [a, b, *cs])
class MaxSemiring(Semiring):
"""Implements the max semiring (max, +, -inf, 0).
Gradients give argmax.
"""
def __init__(self, strict_max: Optional[bool] = None):
if strict_max is None:
self._strict_max = get_config().use_strict_max
else:
self._strict_max = strict_max
def sum(self, a: Array, axis: Axis, *, key: Optional[KeyArray] = None
) -> Array:
if self._strict_max:
def _strict_max_fn(x, axis: Axis, *, key: Optional[KeyArray] = None):
del key
indices = jnp.argmax(x, axis=axis, keepdims=True)
vals = jnp.take_along_axis(x, indices, axis)
return jnp.squeeze(vals, axis)
fn = _wrap_fn_multi_axis_reduce(_strict_max_fn)
return fn(a, key=key, axis=axis)
else:
return jnp.max(a, axis=axis)
def add(self, a: Array, b: Array, *cs: Array, key: Optional[KeyArray] = None
) -> Array:
return functools.reduce(jnp.maximum, [a, b, *cs])
class KBestSemiring(Semiring):
"""Implements semiring of which a gradient give a sample."""
def __init__(self, k: int, approximate: bool):
super().__init__()
self.k = k
self.approximate = approximate
def wrap(self, log_potentials: Array) -> Array:
x = jnp.full((self.k, *log_potentials.shape), -INF)
x = x.at[0].set(log_potentials)
return x
def unwrap(self, wrapped):
return wrapped
def mul(self, a: Array, b: Array, *cs: Array) -> Array:
for c in [b, *cs]:
a = self.sum(a[:, None] + c[None], key=None, axis=1)
return a
def sum(self, a: Array, axis: Axis, *, key: Optional[KeyArray] = None
) -> Array:
if self.k == 1:
return MaxSemiring().sum(a, axis, key=key)
else:
fn = _wrap_fn_multi_axis_reduce(self._sum_single_axis)
return fn(a, key=key, axis=axis)
def _sum_single_axis(self, a: Array, key: KeyArray, axis: int) -> Array:
"""Computes sum within one axis only."""
del key
if self.approximate:
a = jnp.moveaxis(a, axis, 1) # Reduce axis should be SECOND.
a = a.reshape(-1, *a.shape[2:])
a = jax.lax.approx_max_k(a, k=self.k, reduction_dimension=0)[0]
else:
a = jnp.moveaxis(a, axis, -1) # Reduce axis should be LAST.
a = jnp.moveaxis(a, 0, -1)
a = a.reshape(*a.shape[:-2], -1)
a = jax.lax.top_k(a, k=self.k)[0]
a = jnp.moveaxis(a, -1, 0)
return a
class SamplingSemiring(Semiring):
"""Implements the semiring whose gradients provide samples."""
def sum(self, a: Array, axis: Axis, *, key: Optional[KeyArray] = None
) -> Array:
if key is None:
raise ValueError("KeyArray cannot be None.")
@jax.custom_gradient
def _sum_sampling(a, key):
def grad(g):
g = jnp.expand_dims(g, axis)
return special.sample_one_hot(a, axis=axis, key=key)*g, None
return jax.nn.logsumexp(a, axis), grad
return _sum_sampling(a, key)
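# Example usage (illustrative sketch): the same einsum contraction under two
# different semirings. LogSemiring computes a log-space matrix product, while
# MaxSemiring computes its max-plus (Viterbi-style) counterpart.
#
#   a = jnp.log(jnp.arange(1., 7.).reshape(2, 3))
#   b = jnp.log(jnp.arange(1., 13.).reshape(3, 4))
#   log_prod = LogSemiring().einsum("ij,jk->ik", a, b)  # log(exp(a) @ exp(b))
#   max_prod = MaxSemiring().einsum("ij,jk->ik", a, b)  # max_j a[i,j]+b[j,k]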
|
synjax-master
|
synjax/_src/utils/semirings.py
|