| python_code | repo_name | file_path |
|---|---|---|
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A module implementing primitives for training by gradient descent."""
import dataclasses
from typing import Any, Callable, Mapping, Optional, Tuple
from absl import logging
from dm_nevis.benchmarker.datasets import tasks
from experiments_torch.environment import pickle_checkpointer
from experiments_torch.training import models
from torch import nn
from torch import optim
LOG_INTERVAL = 10
DEFAULT_MOVING_AVERAGE_ALPHA = 0.8
UpdateFn = Any
LoadParamsFn = Callable[[nn.Module], Tuple[nn.ParameterList, nn.ParameterList]]
@dataclasses.dataclass
class TrainState:
model: nn.Module
optimizer: Optional[optim.Optimizer]
def init_train_state(
model: models.Model,
optimizer_ctor: Callable[..., optim.Optimizer],
optimizer_kwargs: Mapping[str, Any],
load_params_fn: Optional[LoadParamsFn] = None,
*,
log_model_summary: bool = True,
) -> TrainState:
"""Initialize model parameter and state.
Args:
model: the model.
optimizer_ctor: the optimizer not instantiated.
optimizer_kwargs: the optimizer arguments.
load_params_fn: Optional function to load pre-trained parameters and/or to
freeze a subset of the parameters. The function takes the models randomly
initialized parameters, and returns two list of parameters.
([trainable_params, ...], [frozen_params, ...]) tuple.
log_model_summary: When True, logs information about the initialized
parameters and state.
Returns:
A TrainState structure.
"""
if load_params_fn:
trainable_params, frozen_params = load_params_fn(model)
optimizer = optimizer_ctor([{
"params": trainable_params,
"lr": 0.1
}, {
"params": frozen_params,
"lr": 0.
}], **optimizer_kwargs)
else:
trainable_params = model.parameters()
optimizer = optimizer_ctor(trainable_params, lr=0.1, **optimizer_kwargs) # pytype: disable=wrong-keyword-args
if log_model_summary:
logging.info("Model parameters: \n%s", models.param_summary(model))
logging.info("Model size: \n%s", models.size_summary(model))
return TrainState(model=model, optimizer=optimizer)
def restore_train_state(train_state_checkpoint_path: Optional[str]) -> Optional[TrainState]:
"""Load train state from checkpoint path if it has been saved to disk."""
if train_state_checkpoint_path is None:
return None
checkpointer = pickle_checkpointer.PickleCheckpointer(
train_state_checkpoint_path)
train_state = checkpointer.restore()
return train_state
def save_train_state(checkpoint_file_path: str, task_key: tasks.TaskKey,
train_state: TrainState):
logging.info("Saving train state for train task %s to %s", task_key.name,
checkpoint_file_path)
checkpointer = pickle_checkpointer.PickleCheckpointer(checkpoint_file_path)
checkpointer.write(train_state)
class StepCountEstimator:
"""Estimates how many steps per second are achieved during trainnig."""
def __init__(self, alpha: float = DEFAULT_MOVING_AVERAGE_ALPHA) -> None:
self._ema = None
self._alpha = alpha
def add_measurement(self, elapsed_seconds: float):
if self._ema is None:
self._ema = elapsed_seconds
else:
self._ema = self._alpha * self._ema + (1 - self._alpha) * elapsed_seconds
def estimated_steps_per_second(self) -> float:
if not self._ema:
return float("nan")
return 1 / self._ema
| dm_nevis-master | experiments_torch/training/trainer.py |
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for experiments_torch.training.heads."""
from absl.testing import absltest
from absl.testing import parameterized
from experiments_torch.training import heads
import numpy as np
import torch
class HeadsTest(parameterized.TestCase):
def test_metrics_multi_label(self):
num_labels = 17
num_examples = 100
head = heads.MultiLabelHead(features_dim=num_labels, num_classes=num_labels)
inputs = np.random.normal(size=(num_examples, num_labels))
targets = np.ones((num_examples, num_labels))
with torch.no_grad():
_, diagnostics = head.loss_and_metrics(
torch.tensor(inputs).float(),
torch.tensor(targets).float(),
is_training=False)
error = diagnostics['error']
self.assertLen(error.shape, 2)
self.assertEqual(error.shape[0], num_examples)
self.assertEqual(error.shape[1], num_labels)
self.assertLessEqual(np.max(error), 1.0)
self.assertGreaterEqual(np.min(error), 0.0)
# Each prediction is correct with probability p=0.5, so the error averaged
# over many iid trials should be close to 0.5.
expected_value = 0.5
self.assertAlmostEqual(expected_value, error.mean(), delta=0.1)
diagnostics = {k: np.mean(v) for k, v in diagnostics.items()}
for v in diagnostics.values():
self.assertGreaterEqual(v, 0.)
if __name__ == '__main__':
absltest.main()
| dm_nevis-master | experiments_torch/training/heads_test.py |
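The `test_metrics_multi_label` test above relies on standard-normal logits being wrong about half the time against all-ones targets. A small numpy-only sketch (hypothetical, not part of the test suite) of that intuition:

import numpy as np

rng = np.random.default_rng(0)
logits = rng.normal(size=(100, 17))          # [num_examples, num_labels]
predictions = (logits > 0).astype(np.int64)  # sigmoid(x) > 0.5  <=>  x > 0
targets = np.ones_like(predictions)
print((predictions != targets).mean())       # concentrates around 0.5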
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for experiments_torch.training.augmentations."""
import functools
from typing import Any, Mapping, Sequence, Tuple
from absl.testing import absltest
from absl.testing import parameterized
from dm_nevis.benchmarker.datasets import datasets
from experiments_torch.training import augmentations
import numpy as np
import tensorflow as tf
class AugmentationsTest(parameterized.TestCase):
def test_chain(self):
ds = _fixture_dataset([((30, 60, 3), 0), ((30, 60, 1), 0)])
augmentation_fn = functools.partial(
augmentations.chain,
augmentation_ctors_with_kwargs=[
(augmentations.normalize, {}),
(augmentations.resize, {
'size': (30, 30)
}),
(augmentations.random_crop, {
'size': (20, 20)
}),
])
ds = ds.map(augmentation_fn)
items = list(ds)
self.assertLen(items, 2)
for item in items:
# Grayscale images should be converted to color.
self.assertEqual(3, item.image.shape[-1])
self.assertEqual((20, 20, 3), item.image.shape)
def test_normalize(self):
ds = _fixture_dataset([((30, 60, 3), 0), ((30, 60, 1), 0)])
ds = ds.map(augmentations.normalize)
items = list(ds)
self.assertLen(items, 2)
for item in items:
# Grayscale images should be converted to color.
self.assertEqual(3, item.image.shape[-1])
def test_standardize_per_image(self):
ds = _fixture_dataset([((30, 60, 3), 0), ((30, 60, 1), 0)])
ds = ds.map(augmentations.standardize_per_image)
items = list(ds)
# We only test that this works in graph mode.
self.assertLen(items, 2)
def test_random_flip(self):
ds = _fixture_dataset([((30, 60, 3), 0), ((30, 60, 1), 0)])
ds = ds.map(augmentations.random_flip)
items = list(ds)
# We only test that this works in graph mode.
self.assertLen(items, 2)
def test_resize(self):
ds = _fixture_dataset([((30, 30, 3), 0)])
ds = ds.map(functools.partial(augmentations.resize, size=(20, 20)))
items = list(ds)
self.assertLen(items, 1)
for item in items:
self.assertEqual((20, 20, 3), item.image.shape)
def test_central_crop_via_cropped_window_and_resize(self):
ds = _fixture_dataset([((30, 30, 3), 0)])
ds = ds.map(
functools.partial(
augmentations.central_crop_via_cropped_window_and_resize,
size=(20, 20)))
items = list(ds)
self.assertLen(items, 1)
for item in items:
self.assertEqual((20, 20, 3), item.image.shape)
def test_random_crop_via_cropped_window_and_resize(self):
ds = _fixture_dataset([((30, 30, 3), 0)])
ds = ds.map(
functools.partial(
augmentations.random_crop_via_cropped_window_and_resize,
size=(20, 20)))
items = list(ds)
self.assertLen(items, 1)
for item in items:
self.assertEqual((20, 20, 3), item.image.shape)
def test_central_crop_via_cropped_window_and_resize_small_image(self):
ds = _fixture_dataset([((3, 3, 3), 0)])
ds = ds.map(
functools.partial(
augmentations.central_crop_via_cropped_window_and_resize,
size=(2, 2)))
items = list(ds)
self.assertLen(items, 1)
for item in items:
self.assertEqual((2, 2, 3), item.image.shape)
def test_random_crop_via_cropped_window_and_resize_small_image(self):
ds = _fixture_dataset([((3, 3, 3), 0)])
ds = ds.map(
functools.partial(
augmentations.random_crop_via_cropped_window_and_resize,
size=(2, 2)))
items = list(ds)
self.assertLen(items, 1)
for item in items:
self.assertEqual((2, 2, 3), item.image.shape)
def test_central_crop(self):
ds = _fixture_dataset([((30, 30, 3), 0)])
ds = ds.map(functools.partial(augmentations.central_crop, size=(20, 20)))
items = list(ds)
self.assertLen(items, 1)
for item in items:
self.assertEqual((20, 20, 3), item.image.shape)
def test_random_crop(self):
ds = _fixture_dataset([((30, 30, 3), 0)])
ds = ds.map(functools.partial(augmentations.random_crop, size=(20, 20)))
items = list(ds)
self.assertLen(items, 1)
for item in items:
self.assertEqual((20, 20, 3), item.image.shape)
@parameterized.parameters([
dict(image_shape=(224, 300), padding=0, expected=(0, 38, 224, 224)),
dict(image_shape=(300, 224), padding=0, expected=(38, 0, 224, 224)),
dict(image_shape=(224, 300), padding=16, expected=(16, 54, 192, 192)),
dict(image_shape=(300, 224), padding=16, expected=(54, 16, 192, 192)),
dict(image_shape=(32 + 1, 32 + 1), padding=16, expected=(16, 16, 1, 1)),
])
def test_central_crop_window(self, image_shape, padding, expected):
image_shape = tf.constant(image_shape, dtype=tf.int32)
bbox = augmentations.central_crop_window(image_shape, padding)
np.testing.assert_allclose(expected, bbox)
@parameterized.parameters([
dict(image_shape=(224, 300, 3)),
dict(image_shape=(224, 224, 3)),
dict(image_shape=(100, 10, 3)),
])
def test_random_sample_crop_window(self, image_shape):
windows = []
for i in range(100):
crop_window = augmentations.sample_random_crop_window(
tf.constant(image_shape), seed=i)
windows.append(tuple(w.numpy() for w in crop_window))
# Test that we see plenty of variety in the samples.
different_samples = set(windows)
assert len(different_samples) > 50
image_area = image_shape[0] * image_shape[1]
(min_area, max_area) = augmentations.AREA_RANGE
(min_aspect_ratio, max_aspect_ratio) = augmentations.ASPECT_RATIO_RANGE
sampled_min_area = min(w[2] * w[3] for w in windows)
sampled_max_area = max(w[2] * w[3] for w in windows)
sampled_min_aspect_ratio = min(w[3] / w[2] for w in windows)
sampled_max_aspect_ratio = max(w[3] / w[2] for w in windows)
self.assertLess(sampled_max_area / image_area, max_area + 1e-4)
self.assertGreater(sampled_min_area / image_area, min_area - 1e-4)
self.assertLess(sampled_max_aspect_ratio, max_aspect_ratio + 1e-4)
self.assertGreater(sampled_min_aspect_ratio, min_aspect_ratio - 1e-4)
def _fixture_dataset(
shapes_and_labels: Sequence[Tuple[Tuple[int, int, int], int]]
) -> tf.data.Dataset:
"""Constructs a fixture containing minibatches.
We round-trip the data via pngs, since this will result in shape tensors
that are not determined at graph compile time. This ensures that the tested
mappable functions work in graph mode, which is enforced by
tf.data.Dataset.map(...).
Args:
shapes_and_labels: The image shapes and label values to use for the
fixtures.
Returns:
A tensorflow dataset.
"""
def gen():
for shape, label in shapes_and_labels:
yield _encode_example(image=np.zeros(shape, dtype=np.uint8), label=label)
ds = tf.data.Dataset.from_generator(
gen,
output_signature={
'image': tf.TensorSpec(shape=(), dtype=tf.string),
'label': tf.TensorSpec(shape=(), dtype=tf.int32),
})
def to_minibatch(example) -> datasets.MiniBatch:
return datasets.MiniBatch(
image=tf.io.decode_png(example['image']),
label=example['label'],
multi_label_one_hot=None,
)
return ds.map(to_minibatch, deterministic=True)
def _encode_example(image: np.ndarray, label: int) -> Mapping[str, Any]:
"""Create a tf example using named fields."""
return {
'image': _encoded_png_feature(image),
'label': label,
}
def _encoded_png_feature(image: np.ndarray) -> bytes:
return tf.io.encode_png(image).numpy()
if __name__ == '__main__':
absltest.main()
| dm_nevis-master | experiments_torch/training/augmentations_test.py |
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Learning rate schedules for Nevis benchmarker."""
# TODO: should the scheduler act as a multiplier or a setter? What about frozen params?
# pylint: disable=protected-access
# pytype: disable=attribute-error
from typing import Any, Dict, Mapping, NamedTuple, Optional, Sequence
from absl import logging
from torch import optim
from torch.optim import lr_scheduler
class ProgressAndScale(NamedTuple):
"""Combines a progress and scale.
Attributes:
progress: the progress (in the range [0, 1] through the full train loop).
scale: The learning rate scaling to apply once the given progress has been
completed.
"""
progress: float
scale: float
class MultiplicativeLR(lr_scheduler._LRScheduler):
"""Piece-wise constant learning rate depending on learning progress.
Attributes:
optimizer: Wrapped optimizer.
initial_learning_rate: The base learning rate before any scaling.
boundaries_and_scales: A mapping from step boundaries to scales; once the
step count passes a boundary, the learning rate becomes
`initial_learning_rate * scale` for that boundary.
"""
def __init__(self, optimizer: optim.Optimizer, initial_learning_rate: float,
boundaries_and_scales: Mapping[float, float]):
self.initial_learning_rate = initial_learning_rate
self.boundaries_and_scales = boundaries_and_scales
super().__init__(optimizer)
def get_lr(self):
prev_scale, scale = 1.0, 1.0
for boundary, scale in self.boundaries_and_scales.items():
if self._step_count < boundary:
break
prev_scale = scale
return [
self.initial_learning_rate * prev_scale if group['lr'] != 0. else 0.
for group in self.optimizer.param_groups
]
def piecewise_constant_progress_aligned_learning_rate_schedule(
optimizer: optim.Optimizer,
initial_learning_rate: float,
max_steps: int,
learning_progress_boundaries_and_scales: Sequence[ProgressAndScale],
) -> lr_scheduler._LRScheduler:
"""Piece-wise constant learning rate depending on learning progress.
Args:
optimizer: Wrapped optimizer.
initial_learning_rate: float
max_steps: Maximum number of training steps (batched weight updates).
learning_progress_boundaries_and_scales: A sequence of tuples `(progress,
scale)`, where `progress` indicates the portion (in the [0, 1] range) of
`max_steps` at which the learning rate is scaled by `scale`.
Returns:
Learning rate schedule function.
"""
boundaries_and_scales = {}
for (progress, scale) in learning_progress_boundaries_and_scales:
step = int(progress * max_steps)
boundaries_and_scales[step] = scale
logging.info('Using piecewise constant schedule.\n Boundaries: \n%s',
boundaries_and_scales)
return MultiplicativeLR(optimizer, initial_learning_rate,
boundaries_and_scales)
class ConstantLR(lr_scheduler._LRScheduler):
"""Constant learning rate schedule.
Args:
optimizer: wrapped optimizer.
init_value: constant learning rate value.
"""
def __init__(self, optimizer: optim.Optimizer, init_value: float):
# `init_value` must be set before the base class __init__ calls `get_lr()`.
self.init_value = init_value
super().__init__(optimizer)
def get_lr(self):
return [self.init_value for _ in self.optimizer.param_groups]
def constant_learning_rate_schedule(
optimizer: optim.Optimizer, init_value: float) -> lr_scheduler._LRScheduler:
"""Constant learning rate schedule."""
return ConstantLR(optimizer, init_value)
class GradualWarmupScheduler(lr_scheduler._LRScheduler):
"""Warmup scheduler followed by any existing scheduler.
Args:
optimizer (Optimizer): Wrapped optimizer.
end_warmup_lr: final learning rate after warmup.
warmup_steps: total number of warmup steps
after_scheduler: after warmup_steps, use this scheduler (e.g.
ReduceLROnPlateau).
"""
def __init__(self,
optimizer: optim.Optimizer,
end_warmup_lr: float,
warmup_steps: int,
after_scheduler: Optional[lr_scheduler._LRScheduler] = None):
self.end_warmup_lr = end_warmup_lr
self.warmup_steps = warmup_steps
self.after_scheduler = after_scheduler
super().__init__(optimizer)
def get_lr(self):
if self._step_count < self.warmup_steps:
return [
self._step_count *
(self.end_warmup_lr / self.warmup_steps) if group['lr'] != 0. else 0.
for group in self.optimizer.param_groups
]
return self.after_scheduler.get_lr()
def step(self, epoch=None):
if self._step_count < self.warmup_steps:
super(GradualWarmupScheduler, self).step(epoch)
else:
self.after_scheduler.step(epoch)
def warmup_cosine_decay_learning_rate_schedule(
optimizer: optim.Optimizer, initial_learning_rate: float,
steps_per_epoch: int, max_steps: int, warmup_epochs: int,
final_learning_rate: float) -> lr_scheduler._LRScheduler:
"""Warmup cosine learning rate schedule."""
# The warmup steps must be strictly less than the number of overall steps.
warmup_steps = min(max_steps - 1, warmup_epochs * steps_per_epoch)
logging.info(
'Cosine decay schedule: warmup: %d, max steps: %d',
warmup_steps,
max_steps,
)
return GradualWarmupScheduler(
optimizer,
end_warmup_lr=initial_learning_rate,
warmup_steps=warmup_steps,
after_scheduler=lr_scheduler.CosineAnnealingLR(
optimizer, T_max=max_steps, eta_min=final_learning_rate))
def build_learning_rate_schedule(
optimizer: optim.Optimizer, learning_rate_schedule_name: str,
initial_learning_rate: float, steps_per_epoch: int, max_steps: int,
learning_rate_schedule_kwargs: Dict[str, Any]) -> lr_scheduler._LRScheduler:
"""Creates a learning_rate_schedule function for given arguments.
This function assumes that `steps_per_epoch` and `max_steps` are not contained
in `learning_rate_schedule_kwargs`. The reason for this constraint is due to
the fact that these arguments could be dynamically recomputed on the learner
side depending on which dataset is used.
Args:
optimizer: Wrapped optimizer.
learning_rate_schedule_name: A name of a learning rate schedule.
initial_learning_rate: An initial value of the learning rate.
steps_per_epoch: Number of batched weight updates per epoch.
max_steps: Maximum number of batched weight updates for the training run.
learning_rate_schedule_kwargs: Dictionary containing additional arguments
for a given learning rate schedule.
Returns:
Learning rate schedule.
"""
if 'steps_per_epoch' in learning_rate_schedule_kwargs:
raise ValueError(
'`steps_per_epoch` must not be in `learning_rate_schedule_kwargs`.')
if 'max_steps' in learning_rate_schedule_kwargs:
raise ValueError(
'`max_steps` must not be in `learning_rate_schedule_kwargs`.')
if learning_rate_schedule_name == 'constant':
return constant_learning_rate_schedule(optimizer, initial_learning_rate)
elif learning_rate_schedule_name == 'piecewise_constant_progress_aligned':
return piecewise_constant_progress_aligned_learning_rate_schedule(
optimizer, initial_learning_rate, max_steps,
**learning_rate_schedule_kwargs)
elif learning_rate_schedule_name == 'warmup_cosine_decay':
return warmup_cosine_decay_learning_rate_schedule(
optimizer, initial_learning_rate, steps_per_epoch, max_steps,
**learning_rate_schedule_kwargs)
else:
raise ValueError(
f'Unsupported `learning_rate_schedule_name` = `{learning_rate_schedule_name}`'
)
| dm_nevis-master | experiments_torch/training/learning_rate_schedules.py |
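For illustration, a hedged sketch of the piecewise-constant-by-progress idea implemented by `MultiplicativeLR` above, expressed with PyTorch's stock `LambdaLR` instead of the custom scheduler; the model, boundaries, and scales are invented values, not configuration from the repository.

import torch
from torch import nn, optim
from torch.optim import lr_scheduler

max_steps = 100
initial_lr = 0.1
# (progress, scale) pairs as in ProgressAndScale: scale by 0.1 at 50% of
# training and by 0.01 at 75%.
boundaries_and_scales = {int(p * max_steps): s for p, s in [(0.5, 0.1), (0.75, 0.01)]}

def scale_for_step(step: int) -> float:
    # Return the scale of the last boundary passed so far (1.0 before any).
    scale = 1.0
    for boundary, boundary_scale in sorted(boundaries_and_scales.items()):
        if step >= boundary:
            scale = boundary_scale
    return scale

model = nn.Linear(4, 2)
optimizer = optim.SGD(model.parameters(), lr=initial_lr)
scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=scale_for_step)

for _ in range(max_steps):
    optimizer.step()   # gradients omitted in this sketch
    scheduler.step()   # lr: 0.1, then 0.01 after step 50, then 0.001 after step 75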
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Data augmentations.
Augmentations are intended to be used in the context of a map function in a
tf.data.Dataset. This means that the functions must be usable in tensorflow
graph mode [1]. To achieve this, any run-time variable shape must be managed
strictly using tensorflow functions (such as tf.cond(...)).
This can be tested using the test fixtures in the tests for this module.
[1]: https://www.tensorflow.org/api_docs/python/tf/data/Dataset#map
"""
import dataclasses
import functools
from typing import Any, Callable, Dict, Optional, Sequence, Tuple
from dm_nevis.benchmarker.datasets import datasets
import tensorflow as tf
AugmentFn = Callable[[datasets.MiniBatch], datasets.MiniBatch]
Kwargs = Dict[str, Any]
DEFAULT_PADDING = 0
# offset_y, offset_x, crop_height, crop_width
CropWindow = Tuple[int, int, int, int]
AREA_RANGE = (0.08, 1.0)
MAX_ATTEMPTS = 10
ASPECT_RATIO_RANGE = (3 / 4, 4 / 3)
MIN_OBJECT_COVERED = 0.1
def chain(
example: datasets.MiniBatch,
augmentation_ctors_with_kwargs: Sequence[Tuple[AugmentFn, Kwargs]]
) -> datasets.MiniBatch:
"""Applies data augmentations to example sequentially."""
for (ctor, kwargs) in augmentation_ctors_with_kwargs:
augmentation_fn = functools.partial(ctor, **kwargs)
example = augmentation_fn(example)
return example
def standardize_per_image(example: datasets.MiniBatch) -> datasets.MiniBatch:
"""Standartizes each image."""
image = tf.image.per_image_standardization(example.image)
return dataclasses.replace(example, image=image)
def random_flip(example: datasets.MiniBatch) -> datasets.MiniBatch:
"""Randomly flips each image."""
image = tf.image.random_flip_left_right(example.image)
return dataclasses.replace(example, image=image)
def normalize(example: datasets.MiniBatch) -> datasets.MiniBatch:
"""Ensures the images have 3 channels and are in range -1..1."""
# Images from nevis datasets are 0..255, however stored as int64.
# This confuses the other image-preprocessing functions => cast to uint8.
image = example.image
def true_fn():
# no-op for grayscale, results in correct shape for RGB
image_sliced = image[:, :, :1]
return tf.image.grayscale_to_rgb(image_sliced)
is_grayscale = tf.equal(tf.shape(image)[-1], 1)
image = tf.cond(
pred=is_grayscale,
true_fn=true_fn,
false_fn=lambda: image)
# Convert to range -1..1
image = tf.cast(image, dtype=tf.uint8)
image = 2 * tf.image.convert_image_dtype(image, dtype=tf.float32) - 1
return dataclasses.replace(example, image=image)
def _distorted_bounding_box_crop(
image_shape: tf.Tensor,
*,
bbox: tf.Tensor,
min_object_covered: float,
aspect_ratio_range: Tuple[float, float],
area_range: Tuple[float, float],
max_attempts: int,
seed: Optional[int] = None,
) -> CropWindow:
"""Generates cropped_image using one of the bboxes randomly distorted."""
bbox_begin, bbox_size, _ = tf.image.sample_distorted_bounding_box(
image_shape,
bounding_boxes=bbox,
min_object_covered=min_object_covered,
aspect_ratio_range=aspect_ratio_range,
area_range=area_range,
max_attempts=max_attempts,
use_image_if_no_bounding_boxes=True,
seed=seed)
# Crop the image to the specified bounding box.
offset_y, offset_x, _ = tf.unstack(bbox_begin)
target_height, target_width, _ = tf.unstack(bbox_size)
return (offset_y, offset_x, target_height, target_width)
def sample_random_crop_window(image_shape: tf.Tensor,
seed: Optional[int] = None) -> CropWindow:
"""Randomly sample a crop window, given an image size and config.
It may be that the random sampler is unable to satisfy the constraints given
(within an acceptable number of iterations). In this case, the sampler
falls back to returning the result of `pad_and_center_crop_window`, with the
default padding set.
Args:
image_shape: A tensor containing [image_height, image_width, channels].
seed: If specified, a random seed for sampling cropping window.
Returns:
A crop window [min y, min x, height, width] in image coordinates.
"""
bbox = tf.constant([0.0, 0.0, 1.0, 1.0], dtype=tf.float32, shape=[1, 1, 4])
crop_window = _distorted_bounding_box_crop(
image_shape,
bbox=bbox,
min_object_covered=MIN_OBJECT_COVERED,
aspect_ratio_range=ASPECT_RATIO_RANGE,
area_range=AREA_RANGE,
max_attempts=MAX_ATTEMPTS,
seed=seed)
# If the random crop failed, return the center crop.
if tf.reduce_all(tf.equal(image_shape[:2], crop_window[2:])):
crop_window = central_crop_window(image_shape)
return crop_window
def central_crop_window(image_shape: tf.Tensor,
padding: int = DEFAULT_PADDING) -> CropWindow:
"""Compute a crop window for a padded center crop of the given image shape.
Args:
image_shape: The shape of the jpeg [height, width, channels], or [height,
width].
padding: The padding between the input image and the resulting image. The
padding represents the distance between the input image and the output
image at each edge (so the total number of pixels removed from the
smallest edge is 2 x the padding value).
Returns:
A crop window [y, x, image_size, image_size],
where image_size = min(height, width) - 2 * padding, and y and x are
chosen so that the resulting crop falls in the center of the input image.
"""
# Scrub the channels size, if it was provided.
image_shape = image_shape[:2]
min_image_side = tf.math.reduce_min(image_shape)
image_height = image_shape[0]
image_width = image_shape[1]
# If the padding is larger than the image, no pixels will be returned.
tf.debugging.assert_greater(min_image_side, 2 * padding)
offset_y = tf.cast((image_height - min_image_side) / 2, dtype=tf.int32)
offset_x = tf.cast((image_width - min_image_side) / 2, dtype=tf.int32)
image_size = tf.cast(min_image_side - 2 * padding, tf.int32)
return (offset_y + padding, offset_x + padding, image_size, image_size)
def central_crop_via_cropped_window_and_resize(
example: datasets.MiniBatch, size: Tuple[int, int]) -> datasets.MiniBatch:
"""Extract the central region of the image and resize to the given size."""
crop_window = central_crop_window(tf.shape(example.image))
cropped_image = tf.image.crop_to_bounding_box(example.image, *crop_window)
cropped_image = tf.image.resize(cropped_image, size=size)
return dataclasses.replace(example, image=cropped_image)
def random_crop_via_cropped_window_and_resize(
example: datasets.MiniBatch, size: Tuple[int, int]) -> datasets.MiniBatch:
"""Randomly sample a crop from the image and resize to the given size."""
crop_window = sample_random_crop_window(tf.shape(example.image))
cropped_image = tf.image.crop_to_bounding_box(example.image, *crop_window)
cropped_image = tf.image.resize(cropped_image, size=size)
return dataclasses.replace(example, image=cropped_image)
def central_crop(example: datasets.MiniBatch,
size: Tuple[int, int]) -> datasets.MiniBatch:
"""Extract the central region of the image."""
height = tf.shape(example.image)[0]
width = tf.shape(example.image)[1]
tf.debugging.assert_equal(height, width)
fraction = size[0] / height
image = tf.image.central_crop(example.image, fraction)
return dataclasses.replace(example, image=image)
def random_crop(example: datasets.MiniBatch,
size: Tuple[int, int]) -> datasets.MiniBatch:
"""Randomly sample crops with `size`."""
height, width = size
n_channels = tf.shape(example.image)[-1]
image = tf.image.random_crop(example.image, (height, width, n_channels))
return dataclasses.replace(example, image=image)
def resize(example: datasets.MiniBatch, size: Tuple[int,
int]) -> datasets.MiniBatch:
"""Resizes the image to the given size."""
image = tf.image.resize(example.image, size)
return dataclasses.replace(example, image=image)
| dm_nevis-master | experiments_torch/training/augmentations.py |
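A pure-Python sketch of the padded center-crop arithmetic in `central_crop_window` above; the expected tuples mirror the parameterized cases in `augmentations_test.py`, and the helper name is hypothetical.

def central_crop_window_py(height: int, width: int, padding: int = 0):
    """Returns (offset_y, offset_x, size, size) for a padded center crop."""
    min_side = min(height, width)
    assert min_side > 2 * padding, "padding would remove the whole image"
    offset_y = (height - min_side) // 2
    offset_x = (width - min_side) // 2
    size = min_side - 2 * padding
    return (offset_y + padding, offset_x + padding, size, size)

assert central_crop_window_py(224, 300, padding=0) == (0, 38, 224, 224)
assert central_crop_window_py(300, 224, padding=16) == (54, 16, 192, 192)
assert central_crop_window_py(33, 33, padding=16) == (16, 16, 1, 1)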
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Prediction heads."""
from typing import Dict, List, Optional, Set, Tuple, Union, Protocol
from dm_nevis.benchmarker.datasets import tasks
import numpy as np
import torch
from torch import nn
from torch.nn import functional as F
Loss = torch.Tensor
Metrics = Dict[str, Union[np.ndarray, float]]
TaskKey = tasks.TaskKey
class Head(Protocol):
"""A Prediction head.
Heads combine a prediction layer to map some representation to a prediction
together with a loss function and other diagnostics appropriate for the
kind of prediction.
"""
def predict(self, inputs: torch.Tensor, is_training: bool,
as_probs: bool) -> List[torch.Tensor]:
"""Generates a prediction given the representation `h`.
Args:
inputs: representation to derive predictions from.
is_training: bool
as_probs: bool
Returns:
A list over distribution objects representing the predictions (one for
each label).
"""
def loss_and_metrics(
self,
inputs: torch.Tensor,
targets: torch.Tensor,
is_training: bool = False,
) -> Tuple[Loss, Metrics]:
"""Evaluates the predictions given representations and ground-truth targets.
Args:
inputs: representation to derive predictions from.
targets: ground-truth to evaluate against.
is_training: bool
Returns:
A (scalar loss, dictionary of per-example metrics) tuple.
"""
class CategoricalHead(nn.Module):
"""A categorical prediction head.
Encapsulates a linear layer to predict logits given a representation
and computes relevant metrics such as xent, error, expected-calibration-error
given ground truth labels.
"""
def __init__(self,
features_dim: int,
num_classes: int,
label_smoothing: float = 0.,
name: Optional[str] = None):
super().__init__()
self._num_classes = num_classes
self._label_smoothing = label_smoothing
self._logit_layer = nn.Linear(features_dim, num_classes)
def forward(self, x):
return self._logit_layer(x)
def predict(
self,
inputs: torch.Tensor,
is_training: bool = False,
as_probs: bool = False,
) -> List[torch.Tensor]:
"""Computes class probabilities given representations."""
del is_training
logits = self.forward(inputs)
if as_probs:
return [F.softmax(logits, -1)]
return [logits]
def loss_and_metrics(
self,
inputs: torch.Tensor,
targets: torch.Tensor,
is_training: bool = False,
) -> Tuple[Loss, Metrics]:
"""Computes loss and metrics given representations and target labels."""
assert len(targets.shape) == 1 # [batch_size]
# Categorical distribution.
logits = self.predict(inputs, is_training=is_training)[0]
log_probs = F.log_softmax(logits, dim=-1)
if self._label_smoothing != 0 and is_training:
one_hot_targets = F.one_hot(targets, self._num_classes) # pytype: disable=module-attr
smoothed_targets = (
one_hot_targets * (1 - self._label_smoothing) +
self._label_smoothing / self._num_classes)
neg_log_probs = -log_probs
assert len(neg_log_probs.shape) == 2 # [batch_size, num_classes]
xent = torch.sum(smoothed_targets * neg_log_probs, dim=1)
else:
xent = F.cross_entropy(logits, targets, reduction="none")
predicted_labels = logits.argmax(dim=-1)
error = torch.ne(predicted_labels, targets).float()
loss = torch.mean(xent)
return (loss, {
"loss": float(loss.item()),
"xent": xent.detach().cpu().numpy(),
"error": error.detach().cpu().numpy()
})
class MultiLabelHead(nn.Module):
"""A binary multi-label prediction head.
Encapsulates a linear layer to predict logits given a representation
and computes relevant metrics such as cross entropy, error,
expected-calibration-error given ground truth labels.
"""
def __init__(self,
features_dim: int,
num_classes: int,
label_smoothing: float = 0.,
name: Optional[str] = None):
super().__init__()
self._num_classes = num_classes
self._label_smoothing = label_smoothing
self._logit_layer = nn.Linear(features_dim, num_classes)
def forward(self, x):
return self._logit_layer(x)
def predict(self,
inputs: torch.Tensor,
is_training: bool = False,
as_probs=False) -> List[torch.Tensor]:
"""Computes class logits given representations."""
del is_training
logits = self.forward(inputs)
output_distributions = []
for i in range(self._num_classes):
if as_probs:
output_distributions.append(torch.sigmoid(logits[:, i]))
else:
output_distributions.append(logits[:, i])
return output_distributions
def loss_and_metrics(self,
inputs: torch.Tensor,
targets: torch.Tensor,
is_training: bool = False) -> Tuple[Loss, Metrics]:
"""Computes loss and metrics given representations and target labels."""
assert len(targets.shape) == 2 # [batch_size, num_classes]
# Product of independent Bernoulli.
predictive_distributions = self.predict(inputs, is_training=is_training)
cross_entropies = []
predicted_labels = []
errors = []
for i, predictive_distribution in enumerate(predictive_distributions):
expected_label = targets[:, i]
if self._label_smoothing != 0 and is_training:
smoothed_targets = (
expected_label * (1 - self._label_smoothing) +
self._label_smoothing / 2)
cross_entropies.append(
F.binary_cross_entropy_with_logits(predictive_distribution,
smoothed_targets))
else:
cross_entropies.append(
F.binary_cross_entropy_with_logits(predictive_distribution,
expected_label))
predicted_label = (torch.sigmoid(predictive_distribution) > 0.5).long()
predicted_labels.append(predicted_label)
error = torch.ne(predicted_label, expected_label.long())
errors.append(error.float())
cross_entropies = torch.stack(cross_entropies, dim=-1)
error = torch.stack(errors, dim=-1)
loss = torch.mean(cross_entropies)
return (loss, {
"loss": float(loss.item()),
"xent": cross_entropies.detach().cpu().numpy(),
"error": error.detach().cpu().numpy()
})
def build_head(features_dim: int, task_keys: Set[TaskKey], **kwargs) -> Head:
"""Builds an appropriate head for the given task."""
assert len(task_keys) == 1
task_key = list(task_keys)[0]
task_kind = task_key.kind
if task_kind == tasks.TaskKind.CLASSIFICATION:
return CategoricalHead(
features_dim=features_dim,
num_classes=task_key.metadata.num_classes,
name=f"{task_key.name}_head",
**kwargs)
elif task_kind == tasks.TaskKind.MULTI_LABEL_CLASSIFICATION:
return MultiLabelHead(
features_dim=features_dim,
num_classes=task_key.metadata.num_classes,
name=f"{task_key.name}_head",
**kwargs)
else:
raise ValueError(f"Unsupported task kind: {task_kind}")
| dm_nevis-master | experiments_torch/training/heads.py |
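A short, self-contained torch sketch (illustrative shapes only, not repository code) of the per-example metrics that `CategoricalHead` above computes: unreduced cross entropy plus a 0/1 error from the argmax prediction.

import torch
from torch import nn
from torch.nn import functional as F

batch_size, features_dim, num_classes = 8, 16, 5
logit_layer = nn.Linear(features_dim, num_classes)

inputs = torch.randn(batch_size, features_dim)
targets = torch.randint(num_classes, (batch_size,))

logits = logit_layer(inputs)
xent = F.cross_entropy(logits, targets, reduction="none")  # [batch_size]
error = torch.ne(logits.argmax(dim=-1), targets).float()   # [batch_size]
loss = xent.mean()
print(loss.item(), xent.shape, error.shape)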
| dm_nevis-master | experiments_torch/learners/__init__.py |
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A learner implemented for the baseline."""
import dataclasses
import functools
from typing import Iterable, Optional, Tuple
from absl import logging
from dm_nevis.benchmarker.datasets import streams
from dm_nevis.benchmarker.datasets import tasks
from dm_nevis.benchmarker.learners import learner_interface
from experiments_torch import experiment
import ml_collections
import numpy as np
import tensorflow_datasets as tfds
def learner_builder(
dataset_lookup_builder: experiment.DatasetLookupBuilderFn,
learner_config: ml_collections.ConfigDict
) -> Tuple[experiment.LearnerBuilderFn, experiment.ProgramStopper]:
"""Prepares the learner to run on launchpad."""
del learner_config
def _learner_builder():
dataset_lookup, _ = dataset_lookup_builder()
return build_example_learner(dataset_lookup)
def _stopper():
return
return _learner_builder, _stopper
@dataclasses.dataclass(frozen=True)
class ExampleLearnerState:
"""The state for the example learner."""
def build_example_learner(
dataset_lookup: experiment.DatasetLookupFn) -> learner_interface.Learner:
return learner_interface.Learner(
init=init,
train=functools.partial(train, dataset_lookup=dataset_lookup),
predict=functools.partial(predict, dataset_lookup=dataset_lookup),
)
def init() -> ExampleLearnerState:
return ExampleLearnerState()
def train(
event: streams.TrainingEvent,
state: ExampleLearnerState,
write_checkpoint: learner_interface.CheckpointFn,
*,
checkpoint_to_resume: Optional[learner_interface.Checkpoint] = None,
dataset_lookup: experiment.DatasetLookupFn,
) -> Tuple[ExampleLearnerState, learner_interface.ResourceUsage]:
"""Trains the learner given the given dataset."""
del write_checkpoint, checkpoint_to_resume
dataset = dataset_lookup(event.train_dataset_key)
logging.info("Got train task: %s with %s examples", dataset.task_key,
dataset.num_examples)
return state, learner_interface.ResourceUsage(
floating_point_operations=0.0,
peak_parameter_count=0,
peak_parameter_size_bytes=0)
def predict(
event: streams.PredictionEvent,
state: ExampleLearnerState,
*,
dataset_lookup: experiment.DatasetLookupFn,
) -> Iterable[learner_interface.Predictions]:
"""Computes predictions for each example in the referenced dataset."""
del state
dataset = dataset_lookup(event.dataset_key)
logging.info("Got predict task: %s with %s examples", dataset.task_key,
dataset.num_examples)
batch_size = 1
ds = dataset.builder_fn(shuffle=False).batch(batch_size=batch_size)
for batch in tfds.as_numpy(ds):
# For now, we make empty predictions.
if dataset.task_key.kind == tasks.TaskKind.MULTI_LABEL_CLASSIFICATION:
output = [
np.zeros((batch_size,))
for _ in range(dataset.task_key.metadata.num_classes)
]
elif dataset.task_key.kind == tasks.TaskKind.CLASSIFICATION:
output = [np.zeros((batch_size, dataset.task_key.metadata.num_classes))]
else:
raise ValueError(f"Unknown task kind: {dataset.task_key.kind}")
yield learner_interface.Predictions(batch=batch, output=output)
| dm_nevis-master | experiments_torch/learners/example/example_learner.py |
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| dm_nevis-master | experiments_torch/learners/example/__init__.py |
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A finetuning learner.
This learner supports a number of strategies for initializing the train state
for each sequential training task. One such strategy is "independent": in this
case, each model is trained independently.
"""
# pytype: disable=missing-parameter
# pytype: disable=not-callable
import copy
import dataclasses
import enum
import functools
import os
import time
from typing import Any, Callable, Dict, Iterable, Iterator, List, Optional, Sequence, Tuple
from absl import logging
from dm_nevis.benchmarker.datasets import datasets
from dm_nevis.benchmarker.datasets import streams
from dm_nevis.benchmarker.datasets import tasks
from dm_nevis.benchmarker.environment import datawriter_interface
from dm_nevis.benchmarker.learners import learner_interface
from experiments_torch import experiment
from experiments_torch.training import dataloaders
from experiments_torch.training import learning_rate_schedules
from experiments_torch.training import models
from experiments_torch.training import resources
from experiments_torch.training import trainer
import ml_collections
import numpy as np
import torch
from torch.optim import lr_scheduler
CHECKPOINT_INTERVAL = 10_000
LOG_INTERVAL_SECONDS = 5
MAX_LR_DECAY_STEPS = 4
FINETUNING_DATAFRAME_NAME = "finetuning"
DUMMY_TASK_NAME_RANDOM_PARAMS = "randomly_initialized_params"
SUPPORTED_TASK_KINDS = frozenset([
tasks.TaskKind.CLASSIFICATION,
tasks.TaskKind.MULTI_LABEL_CLASSIFICATION,
])
class FinetuningStrategy(enum.Enum):
INDEPENDENT = 0 # Randomly initialize the state for each model.
PREVIOUS = 1 # Always initialize from train state from previous task.
def learner_builder(
dataset_lookup_builder: experiment.DatasetLookupBuilderFn,
learner_config: ml_collections.ConfigDict
) -> Tuple[experiment.LearnerBuilderFn, experiment.ProgramStopper]:
"""Prepares the learner to run on launchpad."""
def stopper():
logging.info("Closing program")
def build_learner():
dataset_lookup, task_keys = dataset_lookup_builder()
return build_finetuning_learner(learner_config, dataset_lookup, task_keys)
return build_learner, stopper
@dataclasses.dataclass(frozen=True)
class TrainingContext:
train_task_index: int
config: ml_collections.ConfigDict
event: streams.TrainingEvent
initial_train_state: Optional[trainer.TrainState] = dataclasses.field(
repr=False)
@dataclasses.dataclass(frozen=True)
class FinetuningLearnerState:
"""A dataclass to hold the state of the learner.
Attributes:
seen_train_events: The (ordered) sequence of train events encountered by
this learner.
train_states: The sequence of tasks and the achieved final train states or
the checkpoint paths to the train states.
"""
seen_train_events: List[streams.TrainingEvent]
train_states: List[Tuple[tasks.TaskKey, str]]
def build_finetuning_learner(
config: ml_collections.ConfigDict,
dataset_lookup: experiment.DatasetLookupFn,
task_keys: Sequence[tasks.TaskKey],
) -> learner_interface.Learner:
"""Builds the learner.
Args:
config: The configuration to use for this learner.
dataset_lookup: A function used to construct train and predict datasets.
task_keys: The tasks that the returned learner should support.
Returns:
A learner satisfying the learner_interface.Learner interface.
"""
_verify_all_tasks_supported(task_keys)
finetuning_metrics = config.get_metrics_writer("finetuning")
cost_function = _cost_function_builder(dataset_lookup, task_keys)
return learner_interface.Learner(
init=functools.partial(
init,
config=config,
),
train=functools.partial(
train,
dataset_lookup=dataset_lookup,
config=config,
cost_function=cost_function,
finetuning_metrics=finetuning_metrics,
),
predict=functools.partial(
predict,
config=config,
dataset_lookup=dataset_lookup,
),
)
def init(
*,
config: ml_collections.ConfigDict,
) -> learner_interface.LearnerState:
"""A function to initialize the train state for the learner.
Args:
config: The learner configuration.
Returns:
The initial learner state, before the learner has seen any training data.
"""
del config
return FinetuningLearnerState(
seen_train_events=[],
train_states=[],
)
def train(
event: streams.TrainingEvent,
state: learner_interface.LearnerState,
write_checkpoint: learner_interface.CheckpointFn,
checkpoint_to_resume: Optional[learner_interface.Checkpoint] = None,
*,
dataset_lookup: experiment.DatasetLookupFn,
config: ml_collections.ConfigDict,
cost_function: Any,
finetuning_metrics: datawriter_interface.DataWriter,
) -> Tuple[learner_interface.LearnerState, learner_interface.ResourceUsage]:
"""Trains the learner given the given dataset.
Args:
event: The training event that the learner should read training data from.
state: The learner's state before training began.
write_checkpoint: A function to write intermediate checkpoints during this
training event.
checkpoint_to_resume: If this training event was previously interrupted,
then this training event may be initialized from a checkpoint that was
previously written by the write_checkpoint function.
dataset_lookup: A lookup function for fetching the dataset by key.
config: The learner config.
cost_function: The function optimizing the model.
finetuning_metrics: A metrics writer used to record which previous state
(if any) the current task is finetuned from.
Returns:
A new learner state, containing the knowledge accrued during training, along
with the resources used during training.
"""
del checkpoint_to_resume
task_key = dataset_lookup(event.train_dataset_key).task_key
initial_train_state = _get_train_state_for_finetuning(config, task_key, state,
finetuning_metrics)
context = TrainingContext(
train_task_index=len(state.seen_train_events),
config=config,
event=event,
initial_train_state=initial_train_state,
)
_, new_train_state, resources_used = cost_function(
context, write_checkpoint=write_checkpoint)
train_state_checkpoint_path = os.path.join(
config.train_states_checkpoint_path,
f"train_task_index_{len(state.seen_train_events)}_{task_key.name}.pkl")
trainer.save_train_state(train_state_checkpoint_path, task_key,
new_train_state)
return (
dataclasses.replace(
state,
train_states=[
*state.train_states, (task_key, train_state_checkpoint_path)
],
seen_train_events=[*state.seen_train_events, event],
),
resources_used,
)
def predict(
event: streams.PredictionEvent,
state: learner_interface.LearnerState,
*,
config: ml_collections.ConfigDict,
dataset_lookup: experiment.DatasetLookupFn,
) -> Iterable[learner_interface.Predictions]:
"""Computes predictions for each example in the referenced dataset.
Args:
event: An event containing a dataset key to compute predictions for.
state: The state of the learner, containing all knowledge accrued by the
learner as it was exposed to previous training events.
config: The config of the learner.
dataset_lookup: A function to fetch datasets by key.
Yields:
Batches of predictions from the model, given the learner state, over the
dataset loaded from the event.
"""
dataset = dataset_lookup(event.dataset_key)
task_key = dataset.task_key
eval_augment_fn = functools.partial(config.augmentations.eval.ctor,
**config.augmentations.eval.kwargs)
batch_iter = dataloaders.build_prediction_iterator(dataset, eval_augment_fn,
config.batch_size)
train_state = _get_latest_train_state_for_predictions(state, task_key)
model = train_state.model
model.to(config.device)
completed = 0
for batch in batch_iter():
logging.log_every_n_seconds(logging.INFO, "Completed predictions: %d/%d",
10, completed, dataset.num_examples)
completed += batch.image.shape[0]
probabilities = model.predict(
torch.tensor(batch.image).to(config.device), task_key)
yield learner_interface.Predictions(batch=batch, output=probabilities)
@dataclasses.dataclass
class FitWithEarlyStoppingState:
step: int
train_state: trainer.TrainState
best_age: int
best_metric: np.number
best_train_state: Optional[trainer.TrainState]
validation_metric: str
def _cost_function_builder(dataset_lookup_fn: experiment.DatasetLookupFn,
task_keys: Sequence[tasks.TaskKey]) -> Any:
"""Constructs the cost function used in the hyper search."""
def cost_function(train_context,
*,
write_checkpoint,
checkpoint_to_resume=None):
config = train_context.config
logging.info("Computing cost function with learner config: %s", config)
# Data for work-unit
train_dataset = dataset_lookup_fn(train_context.event.train_dataset_key)
valid_dataset = dataset_lookup_fn(train_context.event.dev_dataset_key)
task_key = train_dataset.task_key
train_augment_fn = functools.partial(config.augmentations.train.ctor,
**config.augmentations.train.kwargs)
eval_augment_fn = functools.partial(config.augmentations.eval.ctor,
**config.augmentations.eval.kwargs)
train_iter_fn = dataloaders.build_train_iterator(train_dataset,
train_augment_fn,
config.batch_size)
valid_iter_fn = dataloaders.build_prediction_iterator(
valid_dataset, eval_augment_fn, config.batch_size)
steps_per_epoch = train_dataset.num_examples // config.batch_size + 1
initial_train_state = _initialize_train_from_context(
train_context, config, task_keys, config.optimization.optimizer.ctor,
config.optimization.optimizer.kwargs)
optimizer = initial_train_state.optimizer
# If learning rate schedule is provided, we use it.
learning_rate_schedule = learning_rate_schedules.build_learning_rate_schedule(
optimizer, config.optimization.learning_rate_schedule.name,
config.optimization.learning_rate_schedule.init_learning_rate,
steps_per_epoch, config.max_steps,
config.optimization.learning_rate_schedule.kwargs)
train_metric_writer = config.get_metrics_writer(
"learner_train", index_of_training_event=train_context.train_task_index)
eval_metric_writer = config.get_metrics_writer(
"learner_eval", index_of_training_event=train_context.train_task_index)
cost, _, train_state, flops_used = fit_with_early_stopping(
initial_train_state=initial_train_state,
learning_rate_scheduler=learning_rate_schedule,
task_key=task_key,
train_iter_fn=train_iter_fn,
valid_iter_fn=valid_iter_fn,
validation_metric=config.validation_metric,
run_validation_every_n_steps=config.run_validation_every_n_steps,
early_stopping_grace=config.early_stopping_grace,
max_steps=config.max_steps,
device=config.device,
train_metrics_writer=train_metric_writer,
validation_metrics_writer=eval_metric_writer,
write_checkpoint=write_checkpoint,
checkpoint_to_resume=checkpoint_to_resume)
resources_used = learner_interface.ResourceUsage(
floating_point_operations=flops_used)
train_metric_writer.flush()
train_metric_writer.close()
eval_metric_writer.flush()
eval_metric_writer.close()
train_state.model = train_state.model.cpu()
return cost, train_state, resources_used
return cost_function
def _initialize_train_from_context(train_context, config, task_keys, opt_ctor,
opt_kwargs) -> trainer.TrainState:
"""Initializes trainer state based on the context."""
if train_context.initial_train_state is not None:
logging.info("Initializing train state from a previous state")
model = copy.deepcopy(train_context.initial_train_state.model)
load_params_fun = None
else:
logging.info("Initializing a new train state")
model = _build_model(config, task_keys)
load_params_fun = config.load_params_fn
if "load_params_fn_with_kwargs" in config:
load_params_fun = functools.partial(
config.load_params_fn_with_kwargs.fun,
**config.load_params_fn_with_kwargs.kwargs)
return trainer.init_train_state(model, opt_ctor, opt_kwargs, load_params_fun)
def _run_validation(
model: models.Model,
task_key: tasks.TaskKey,
state: FitWithEarlyStoppingState,
valid_data_iter: Iterator[datasets.MiniBatch],
device: torch.device,
additional_diagnostics: Optional[Dict[str, Any]] = None,
) -> Tuple[float, Dict[str, Any]]:
"""Runs validation and returns the cost and metrics."""
start_time = time.monotonic()
metrics = _validate_batches(model, task_key, valid_data_iter, device)
elapsed = time.monotonic() - start_time
metrics.update(
step=state.step,
validation_runtime_seconds=elapsed,
)
if additional_diagnostics:
metrics.update(additional_diagnostics)
logging.info(
"Validation completed in %.3f seconds.\n"
"Validation metrics for step %d:\n%s", elapsed, state.step,
"\n".join(f" {k}: {_prettify_value(v)}" for k, v in metrics.items()))
return float(metrics[state.validation_metric]), metrics
def _validate_batches(model: models.Model, task_key: tasks.TaskKey,
batch_iter: dataloaders.BatchIterator,
device: torch.device) -> Dict[str, float]:
"""Performs a validation run and report the metrics computed."""
all_diagnostics = []
for batch in batch_iter:
targets = batch.label
if targets is None:
targets = batch.multi_label_one_hot
_, diagnostics = model.loss_and_metrics(
torch.tensor(batch.image).to(device),
torch.tensor(targets).to(device),
is_training=False,
task_key=task_key)
diagnostics = {k: np.mean(v) for k, v in diagnostics.items()}
all_diagnostics.append(diagnostics)
return _dict_mean(all_diagnostics)
def _dict_mean(dict_list):
mean_dict = {}
for key in dict_list[0].keys():
mean_dict[key] = sum(d[key] for d in dict_list) / len(dict_list)
return mean_dict
def fit_with_early_stopping(
initial_train_state: trainer.TrainState,
learning_rate_scheduler: lr_scheduler._LRScheduler,
task_key: tasks.TaskKey,
train_iter_fn: Callable[[], Iterator[datasets.MiniBatch]],
valid_iter_fn: Callable[[], Iterator[datasets.MiniBatch]],
validation_metric: str,
run_validation_every_n_steps: int,
early_stopping_grace: int,
max_steps: int,
device: torch.device,
train_metrics_writer: datawriter_interface.DataWriter,
validation_metrics_writer: datawriter_interface.DataWriter,
write_checkpoint: Callable[[FitWithEarlyStoppingState], None],
checkpoint_to_resume: Optional[FitWithEarlyStoppingState] = None,
additional_diagnostics: Optional[Dict[str, Any]] = None,
) -> Tuple[float, Dict[str, Any], trainer.TrainState, float]:
"""Fits model with early stopping and dynamic LR schduling."""
additional_diagnostics = additional_diagnostics or {}
if checkpoint_to_resume is None:
logging.info("Starting new train loop...")
state = FitWithEarlyStoppingState( # pytype: disable=wrong-arg-types # mapping-is-not-sequence
step=0,
best_age=0,
best_metric=np.inf,
train_state=initial_train_state,
best_train_state=None,
validation_metric=validation_metric,
)
else:
logging.info("Resuming train loop from checkpoint...")
state: FitWithEarlyStoppingState = checkpoint_to_resume
step_timer = trainer.StepCountEstimator()
train_iter = train_iter_fn()
model, optimizer = initial_train_state.model, initial_train_state.optimizer
model.to(device)
while state.step < max_steps:
start_time = time.monotonic()
batch = next(train_iter)
logging.log_every_n_seconds(logging.INFO,
"Step: %d/%d, Batch %s, Steps per second: %f",
LOG_INTERVAL_SECONDS, state.step + 1, max_steps,
batch, step_timer.estimated_steps_per_second())
targets = batch.label
if targets is None:
targets = batch.multi_label_one_hot
optimizer.zero_grad()
loss, train_metrics = model.loss_and_metrics(
torch.tensor(batch.image).to(device),
torch.tensor(targets).to(device),
is_training=True,
task_key=task_key)
loss.backward()
optimizer.step()
learning_rate_scheduler.step()
train_metrics = {k: np.mean(v) for k, v in train_metrics.items()}
train_metrics.update(
step=state.step,
steps_per_second=step_timer.estimated_steps_per_second())
train_metrics.update(additional_diagnostics)
train_metrics_writer.write(train_metrics)
if state.step % CHECKPOINT_INTERVAL == 0 and state.step != 0:
logging.info("Writing checkpoint at step %d", state.step)
write_checkpoint(state)
if state.step % run_validation_every_n_steps == 0:
validation_metric, valid_metrics = _run_validation(
model, task_key, state, valid_iter_fn(), device)
validation_metrics_writer.write(valid_metrics)
if validation_metric < state.best_metric:
state.best_metric = validation_metric
state.best_train_state = trainer.TrainState(
model=copy.deepcopy(model).cpu(), optimizer=None)
state.best_age = 0
else:
state.best_age += 1
if state.best_age >= early_stopping_grace:
logging.info("Validation metrics plateaued, stopping training.")
break
step_timer.add_measurement(time.monotonic() - start_time)
state.step += 1
logging.info("Running final validation.")
validation_metric, valid_metrics = _run_validation(model, task_key, state,
valid_iter_fn(), device)
validation_metrics_writer.write(valid_metrics)
if validation_metric < state.best_metric:
state.best_metric = validation_metric
state.best_train_state = trainer.TrainState(
model=copy.deepcopy(model).cpu(), optimizer=None)
state.best_age = 0
train_flops = state.step * _estimate_flops(model, batch, task_key)
return validation_metric, valid_metrics, state.best_train_state, train_flops
def _estimate_flops(model: models.Model, batch: datasets.MiniBatch,
task_key: tasks.TaskKey) -> float:
"""Estimates number of flops doing forward/backward on a single batch."""
targets = batch.label
if targets is None:
targets = batch.multi_label_one_hot
x, y = torch.tensor(batch.image), torch.tensor(targets)
def function():
loss, _ = model.loss_and_metrics(x, y, is_training=True, task_key=task_key)
loss.backward()
flops = resources.estimate_flops(model, function)
return flops
def _get_train_state_for_finetuning(
config: ml_collections.ConfigDict,
task_key: tasks.TaskKey,
state: FinetuningLearnerState,
finetuning_metrics: datawriter_interface.DataWriter,
) -> Optional[trainer.TrainState]:
"""Optionally returns a train state to fine tune from."""
if config.finetuning.strategy is FinetuningStrategy.INDEPENDENT:
logging.info("For independent training, no initial train state is used %s",
task_key)
_write_finetuning_entry(finetuning_metrics, state, task_key, None)
return None
elif config.finetuning.strategy is FinetuningStrategy.PREVIOUS:
if not state.train_states:
logging.info(
"Finetuning enabled for %s, but there are no previous tasks.",
task_key)
_write_finetuning_entry(finetuning_metrics, state, task_key, None)
return None
else:
source_task, train_state_checkpoint_path = state.train_states[-1]
train_state = trainer.restore_train_state(train_state_checkpoint_path)
logging.info("Finetuning %s from previous task: %s.", task_key,
source_task)
_write_finetuning_entry(finetuning_metrics, state, task_key, source_task)
return train_state
raise ValueError(f"Unsupported strategy: {config.finetuning_strategy}")
def _verify_all_tasks_supported(task_keys: Iterable[tasks.TaskKey]) -> None:
unsupported_tasks = set(key.kind for key in task_keys) - SUPPORTED_TASK_KINDS
if unsupported_tasks:
raise NotImplementedError(
f"Got unsupported tasks: {unsupported_tasks}. "
"If required, you may use streams.FilteredStream "
"to construct a stream that removes cetain tasks.")
def _get_latest_train_state_for_predictions(
state: FinetuningLearnerState,
task_key: tasks.TaskKey) -> trainer.TrainState:
for key, train_state_checkpoint_path in reversed(state.train_states):
if key == task_key:
return trainer.restore_train_state(train_state_checkpoint_path)
raise ValueError(
f"Cannot compute predicions for task that has not been trained: {task_key}"
)
def _build_model(config: ml_collections.ConfigDict,
task_keys: Sequence[tasks.TaskKey]) -> models.Model:
"""Constructs the parameterized, trainable model."""
# In this learner, every task has its own set of parameters, and
# so the backbone should be identical for all heads.
return models.build_model(
functools.partial(config.model.ctor, **config.model.kwargs),
supported_tasks=task_keys,
head_kwargs={"label_smoothing": config.label_smoothing})
def _write_finetuning_entry(
finetuning_metrics: datawriter_interface.DataWriter,
state: FinetuningLearnerState,
current_task: tasks.TaskKey,
finetune_from_task: Optional[tasks.TaskKey],
) -> None:
"""Writes the selected task to finetune from."""
if finetune_from_task:
finetune_from_task_name = finetune_from_task.name
else:
finetune_from_task_name = None
finetuning_metrics.write({
"index_of_train_event": len(state.train_states),
"current_task": current_task.name,
"finetune_from_task": finetune_from_task_name,
})
def _prettify_value(value):
try:
return f"{value:.2f}"
  except (TypeError, ValueError):
return f"{value}"
|
dm_nevis-master
|
experiments_torch/learners/finetuning/finetuning_learner.py
|
dm_nevis-master
|
experiments_torch/learners/finetuning/__init__.py
|
|
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Pretrain learner on only imagenet using a ResNet18 (cheap architecture).
The resulting checkpoint can then be taken as initialization for all subsequent
experiments using the pre-trained baseline (PT).
"""
import functools
import os
from dm_nevis.benchmarker.datasets import streams
from dm_nevis.benchmarker.datasets import tasks
from dm_nevis.benchmarker.environment import logger_utils
from experiments_torch.learners.finetuning import finetuning_learner
from experiments_torch.training import augmentations
from experiments_torch.training import resnet
from dm_nevis.streams import nevis_stream
import ml_collections
from torch import optim
IMAGE_SIZE = 64
DEFAULT_MAX_STEPS = 50_000
DEFAULT_WARMUP_EPOCHS = 7
DEFAULT_EARLY_STOPPING_GRACE = 50_000
DEFAULT_CHECKPOINT_DIR = os.environ.get('NEVIS_CHECKPOINT_DIR',
'/tmp/nevis_checkpoint_dir')
def get_config() -> ml_collections.ConfigDict:
"""The learner config, satisfying the `experiments.LearnerConfig` interface.
"""
tensorboard_log_root = logger_utils.generate_tensorboard_log_root()
metrics_logger_fn = functools.partial(logger_utils.get_metrics_writer,
tensorboard_log_root)
config = ml_collections.ConfigDict({
'experiment': {
'resume_from_checkpoint_path': None,
'stream': {
'ctor': streams.FilteredStream,
'kwargs': {
'stream_ctor':
nevis_stream.NevisStream,
'supported_task_kinds': [tasks.TaskKind.CLASSIFICATION],
'stream_variant':
nevis_stream.NevisStreamVariant.IMAGENET_ONLY,
'predict_event_splits': (
nevis_stream.Split.DEV,
nevis_stream.Split.DEV_TEST,
nevis_stream.Split.TEST,
),
},
},
'learner': {
'learner_builder': finetuning_learner.learner_builder,
'config': {
'train_states_checkpoint_path': DEFAULT_CHECKPOINT_DIR,
'finetuning': {
# The strategy for initializing train state for each task.
'strategy':
finetuning_learner.FinetuningStrategy.INDEPENDENT,
},
'augmentations': {
'eval': {
'ctor': augmentations.chain,
'kwargs': {
'augmentation_ctors_with_kwargs': [
(augmentations
.central_crop_via_cropped_window_and_resize,
{
'size': (IMAGE_SIZE, IMAGE_SIZE)
}),
(augmentations.normalize, {}),
],
},
},
'train': {
'ctor': augmentations.chain,
'kwargs': {
'augmentation_ctors_with_kwargs': [
(augmentations
.random_crop_via_cropped_window_and_resize, {
'size': (IMAGE_SIZE, IMAGE_SIZE)
}),
(augmentations.random_flip, {}),
(augmentations.normalize, {}),
],
},
},
},
'model': {
'ctor': resnet.resnet34,
'kwargs': {},
},
# Optionally load and/or freeze pretrained parameters.
'load_params_fn': None,
'optimization': {
# Optimizer, must not have `learning_rate` argument as it
                      # is overridden by `learning_rate_schedule`.
# If `learning_rate_schedule` is off, then `learning_rate`
# can be used.
'optimizer': {
'ctor': optim.SGD,
'kwargs': {
# Overridden by the per-task hyper-optimization.
# Learning rate is specified by the learning rate
# schedule.
'momentum': 0.9,
'nesterov': True,
'weight_decay': 1e-4,
},
},
# Learning rate schedule.
'learning_rate_schedule': {
'name': 'warmup_cosine_decay',
'init_learning_rate':
0.1, # Can be overridden by the learner.
'kwargs': {
'warmup_epochs': DEFAULT_WARMUP_EPOCHS,
'final_learning_rate': 1e-8,
},
},
},
'validation_metric': 'error',
# Early-stopping configuration
'max_steps': DEFAULT_MAX_STEPS,
'early_stopping_grace': DEFAULT_MAX_STEPS,
'run_validation_every_n_steps': 1_000,
'image_resolution': IMAGE_SIZE,
'label_smoothing': 0.0,
'prng_seed': 1,
'batch_size': 256,
'get_metrics_writer': metrics_logger_fn,
},
},
}
})
return config
|
dm_nevis-master
|
experiments_torch/configs/pretrain_imagenet.py
|
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Finetuning learner configuration which is the basis for most experiments."""
import functools
import os
from dm_nevis.benchmarker.environment import logger_utils
from experiments_torch.learners.finetuning import finetuning_learner
from experiments_torch.training import augmentations
from experiments_torch.training import resnet
from dm_nevis.streams import example_stream
import ml_collections
from torch import optim
IMAGE_SIZE = 64
DEFAULT_MAX_STEPS = 25_000 # Reduced number of gradient steps.
DEFAULT_WARMUP_EPOCHS = 7
DEFAULT_EARLY_STOPPING_GRACE = 10
DEFAULT_CHECKPOINT_DIR = os.environ.get('NEVIS_CHECKPOINT_DIR',
'/tmp/nevis_checkpoint_dir')
def get_config() -> ml_collections.ConfigDict:
"""The learner config, satisfying the `experiments.LearnerConfig` interface.
"""
tensorboard_log_root = logger_utils.generate_tensorboard_log_root()
metrics_logger_fn = functools.partial(logger_utils.get_metrics_writer,
tensorboard_log_root)
config = ml_collections.ConfigDict({
'experiment': {
'resume_from_checkpoint_path': None,
'stream': {
'ctor': example_stream.ExampleStream,
'kwargs': {}
},
'learner': {
'learner_builder': finetuning_learner.learner_builder,
'config': {
'train_states_checkpoint_path': DEFAULT_CHECKPOINT_DIR,
'finetuning': {
# The strategy for initializing train state for each task.
'strategy':
finetuning_learner.FinetuningStrategy.INDEPENDENT,
},
'augmentations': {
'eval': {
'ctor': augmentations.chain,
'kwargs': {
'augmentation_ctors_with_kwargs': [
(augmentations
.central_crop_via_cropped_window_and_resize,
{
'size': (IMAGE_SIZE, IMAGE_SIZE)
}),
(augmentations.normalize, {}),
],
},
},
'train': {
'ctor': augmentations.chain,
'kwargs': {
'augmentation_ctors_with_kwargs': [
(augmentations
.random_crop_via_cropped_window_and_resize, {
'size': (IMAGE_SIZE, IMAGE_SIZE)
}),
(augmentations.random_flip, {}),
(augmentations.normalize, {}),
],
},
},
},
'model': {
'ctor': resnet.resnet18, # Smaller network.
'kwargs': {},
},
# Optionally load and/or freeze pretrained parameters.
'load_params_fn': None,
'optimization': {
# Optimizer, must not have `learning_rate` argument as it
                      # is overridden by `learning_rate_schedule`.
# If `learning_rate_schedule` is off, then `learning_rate`
# can be used.
'optimizer': {
'ctor': optim.SGD,
'kwargs': {
# Overridden by the per-task hyper-optimization.
# Learning rate is specified by the learning rate
# schedule.
'momentum': 0.9,
'nesterov': True,
'weight_decay': 1e-4,
},
},
# Learning rate schedule.
'learning_rate_schedule': {
'name': 'warmup_cosine_decay',
'init_learning_rate':
0.1, # Can be overridden by the learner.
'kwargs': {
'warmup_epochs': DEFAULT_WARMUP_EPOCHS,
'final_learning_rate': 1e-8,
},
},
},
'validation_metric': 'error',
# Early-stopping configuration
'max_steps': DEFAULT_MAX_STEPS,
'early_stopping_grace': DEFAULT_MAX_STEPS,
'run_validation_every_n_steps': 1_000,
'image_resolution': IMAGE_SIZE,
'label_smoothing': 0.0,
'prng_seed': 1,
'batch_size': 256,
'get_metrics_writer': metrics_logger_fn,
},
},
}
})
return config
|
dm_nevis-master
|
experiments_torch/configs/cheap_finetuning_ind.py
|
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
|
dm_nevis-master
|
experiments_torch/configs/__init__.py
|
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Finetuning learner configuration which is the basis for most experiments."""
import functools
import os
from dm_nevis.benchmarker.environment import logger_utils
from experiments_torch.learners.finetuning import finetuning_learner
from experiments_torch.training import augmentations
from experiments_torch.training import resnet
from dm_nevis.streams import example_stream
import ml_collections
from torch import optim
IMAGE_SIZE = 64
DEFAULT_MAX_STEPS = 10_000
DEFAULT_WARMUP_EPOCHS = 7
DEFAULT_EARLY_STOPPING_GRACE = 10
DEFAULT_CHECKPOINT_DIR = os.environ.get('NEVIS_CHECKPOINT_DIR',
'/tmp/nevis_checkpoint_dir')
def get_config() -> ml_collections.ConfigDict:
"""The learner config, satisfying the `experiments.LearnerConfig` interface.
"""
tensorboard_log_root = logger_utils.generate_tensorboard_log_root()
metrics_logger_fn = functools.partial(logger_utils.get_metrics_writer,
tensorboard_log_root)
config = ml_collections.ConfigDict({
'experiment': {
'resume_from_checkpoint_path': None,
'stream': {
'ctor': example_stream.ExampleStream,
'kwargs': {}
},
'learner': {
'learner_builder': finetuning_learner.learner_builder,
'config': {
'train_states_checkpoint_path': DEFAULT_CHECKPOINT_DIR,
'finetuning': {
# The strategy for initializing train state for each task.
'strategy':
finetuning_learner.FinetuningStrategy.INDEPENDENT,
},
'augmentations': {
'eval': {
'ctor': augmentations.chain,
'kwargs': {
'augmentation_ctors_with_kwargs': [
(augmentations
.central_crop_via_cropped_window_and_resize,
{
'size': (IMAGE_SIZE, IMAGE_SIZE)
}),
(augmentations.normalize, {}),
],
},
},
'train': {
'ctor': augmentations.chain,
'kwargs': {
'augmentation_ctors_with_kwargs': [
(augmentations
.random_crop_via_cropped_window_and_resize, {
'size': (IMAGE_SIZE, IMAGE_SIZE)
}),
(augmentations.random_flip, {}),
(augmentations.normalize, {}),
],
},
},
},
'model': {
'ctor': resnet.resnet34,
'kwargs': {},
},
# Optionally load and/or freeze pretrained parameters.
'load_params_fn': None,
'optimization': {
# Optimizer, must not have `learning_rate` argument as it
                      # is overridden by `learning_rate_schedule`.
# If `learning_rate_schedule` is off, then `learning_rate`
# can be used.
'optimizer': {
'ctor': optim.SGD,
'kwargs': {
# Overridden by the per-task hyper-optimization.
# Learning rate is specified by the learning rate
# schedule.
'momentum': 0.9,
'nesterov': True,
'weight_decay': 1e-4,
},
},
# Learning rate schedule.
'learning_rate_schedule': {
'name': 'warmup_cosine_decay',
'init_learning_rate':
0.1, # Can be overridden by the learner.
'kwargs': {
'warmup_epochs': DEFAULT_WARMUP_EPOCHS,
'final_learning_rate': 1e-8,
},
},
},
'validation_metric': 'error',
# Early-stopping configuration
'max_steps': DEFAULT_MAX_STEPS,
'early_stopping_grace': DEFAULT_MAX_STEPS,
'run_validation_every_n_steps': 1_000,
'image_resolution': IMAGE_SIZE,
'label_smoothing': 0.0,
'prng_seed': 1,
'batch_size': 256,
'get_metrics_writer': metrics_logger_fn,
},
},
}
})
return config
|
dm_nevis-master
|
experiments_torch/configs/finetuning_ind.py
|
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example learner config.
This is for use with `learners/example`, and is intended to show how to
implement a bare-bones learner.
"""
import functools
from dm_nevis.benchmarker.environment import logger_utils
from experiments_torch.learners.example import example_learner
from dm_nevis.streams import nevis_stream
import ml_collections
def get_config() -> ml_collections.ConfigDict:
"""The learner config, satisfying the `experiments.LearnerConfig` interface.
"""
tensorboard_log_root = logger_utils.generate_tensorboard_log_root()
metrics_logger_fn = functools.partial(logger_utils.get_metrics_writer,
tensorboard_log_root)
return ml_collections.ConfigDict({
'experiment': {
'resume_from_checkpoint_path': None,
'stream': {
'ctor': nevis_stream.NevisStream,
'kwargs': {
'stream_variant': nevis_stream.NevisStreamVariant.DEBUG,
}
},
'learner': {
'learner_builder': example_learner.learner_builder,
'config': {
'get_metrics_writer': metrics_logger_fn
}
},
},
})
|
dm_nevis-master
|
experiments_torch/configs/example.py
|
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Finetuning pretrained model from a checkpoint."""
import functools
import os
from dm_nevis.benchmarker.environment import logger_utils
from experiments_torch.environment import pretrained_model_loader
from experiments_torch.learners.finetuning import finetuning_learner
from experiments_torch.training import augmentations
from experiments_torch.training import resnet
from dm_nevis.streams import example_stream
import ml_collections
from torch import optim
IMAGE_SIZE = 64
DEFAULT_MAX_STEPS = 10_000
DEFAULT_WARMUP_EPOCHS = 7
DEFAULT_EARLY_STOPPING_GRACE = 10
DEFAULT_CHECKPOINT_DIR = os.environ.get('NEVIS_CHECKPOINT_DIR',
'/tmp/nevis_checkpoint_dir')
DEFAULT_PRETRAIN_CHECKPOINT_PATH = os.path.join(DEFAULT_CHECKPOINT_DIR,
'pretraining.pkl')
FREEZE_PRETRAINED_BACKBONE = False
def get_config() -> ml_collections.ConfigDict:
"""The learner config, satisfying the `experiments.LearnerConfig` interface.
"""
tensorboard_log_root = logger_utils.generate_tensorboard_log_root()
metrics_logger_fn = functools.partial(logger_utils.get_metrics_writer,
tensorboard_log_root)
config = ml_collections.ConfigDict({
'experiment': {
'resume_from_checkpoint_path': None,
'stream': {
'ctor': example_stream.ExampleStream,
'kwargs': {}
},
'learner': {
'learner_builder': finetuning_learner.learner_builder,
'config': {
'train_states_checkpoint_path': DEFAULT_CHECKPOINT_DIR,
'finetuning': {
# The strategy for initializing train state for each task.
'strategy':
finetuning_learner.FinetuningStrategy.INDEPENDENT,
},
'augmentations': {
'eval': {
'ctor': augmentations.chain,
'kwargs': {
'augmentation_ctors_with_kwargs': [
(augmentations
.central_crop_via_cropped_window_and_resize,
{
'size': (IMAGE_SIZE, IMAGE_SIZE)
}),
(augmentations.normalize, {}),
],
},
},
'train': {
'ctor': augmentations.chain,
'kwargs': {
'augmentation_ctors_with_kwargs': [
(augmentations
.random_crop_via_cropped_window_and_resize, {
'size': (IMAGE_SIZE, IMAGE_SIZE)
}),
(augmentations.random_flip, {}),
(augmentations.normalize, {}),
],
},
},
},
'model': {
'ctor': resnet.resnet34,
'kwargs': {},
},
# Optionally load and/or freeze pretrained parameters.
'load_params_fn': None,
'load_params_fn_with_kwargs': {
'fun':
pretrained_model_loader.load_model_params_from_ckpt,
'kwargs': {
'freeze_pretrained_backbone':
FREEZE_PRETRAINED_BACKBONE,
'checkpoint_path':
DEFAULT_PRETRAIN_CHECKPOINT_PATH
},
},
'optimization': {
# Optimizer, must not have `learning_rate` argument as it
                      # is overridden by `learning_rate_schedule`.
# If `learning_rate_schedule` is off, then `learning_rate`
# can be used.
'optimizer': {
'ctor': optim.SGD,
'kwargs': {
# Overridden by the per-task hyper-optimization.
# Learning rate is specified by the learning rate
# schedule.
'momentum': 0.9,
'nesterov': True,
'weight_decay': 1e-4,
},
},
# Learning rate schedule.
'learning_rate_schedule': {
'name': 'warmup_cosine_decay',
'init_learning_rate':
0.1, # Can be overridden by the learner.
'kwargs': {
'warmup_epochs': DEFAULT_WARMUP_EPOCHS,
'final_learning_rate': 1e-8,
},
},
},
'validation_metric': 'error',
# Early-stopping configuration
'max_steps': DEFAULT_MAX_STEPS,
'early_stopping_grace': DEFAULT_MAX_STEPS,
'run_validation_every_n_steps': 1_000,
'image_resolution': IMAGE_SIZE,
'label_smoothing': 0.0,
'prng_seed': 1,
'batch_size': 256,
'get_metrics_writer': metrics_logger_fn,
},
},
}
})
return config
|
dm_nevis-master
|
experiments_torch/configs/finetuning_ind_pretrained.py
|
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Finetuning learner configuration which is the basis for most experiments."""
import functools
import os
from dm_nevis.benchmarker.environment import logger_utils
from experiments_torch.learners.finetuning import finetuning_learner
from experiments_torch.training import augmentations
from experiments_torch.training import resnet
from dm_nevis.streams import example_stream
import ml_collections
from torch import optim
IMAGE_SIZE = 64
DEFAULT_MAX_STEPS = 10_000
DEFAULT_WARMUP_EPOCHS = 7
DEFAULT_EARLY_STOPPING_GRACE = 10
DEFAULT_CHECKPOINT_DIR = os.environ.get('NEVIS_CHECKPOINT_DIR',
'/tmp/nevis_checkpoint_dir')
def get_config() -> ml_collections.ConfigDict:
"""The learner config, satisfying the `experiments.LearnerConfig` interface.
"""
tensorboard_log_root = logger_utils.generate_tensorboard_log_root()
metrics_logger_fn = functools.partial(logger_utils.get_metrics_writer,
tensorboard_log_root)
config = ml_collections.ConfigDict({
'experiment': {
'resume_from_checkpoint_path': None,
'stream': {
'ctor': example_stream.ExampleStream,
'kwargs': {}
},
'learner': {
'learner_builder': finetuning_learner.learner_builder,
'config': {
'train_states_checkpoint_path': DEFAULT_CHECKPOINT_DIR,
'finetuning': {
# The strategy for initializing train state for each task.
'strategy': finetuning_learner.FinetuningStrategy.PREVIOUS
},
'augmentations': {
'eval': {
'ctor': augmentations.chain,
'kwargs': {
'augmentation_ctors_with_kwargs': [
(augmentations
.central_crop_via_cropped_window_and_resize,
{
'size': (IMAGE_SIZE, IMAGE_SIZE)
}),
(augmentations.normalize, {}),
],
},
},
'train': {
'ctor': augmentations.chain,
'kwargs': {
'augmentation_ctors_with_kwargs': [
(augmentations
.random_crop_via_cropped_window_and_resize, {
'size': (IMAGE_SIZE, IMAGE_SIZE)
}),
(augmentations.random_flip, {}),
(augmentations.normalize, {}),
],
},
},
},
'model': {
'ctor': resnet.resnet34,
'kwargs': {},
},
# Optionally load and/or freeze pretrained parameters.
'load_params_fn': None,
'optimization': {
# Optimizer, must not have `learning_rate` argument as it
                      # is overridden by `learning_rate_schedule`.
# If `learning_rate_schedule` is off, then `learning_rate`
# can be used.
'optimizer': {
'ctor': optim.SGD,
'kwargs': {
# Overridden by the per-task hyper-optimization.
# Learning rate is specified by the learning rate
# schedule.
'momentum': 0.9,
'nesterov': True,
'weight_decay': 1e-4,
},
},
# Learning rate schedule.
'learning_rate_schedule': {
'name': 'warmup_cosine_decay',
'init_learning_rate':
0.1, # Can be overridden by the learner.
'kwargs': {
'warmup_epochs': DEFAULT_WARMUP_EPOCHS,
'final_learning_rate': 1e-8,
},
},
},
'validation_metric': 'error',
# Early-stopping configuration
'max_steps': DEFAULT_MAX_STEPS,
'early_stopping_grace': DEFAULT_MAX_STEPS,
'run_validation_every_n_steps': 1_000,
'image_resolution': IMAGE_SIZE,
'label_smoothing': 0.0,
'prng_seed': 1,
'batch_size': 256,
'get_metrics_writer': metrics_logger_fn,
},
},
}
})
return config
|
dm_nevis-master
|
experiments_torch/configs/finetuning_prev.py
|
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A checkpointer that saves with pickle."""
import os
import pickle
from typing import Any, Optional
from absl import logging
class PickleCheckpointer:
"""A checkpointer that saves with pickle.
The current checkpointer will always overwrite the most recent checkpoint
  in the base path.
"""
def __init__(self, base_path: str):
"""Creates a pickle checkpointer.
Args:
base_path: Path to write checkpoints to.
Returns: A checkpointer.
"""
os.makedirs(os.path.dirname(base_path), exist_ok=True)
self.base_path = base_path
def write(self, state: Any) -> None:
"""Writes a checkpoint to the base path.
Args:
state: Arbitrary checkpointable state
"""
logging.info("Saving checkpoint to %s", self.base_path)
partial_path = f"{self.base_path}.part"
with open(partial_path, "wb") as f:
pickle.dump(state, f)
os.rename(partial_path, self.base_path)
def restore(self) -> Optional[Any]:
"""Restores the most recent checkpointed state.
Returns:
The most recent checkpoint that was successfully written using write,
or None if no checkpoint state is available.
"""
if not os.path.exists(self.base_path):
logging.warning("No checkpoint found at %s", self.base_path)
return None
logging.info("Restore checkpoint from %s", self.base_path)
with open(self.base_path, "rb") as f:
state = pickle.load(f)
return state
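# A hedged usage sketch (not part of the original file): round-tripping an
# arbitrary picklable object through the checkpointer defined above. The path
# and the payload are illustrative only.
def _pickle_checkpointer_demo() -> None:
  checkpointer = PickleCheckpointer("/tmp/nevis_demo/checkpoint.pkl")
  state = {"step": 42, "best_metric": 0.17}
  checkpointer.write(state)  # Written atomically via a ".part" file + rename.
  restored = checkpointer.restore()  # Returns None if nothing was ever written.
  assert restored == state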
|
dm_nevis-master
|
experiments_torch/environment/pickle_checkpointer.py
|
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A dummy checkpointer doing nothing."""
from typing import Any, Optional
from absl import logging
class NoOpCheckpointer:
"""A No-Operation checkpointer doing nothing."""
def __init__(self,
*,
namespace: Optional[str] = None,
base_path: Optional[str] = None,
restore_path: Optional[str] = None):
"""Creates a no-op checkpointer.
Args:
namespace: Appended to the base_path, so that checkpoints written with
this writer are independent.
base_path: if set, checkpoints will be written here.
restore_path: path to restore state from.
Returns: A checkpointer.
"""
del namespace, base_path, restore_path
def write(self, state: Any) -> None:
"""Writes a checkpoint.
Args:
state: Arbitrary checkpointable state
"""
del state
logging.warning(
"Received checkpoint write request (ignoring it - no checkpoint will be written)."
)
def restore(self, *, age: int = 0) -> Optional[Any]:
"""Restores the most recent checkpointed state.
Args:
age: if present, the age of the checkpoint to restore.
Returns:
The most recent checkpoint that was successfully written using write,
or None if no checkpoint state is available.
"""
del age
logging.warning(
"Received checkpoint restore request (ignoring it - no checkpoint will be restored)."
)
return None
|
dm_nevis-master
|
experiments_torch/environment/noop_checkpointer.py
|
"""Functions for loading pretrained models from a checkpoint."""
from typing import Tuple, Union, Dict
from absl import logging
from experiments_torch.training import models
from experiments_torch.training import trainer
from torch.nn import parameter
def load_model_params_from_ckpt(
model: models.Model,
freeze_pretrained_backbone: bool = False,
checkpoint_path: str = '',
) -> Tuple[parameter.Parameter, Union[parameter.Parameter, Dict]]: # pylint: disable=g-bare-generic
"""Load pretrained model parameter from a checkpoint.
Args:
model: the model.
freeze_pretrained_backbone: whether to freeze pretrained backbone or not.
    checkpoint_path: path to the pretrained checkpoint.
Returns:
updated params split into trainable and frozen.
"""
trainer_state = trainer.restore_train_state(checkpoint_path)
if trainer_state is None or trainer_state.model is None:
return model.backbone.parameters(), {}
restored_model = trainer_state.model
assert isinstance(restored_model, models.Model)
logging.info('Loading pretrained model finished.')
for model_param, restored_model_param in zip(
model.backbone.parameters(), restored_model.backbone.parameters()):
assert model_param.data.shape == restored_model_param.data.shape
model_param.data = restored_model_param.data
model_param.requires_grad = not freeze_pretrained_backbone
if freeze_pretrained_backbone:
return model.backbone.parameters(), model.heads_map.parameters()
else:
return model.parameters(), {}
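# A hedged sketch (not part of the original file): one plausible way to bind the
# `load_params_fn_with_kwargs` entries used by the pretrained config (a `fun`
# plus its `kwargs`) into the single-argument callable shape that the learner
# configs call `load_params_fn`. The exact wiring inside the learner may differ.
def _bind_load_params_fn(checkpoint_path: str,
                         freeze_pretrained_backbone: bool = False):
  import functools  # Local import to keep this illustrative sketch self-contained.
  # Returns a callable that only takes the model, with the remaining keyword
  # arguments of `load_model_params_from_ckpt` fixed up front.
  return functools.partial(
      load_model_params_from_ckpt,
      freeze_pretrained_backbone=freeze_pretrained_backbone,
      checkpoint_path=checkpoint_path)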
|
dm_nevis-master
|
experiments_torch/environment/pretrained_model_loader.py
|
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
|
dm_nevis-master
|
experiments_torch/environment/__init__.py
|
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A datawriter logging on stdout."""
from typing import Any, Mapping
from absl import logging
class LoggingWriter:
"""A datawriter logging on stderr."""
def __init__(self, prefix: str = ""):
self.prefix = f"{prefix}: "
def write(self, metrics_data: Mapping[str, Any]) -> None:
"""Writes metrics data on stdout.
Args:
metrics_data: A mapping of metrics name to metrics value to log.
"""
message = self.prefix + "\n".join(
[f"{k}: {v}" for k, v in metrics_data.items()])
logging.info(message)
def flush(self) -> None:
"""Flushes the buffer and ensure data is actually written."""
logging.flush()
def close(self) -> None:
"""Closes logging writer."""
|
dm_nevis-master
|
experiments_torch/environment/logging_writer.py
|
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for experiment."""
from absl.testing import absltest
from absl.testing import parameterized
from experiments_jax import experiment
from experiments_jax.configs import finetuning_dknn
from experiments_jax.configs import finetuning_prev
CONFIGS_TO_TEST = [
{
'testcase_name': 'finetuning_learner',
'config': finetuning_prev.get_test_config(),
},
{
'testcase_name': 'finetuning_dknn_learner',
'config': finetuning_dknn.get_test_config(),
},
]
class BaselineTest(parameterized.TestCase):
@parameterized.named_parameters(CONFIGS_TO_TEST)
def test_experiment_runs_and_finishes(self, config):
experiment_config = experiment.config_from_config_dict(config.experiment)
experiment.run_program(experiment_config)
if __name__ == '__main__':
absltest.main()
|
dm_nevis-master
|
experiments_jax/experiment_test.py
|
dm_nevis-master
|
experiments_jax/__init__.py
|
|
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Creates a launchpad program for benchmarking learners.
This module provides an interface for constructing a launchpad program that can
benchmark learners using the benchmarker. Learners are configured using
instances of the `ExperimentConfig` class. This class may be initialized from an
`ml_collections.ConfigDict` using the `config_from_config_dict` function.
To benchmark a learner on launchpad, users must provide a function
`launchpad_learner_builder` that,
1) adds any learner-specific launchpad nodes to the launchpad program and,
2) returns a callable for building the learner.
We opt to pass around callable "builder" functions rather than passing around
instantiated objects, since these builder functions may be called directly on
the launchpad nodes where the objects are to be instantiated. This means that we
do not require that the object instances be serializable, and that the launcher
program does not have to initialize the stream or learner in order to build the
programs being launched.
When running on XManager, launchpad requires information on the resource types
to use for each program group. Every program defined by this module will
contain an `environment` group, which is the leader thread running the outer
loop of the environment. A single node will be started in this group
containing the environment. This node will also instantiate the learner
using the builder function returned by `launchpad_learner_builder`. For learners
that require only a single node, it may suffice to allocate a sufficiently large
resource type to the environment group.
"""
import dataclasses
from typing import Any, Callable, Mapping, Optional, Sequence, Tuple, Protocol
from absl import logging
from dm_nevis.benchmarker.datasets import datasets
from dm_nevis.benchmarker.datasets import streams
from dm_nevis.benchmarker.datasets import tasks
from dm_nevis.benchmarker.environment import datawriter_interface
from dm_nevis.benchmarker.environment import environment
from dm_nevis.benchmarker.learners import learner_interface
from experiments_jax.environment import noop_checkpointer
from experiments_jax.metrics import nevis_metrics
import ml_collections
ProgramStopper = Callable[[], None]
LearnerBuilderFn = Callable[[], learner_interface.Learner]
StreamBuilderFn = Callable[[], streams.Stream]
DatasetLookupFn = Callable[[streams.DatasetKey], datasets.Dataset]
DatasetLookupBuilderFn = Callable[[], Tuple[DatasetLookupFn,
Sequence[tasks.TaskKey]]]
BENCHMARKER_DATAFRAME = "benchmark"
class MetaLearnerBuilderFn(Protocol):
"""The signature of the function that prepares a learner to run on launchpad.
Learners are given access to the launchpad program, which allows them to
add auxiliary nodes to the launchpad program. The function then returns a
callable used to initialize the learner. Note that the returned callable will
  be executed directly on the node running the environment; this means that the
learner does not need to be serializable. Similarly, the dataset lookup is
wrapped in a builder function. This builder function must be serializable,
but the actual dataset lookup returned from the builder need not be.
In order to ensure graceful termination when using launchpad with threads
(which we use for running tests on TAP), learners can provide a function for
gracefully terminating any resources that they have spawned.
"""
def __call__(
self, *, dataset_lookup_builder: DatasetLookupBuilderFn,
learner_config: ml_collections.ConfigDict
) -> Tuple[LearnerBuilderFn, ProgramStopper]:
"""Callable used to initialize the learner.
Args:
dataset_lookup_builder: A function that returns a dataset lookup, and the
sequence of training task keys that will be fed to the learner. This is
a 'builder' function since we want to be able to construct the objects
directly on the machines where they will run, rather than constructing
them in the launchpad main process and then pickling the functions.
learner_config: The learner-specific configuration.
Returns:
A function for constructing a learner satisfying the learner interface,
and a function for gracefully stopping the learner's resources.
"""
@dataclasses.dataclass
class LearnerConfig:
learner_builder: MetaLearnerBuilderFn
config: ml_collections.ConfigDict
@dataclasses.dataclass
class StreamConfig:
ctor: Callable[..., streams.Stream]
kwargs: Mapping[str, Any]
@dataclasses.dataclass
class ExperimentConfig:
resume_from_checkpoint_path: str
stream: StreamConfig
learner: LearnerConfig
def config_from_config_dict(cfg: ml_collections.ConfigDict) -> ExperimentConfig:
"""Constructs a typed experiment config from an untyped config dict."""
resume_from_checkpoint_path = cfg.resume_from_checkpoint_path
stream_config = StreamConfig(**cfg.stream)
learner_config = LearnerConfig(**cfg.learner)
return ExperimentConfig(
resume_from_checkpoint_path=resume_from_checkpoint_path,
stream=stream_config,
learner=learner_config,
)
def _stopper():
return
def run_program(config: ExperimentConfig):
"""Prepares a launchpad program to be executed."""
stream_builder, dataset_lookup_builder = _stream_builders(config.stream)
logging.info("Building learner to run on launchpad")
learner_builder, learner_stopper = config.learner.learner_builder(
dataset_lookup_builder=dataset_lookup_builder,
learner_config=config.learner.config,
)
benchmark_metrics_writer = config.learner.config.get_metrics_writer(
"benchmarker")
return _run_environment(config.resume_from_checkpoint_path, stream_builder,
learner_builder, learner_stopper, _stopper,
benchmark_metrics_writer)
def _run_environment(checkpoint_restore_path: Optional[str],
stream_builder: StreamBuilderFn,
learner_builder: LearnerBuilderFn,
learner_stopper: ProgramStopper, stopper: ProgramStopper,
benchmark_metrics_writer: datawriter_interface.DataWriter):
"""Runs the environment."""
learner = learner_builder()
stream = stream_builder()
checkpointer = noop_checkpointer.NoOpCheckpointer(
restore_path=checkpoint_restore_path)
metrics = nevis_metrics.nevis_metrics(stream.get_dataset_by_key,
benchmark_metrics_writer)
optional_checkpoint_to_resume = checkpointer.restore()
output = environment.run(
learner,
stream,
metrics,
write_checkpoint=checkpointer.write,
checkpoint_to_resume=optional_checkpoint_to_resume,
)
metrics = {
**output.results,
**dataclasses.asdict(output.train_resources_used)
}
logging.info("Benchmark Results: %s", metrics)
benchmark_metrics_writer.close() # Flush and close metrics writer
logging.info("Stopping Launchpad...")
learner_stopper()
stopper()
def _stream_builders(
config: StreamConfig) -> Tuple[StreamBuilderFn, DatasetLookupBuilderFn]:
"""Builds functions that can instantiate the stream and dataset lookup."""
def stream_builder():
return config.ctor(**config.kwargs)
def dataset_lookup_builder():
stream = stream_builder()
task_keys = _all_train_task_keys(stream)
return stream.get_dataset_by_key, task_keys
return stream_builder, dataset_lookup_builder
def _all_train_task_keys(stream: streams.Stream) -> Sequence[tasks.TaskKey]:
task_keys = []
# TODO: Consider adding this to the stream interface.
for event in stream.events():
if isinstance(event, streams.TrainingEvent):
dataset = stream.get_dataset_by_key(event.train_dataset_key)
task_keys.append(dataset.task_key)
return task_keys
|
dm_nevis-master
|
experiments_jax/experiment.py
|
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Entrypoint for the JAX experiments."""
from collections.abc import Sequence
from absl import app
from experiments_jax import experiment
from ml_collections import config_flags
_CONFIG = config_flags.DEFINE_config_file('config', None, 'Configuration File')
def main(argv: Sequence[str]) -> None:
if len(argv) > 1:
raise app.UsageError('Too many command-line arguments.')
config = _CONFIG.value
experiment.run_program(config.experiment)
if __name__ == '__main__':
app.run(main)
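# A hedged usage note (not part of the original file): the `--config` flag above
# is defined via ml_collections.config_flags, so the entrypoint is typically
# invoked with a config file path. The module invocation and config file below
# are assumptions for illustration:
#   python -m experiments_jax.launch \
#       --config=experiments_jax/configs/finetuning_prev.py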
|
dm_nevis-master
|
experiments_jax/launch.py
|
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Metrics for the Nevis project."""
import collections
import dataclasses
import io
import os
from typing import Callable, Iterator, Mapping, Optional, Sequence, Union
from absl import logging
from dm_nevis.benchmarker.datasets import datasets
from dm_nevis.benchmarker.datasets import streams
from dm_nevis.benchmarker.datasets import tasks
from dm_nevis.benchmarker.environment import datawriter_interface
from dm_nevis.benchmarker.learners import learner_interface
from dm_nevis.benchmarker.metrics import classification_metrics
from dm_nevis.benchmarker.metrics import metrics_aggregators
from dm_nevis.benchmarker.metrics import multi_label_classification_metrics
import numpy as np
from tensorflow.io import gfile
KNOWN_SPLIT_SUFFICES = frozenset([
"train",
"dev",
"train_and_dev",
"dev_test",
"test",
])
UNKNOWN_SPLIT_NAME = "unknown_split"
DEFAULT_OUTPUT_DIR = os.environ.get("NEVIS_OUTPUT_DIR", "/tmp/nevis_output_dir")
@dataclasses.dataclass(frozen=True)
class PredictionMetrics:
event: streams.PredictionEvent
task: tasks.TaskKey
metrics: Union[
classification_metrics.ClassificationMetrics,
multi_label_classification_metrics.MultiLabelClassificationMetrics]
@dataclasses.dataclass(frozen=True)
class TrainMetrics:
event: streams.TrainingEvent
task: tasks.TaskKey
resources_used: learner_interface.ResourceUsage
@dataclasses.dataclass(frozen=True)
class NevisMetricsState:
"""The metrics state for this aggregator.
We maintain a 1-to-1 relationship between events in the stream and entries
in the (ordered) sequence of metrics objects.
"""
predictions_dir: str
metrics: Sequence[Union[PredictionMetrics, TrainMetrics]]
def nevis_metrics(
dataset_lookup: Callable[[streams.DatasetKey], datasets.Dataset],
metrics_writer: datawriter_interface.DataWriter
) -> metrics_aggregators.MetricsAggregator:
"""Returns a metrics aggregator for the Nevis stream.
This aggregator computes common classification metrics for every
prediction event in the stream. Once the stream has finished, the aggregator
will fetch the final computed metrics for each task, and then compute an
overall normalized accuracy for each of these final computed metrics,
normalized by the total number of examples.
Args:
dataset_lookup: A callable to retrieve datasets given the dataset key.
metrics_writer: A pipe to write debug metrics to. This will be written each
time aggregate is called.
Returns:
A metrics aggregator for use in the Nevis stream and with the benchmarker.
"""
def init() -> NevisMetricsState:
logging.info("Initializing metrics")
predictions_dir = _create_output_dir()
logging.info("Writing raw predictions to %s", predictions_dir)
return NevisMetricsState(
predictions_dir=predictions_dir,
metrics=[],
)
def aggregate_train_event(
state: NevisMetricsState,
event: streams.TrainingEvent,
resources_used: learner_interface.ResourceUsage,
) -> NevisMetricsState:
task_key = dataset_lookup(event.dev_dataset_key).task_key
return dataclasses.replace(
state,
metrics=[
*state.metrics,
TrainMetrics(event, task_key, resources_used),
],
)
def aggregate_predict_event(
state: NevisMetricsState,
event: streams.PredictionEvent,
predictions: Iterator[learner_interface.Predictions],
) -> NevisMetricsState:
resources_used = _combined_train_resources_used(state)
dataset = dataset_lookup(event.dataset_key)
task = dataset.task_key
task_kind = task.kind
outdir = os.path.join(
state.predictions_dir,
f"event_{len(state.metrics)}",
)
if not gfile.exists(outdir):
gfile.makedirs(outdir)
path = os.path.join(outdir, "raw_predictions.npz")
with WrappedPredictionsWriter(predictions, path=path, task=task) as wrapped:
if task_kind == tasks.TaskKind.CLASSIFICATION:
metrics = classification_metrics.compute_metrics(wrapped)
elif task_kind == tasks.TaskKind.MULTI_LABEL_CLASSIFICATION:
metrics = multi_label_classification_metrics.compute_metrics(wrapped)
else:
raise NotImplementedError(f"Unsupported task kind: {task_kind}.")
payload = {
"raw_predictions_and_targets_path": path,
"stream_index": len(state.metrics),
"index_of_most_recent_train_event": _num_train_events(state) - 1,
"task_name": task.name,
"task_kind": str(task.kind),
"dataset_key": str(event.dataset_key),
"data_split": _try_to_extract_split(event.dataset_key),
"cumulative_train_flops_used": resources_used.floating_point_operations,
"peak_parameter_count": resources_used.peak_parameter_count,
"peak_parameter_size_bytes": resources_used.peak_parameter_size_bytes,
**metrics._asdict(),
}
logging.info("Metrics for task %s: %s", task.name, payload)
metrics_writer.write(payload)
metrics_writer.flush()
return dataclasses.replace(
state,
metrics=[*state.metrics,
PredictionMetrics(event, task, metrics)],
)
return metrics_aggregators.MetricsAggregator(init, aggregate_train_event,
aggregate_predict_event,
_compute_results)
def _compute_results(state: NevisMetricsState) -> metrics_aggregators.Results:
"""Compute statistics over the stream."""
prediction_metrics_by_split = _extract_prediction_metrics_by_split(state)
results = {}
for split, metrics in prediction_metrics_by_split.items():
single_label_results = _compute_single_label_results(metrics)
multi_label_results = _compute_multi_label_results(metrics)
for key, value in single_label_results.items():
results[f"{split}_{key}"] = value
for key, value in multi_label_results.items():
results[f"{split}_{key}"] = value
return results
def _extract_prediction_metrics_by_split(
state: NevisMetricsState) -> Mapping[str, Sequence[PredictionMetrics]]:
"""Separates out the predict metrics by dataset split name."""
predict_metrics_by_split = collections.defaultdict(list)
for m in state.metrics:
if not isinstance(m, PredictionMetrics):
continue
split = _try_to_extract_split(m.event.dataset_key) or UNKNOWN_SPLIT_NAME
predict_metrics_by_split[split].append(m)
return dict(predict_metrics_by_split)
def _compute_single_label_results(
metrics: Sequence[PredictionMetrics]) -> metrics_aggregators.Results:
"""Computes results for single class case."""
num_events = 0
top_one_correct = 0
num_examples = 0
total_accuracy = 0.0
for m in metrics:
if not isinstance(m.metrics, classification_metrics.ClassificationMetrics):
continue
num_events += 1
total_accuracy += m.metrics.top_one_accuracy
top_one_correct += m.metrics.top_one_correct
num_examples += m.metrics.num_examples
if num_examples == 0:
weighted_accuracy = float("nan")
accuracy = float("nan")
else:
weighted_accuracy = top_one_correct / num_examples
accuracy = total_accuracy / num_events
return {
"weighted_average_single_label_accuracy": weighted_accuracy,
"average_single_label_accuracy": accuracy,
}
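# A hedged numeric sketch (not part of the original file): the two aggregates
# above differ when prediction events have different sizes. With one event of
# 100 examples at 90% top-one accuracy and another of 10 examples at 50%:
#   weighted_average_single_label_accuracy = (90 + 5) / 110 ~= 0.864
#   average_single_label_accuracy = (0.9 + 0.5) / 2 = 0.7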
def _compute_multi_label_results(
metrics: Sequence[PredictionMetrics]) -> metrics_aggregators.Results:
"""Computes results for multi label case."""
num_events = 0
total_mean_average_precision = 0.0
for m in metrics:
if not isinstance(
m.metrics,
multi_label_classification_metrics.MultiLabelClassificationMetrics):
continue
num_events += 1
total_mean_average_precision += m.metrics.mean_average_precision
if num_events == 0:
mean_mean_average_precision = float("nan")
else:
mean_mean_average_precision = total_mean_average_precision / num_events
# TODO: Find a better way to combine mAP.
return {
"average_multi_label_mean_average_precision": mean_mean_average_precision
}
def _combined_train_resources_used(
state: NevisMetricsState) -> learner_interface.ResourceUsage:
"""Computes total train resources used so far."""
result = None
for m in state.metrics:
if not isinstance(m, TrainMetrics):
continue
if result is None:
result = m.resources_used
else:
result = result.combine(m.resources_used)
if result is None:
return learner_interface.ResourceUsage()
return result
def _num_train_events(state: NevisMetricsState) -> int:
return sum(1 for m in state.metrics if isinstance(m, TrainMetrics))
def _try_to_extract_split(dataset_key: str) -> Optional[str]:
"""Attempts to compute the split from the dataset key.
For the Nevis stream, the dataset splits are stored at the end of the dataset
key, as `<dataset_name>_<split>`.
Args:
dataset_key: The key to try and compute the split for.
Returns:
The split name, or None if no match was found.
"""
suffices_by_length = sorted(KNOWN_SPLIT_SUFFICES, key=lambda x: -len(x))
for suffix in suffices_by_length:
if dataset_key.endswith("_" + suffix):
return suffix
return None
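# A hedged usage sketch (not part of the original file): because suffixes are
# tried longest-first, a key ending in "_dev_test" resolves to "dev_test" rather
# than "test". The dataset keys below are illustrative only.
def _try_to_extract_split_demo() -> None:
  assert _try_to_extract_split("some_dataset_dev_test") == "dev_test"
  assert _try_to_extract_split("some_dataset_train") == "train"
  assert _try_to_extract_split("no_known_suffix_here") is None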
def _create_output_dir() -> str:
result = os.path.join(DEFAULT_OUTPUT_DIR, "predictions")
if not gfile.exists(result):
gfile.makedirs(result)
return result
class WrappedPredictionsWriter:
"""A writer for storing raw predictions to an output file.
This writer wraps a prediction iterator and copies the raw outputs
and targets in memory. When the context managed by this object is closed,
  the raw targets and outputs are each concatenated into a single numpy array,
  and the result is written as a multi-array `.npz` file to the output path.
"""
def __init__(self, predictions: Iterator[learner_interface.Predictions], *,
path: str, task: tasks.TaskKey):
if task.kind not in {
tasks.TaskKind.CLASSIFICATION,
tasks.TaskKind.MULTI_LABEL_CLASSIFICATION,
}:
raise ValueError("Cannot save predictions for unsupported task: {task}")
self._task = task
self._path = path
self._iter = predictions
self._raw_targets = []
self._raw_outputs = []
def __enter__(self):
return self
def __exit__(self, *args, **kwargs):
del args, kwargs
if not self._raw_outputs:
logging.warning("Skipping writing empty predictions...")
return
logging.info("Writing targets and outputs to local files...")
targets = np.concatenate(self._raw_targets, axis=0)
outputs = np.concatenate(self._raw_outputs, axis=0)
    # tf.io.gfile is not seekable and thus fails to write in w+ mode;
    # instead, we write to a temporary buffer first.
# https://github.com/tensorflow/tensorflow/issues/32090#issuecomment-986135710
io_buffer = io.BytesIO()
np.savez(io_buffer, targets=targets, outputs=outputs)
with gfile.GFile(self._path, "wb") as outfile:
logging.info("Writing raw targets and outputs to %s", self._path)
outfile.write(io_buffer.getvalue())
logging.info("Finished writing raw targets and outputs.")
def __iter__(self):
return self
def __next__(self):
prediction = next(self._iter)
if self._task.kind is tasks.TaskKind.CLASSIFICATION:
targets, outputs = prediction.batch.label, prediction.output
self._raw_targets.append(targets)
self._raw_outputs.append(outputs[0])
elif self._task.kind is tasks.TaskKind.MULTI_LABEL_CLASSIFICATION:
targets, outputs = prediction.batch.multi_label_one_hot, prediction.output
self._raw_targets.append(targets)
self._raw_outputs.append(np.stack(outputs, axis=1))
else:
raise ValueError(f"Unsupported task: {self._task.kind}")
return prediction
|
dm_nevis-master
|
experiments_jax/metrics/nevis_metrics.py
|
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
|
dm_nevis-master
|
experiments_jax/metrics/__init__.py
|
"""Support creation of search spaces for hyperparameter searches."""
import math
import random
import typing
from typing import Any, Sized, Iterable, Protocol, TypeVar
T = TypeVar('T')
@typing.runtime_checkable
class SizedIterable(Sized, Iterable[T], Protocol[T]):
"""A type annotation to represent types with __len__ and __iter__ methods.
Types satisfying this interface may be iterated over, to yield a fixed number
  of elements of type T. The total number of elements yielded is equal to the
  value returned by __len__ for the type. There is no requirement that the
  yielded elements be identical on each iteration, and this property is useful
  for representing sequences of values sampled from a probability distribution.
>>> x: SizedIterable[int] = [1, 2, 3]
>>> isinstance(x, SizedIterable)
True
"""
Value = Any
Overrides = dict[str, Value]
Sweep = SizedIterable[Overrides]
def sweep(key: str, values: SizedIterable[Value]) -> Sweep:
"""Combines a key with values to create a sweep.
>>> list(sweep('a', [1, 2, 3]))
[{'a': 1}, {'a': 2}, {'a': 3}]
Each time the sweep is iterated, the sweep is re-generated from the values.
This means each subsequent iteration of the sweep can yield different values,
if the underlying values are from a randomized source. This makes it possible
  to build search spaces by taking products of sweeps, but requires some care
to ensure determinism.
Args:
key: The key that this sweep should override with values.
values: The iterable values to apply to the key.
Returns:
An iterable, sized object which may be iterated to yield overrides.
"""
class ValueSweep:
def __len__(self):
return len(values)
def __iter__(self):
yield from ({key: value} for value in values)
return ValueSweep()
def chain(*sweeps: Sweep) -> Sweep:
"""Chains sweeps together.
>>> list(chain([{'a': 1}, {'a': 2}], [{'b': 1}, {'b': 2}]))
[{'a': 1}, {'a': 2}, {'b': 1}, {'b': 2}]
Args:
*sweeps: A sequence of sweeps.
Returns:
A new sweep which, on each iteration, iterates over the given sweeps in
order.
"""
class ChainSweep:
def __len__(self):
return sum(len(s) for s in sweeps)
def __iter__(self):
for s in sweeps:
yield from s
return ChainSweep()
def zipit(*sweeps: Sweep) -> Sweep:
"""Zips sweeps together.
>>> list(zipit([{'a': 1}, {'a': 2}], [{'b': 1}, {'b': 2}]))
[{'a': 1, 'b': 1}, {'a': 2, 'b': 2}]
Args:
*sweeps: A sequence of sweeps.
Returns:
A new sweep which, on each iteration, will iterate all of the given sweeps
together and combine their overrides.
"""
lengths = set(len(s) for s in sweeps)
if len(lengths) != 1:
msg = ', '.join(f'{type(s)}: {len(s)}' for s in sweeps)
raise ValueError(f'zip expects sweeps to have same length. Got {msg}')
class ZipSweep:
"""A collection of sweeps combined using zipit."""
def __len__(self):
return len(sweeps[0])
def __iter__(self):
iters = [iter(s) for s in sweeps]
for _ in range(len(sweeps[0])):
result = {}
for it in iters:
result.update(next(it))
yield result
return ZipSweep()
def product(*sweeps: Sweep) -> Sweep:
"""Lazily generates the Cartesian product over sweeps.
>>> list(product([{'a': 1}, {'a': 2}], [{'b': 1}, {'b': 2}]))
[{'a': 1, 'b': 1}, {'a': 1, 'b': 2}, {'a': 2, 'b': 1}, {'a': 2, 'b': 2}]
For the product over sweeps s0, s1, containing non-randomly-sampled values,
the behaviour is identical to
>>> ({**a, **b} for a, b in itertools.product(s0, s1))
If the sweeps contain randomly sampled value ranges however, product will
guarantee that every sample returned is uniquely drawn from the underlying
sweeps.
Args:
*sweeps: A sequence of sweeps.
Returns:
A new sweep which, on each iteration, computes the product of the given
sweeps. We guarantee that every value drawn from the underlying sweeps is
used exactly once in the returned sweep. This means that it is safe to use
product with randomly sampled sweeps, and values will not be duplicated
across the overrides. This is a key difference from, for example,
itertools.product, or hyper.
"""
class ProductSweep:
"""A collection of sweeps combined using product."""
def __len__(self):
return math.prod(len(s) for s in sweeps)
def __iter__(self):
if not sweeps:
yield from []
elif len(sweeps) == 1:
yield from sweeps[0]
elif len(sweeps) > 2:
yield from product(product(sweeps[0], sweeps[1]), *sweeps[2:])
else:
# Note that it would feel more natural to use the following code below,
#
# for a in sweeps[0]:
# for b in sweeps[1]:
# yield {**a, **b}
#
# However, this would introduce a subtle bug: sweeps[1] would be
# iterated len(sweeps[1]) times, and sweeps[0] would only be iterated
# once, with each yielded value being reused several times in the inner
# loop. Since sweeps may be over randomly sampled ranges, this can cause
# the distribution of the returned values to collapse, resulting in
# repeated values that should have been drawn independently at random.
#
# To solve this, we maintain a cache of iterators over the outer sweep,
# with one iterator for each element of the inner sweep. Each time we
# must sample from the outer iterator in the inner loop, we advance the
# appropriate cached iterator.
#
# As a further optimization, the iterators are lazily constructed on
# first access, to allow us to efficiently iterate over the first few
# elements of very large products.
#
# This solution is optimal in the number of samples drawn from the
# iterators, and uses memory proportional to the length of the inner
# iterator. Furthermore, the order of iteration of the values is
# identical to the naive loop implementation, for the case of iterables
# that always yield the same values (such as regular sequences).
iters = []
for i in range(len(sweeps[0])):
for j, dct in enumerate(sweeps[1]):
if i == 0:
iters.append(iter(sweeps[0]))
yield {**next(iters[j]), **dct}
return ProductSweep()
def log_uniform_random_values(lo: float, hi: float, *, n: int,
seed: int) -> SizedIterable[float]:
"""Returns an iterable that yields log-uniformly distributed values.
>>> list(log_uniform_random_values(1e-1, 1e-3, n=3, seed=0))
[0.0020471812581430546, 0.003048535060583298, 0.014416400482129478]
Args:
lo: The lower-bound of the log-uniform random number generator.
hi: The upper-bound of the log-uniform random number generator.
n: The number of values returned during a single iteration over the values.
seed: A value to seed the random number generator.
Returns:
A collection of floats sampled from a log-uniform distribution [lo, hi].
Note that each iteration over the values will be different from the last,
but that the values are always deterministic.
"""
values = uniform_random_values(math.log(lo), math.log(hi), n=n, seed=seed)
class LogUniformRandomValues:
def __len__(self):
return n
def __iter__(self):
return (math.exp(v) for v in values)
return LogUniformRandomValues()
def uniform_random_values(lo: float, hi: float, *, n: int,
seed: int) -> SizedIterable[float]:
"""Returns an iterable that yields uniformly distributed values.
>>> list(uniform_random_values(0, 1, n=3, seed=0))
[0.8444218515250481, 0.7579544029403025, 0.420571580830845]
Args:
lo: The lower-bound of the uniform random number generator.
hi: The upper-bound of the uniform random number generator.
n: The number of values returned during a single iteration over the values.
seed: A value to seed the random number generator.
Returns:
A collection of floats sampled from a uniform distribution [lo, hi].
Note that each iteration over the values will be different from the last,
but that the values are always deterministic.
"""
rng = random.Random(seed)
class UniformRandomValues:
def __len__(self):
return n
def __iter__(self):
return (rng.uniform(lo, hi) for _ in range(n))
return UniformRandomValues()
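# Illustrative usage sketch (not part of the original module), showing how the
# helpers above compose into a small, deterministic search space. The override
# keys 'learning_rate' and 'weight_decay' are hypothetical examples.
def _example_search_space() -> Sweep:
  lr_sweep = sweep('learning_rate',
                   log_uniform_random_values(1e-4, 1e-1, n=4, seed=0))
  wd_sweep = sweep('weight_decay', [0.0, 1e-4])
  # product() draws a fresh learning-rate sample for every weight-decay value,
  # so no random value is reused across the 8 resulting overrides.
  return product(lr_sweep, wd_sweep)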
|
dm_nevis-master
|
experiments_jax/training/hype.py
|
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A module to compute a transfer matrix using a KNN classifier."""
import datetime
import time
from typing import Any, Callable, Iterable, Mapping, Optional, Sequence, Tuple
from absl import logging
from dm_nevis.benchmarker.datasets import datasets
from dm_nevis.benchmarker.datasets import tasks
from dm_nevis.benchmarker.environment import datawriter_interface
from experiments_jax.training import evaluate_embeddings
from experiments_jax.training import transfer_oracle
import numpy as np
def compute_transfer_matrix_using_knn_classifier(
embedding_fn: evaluate_embeddings.EmbeddingFn,
tasks_and_train_states: Sequence[Tuple[
tasks.TaskKey, evaluate_embeddings.EmbeddingFnState]],
train_dataset: datasets.Dataset,
test_dataset: datasets.Dataset,
*,
batch_size: int,
preprocessing_fn: Callable[[datasets.MiniBatch], datasets.MiniBatch],
) -> transfer_oracle.TransferMatrix:
"""Computes a single-column transfer matrix using an embedding function.
Args:
embedding_fn: A callable that computes embeddings for examples given a
state.
tasks_and_train_states: The viable tasks and train states that can be
transferred from.
train_dataset: The dataset to use for training a model using the
embedding.
test_dataset: The dataset to use to test the embeddings.
batch_size: The batch size to use when computing the embeddings.
preprocessing_fn: A function to map over the datasets (e.g. image resizing)
before calling embed.
Returns:
A transfer matrix estimated from the input train states. Note that the
transfer matrix only has a single column, corresponding to the task associated
with the train and test dataset.
"""
task_keys = [t[0] for t in tasks_and_train_states]
states = [t[1] for t in tasks_and_train_states]
logging.info('Evaluating transfer using KNN for %d states', len(task_keys))
start_time = time.monotonic()
results = evaluate_embeddings.evaluate(
embedding_fn,
states=states,
train_dataset=train_dataset,
test_dataset=test_dataset,
batch_size=batch_size,
preprocessing_fn=preprocessing_fn,
)
elapsed = datetime.timedelta(seconds=(time.monotonic() - start_time))
_log_summary(elapsed, train_dataset.task_key, results, task_keys)
# Note(rhemsley): The transfer matrix uses smaller values to indicate better
# transfer, so subtract all weights from 1 here.
matrix = np.array([[1 - r.weight] for r in results])
return transfer_oracle.TransferMatrix(
source_tasks=task_keys,
target_tasks=[train_dataset.task_key],
matrix=matrix,
)
def publish_transfer_matrix(
metrics: datawriter_interface.DataWriter,
matrix: transfer_oracle.TransferMatrix,
*,
extra_context: Optional[Mapping[str, Any]],
) -> None:
"""Publishes transfer values to a metrics writer."""
logging.info('Publishing computed transfer values to metrics writer...')
target_task, *other_target_tasks = matrix.target_tasks
if other_target_tasks:
raise ValueError(
f'Expected only a single target task, got {matrix.target_tasks}')
extra_context = extra_context or {}
for i, (source_task, weight) in enumerate(matrix.transfer_tasks(target_task)):
metrics.write({
'target_task_name': target_task.name,
'source_task_name': source_task.name,
'source_task_column_index': i,
'weight': weight,
**extra_context,
})
metrics.flush()
def _log_summary(elapsed: datetime.timedelta, target_task: tasks.TaskKey,
results: Sequence[evaluate_embeddings.EvaluationResult],
task_keys: Sequence[tasks.TaskKey]) -> None:
"""Logs the results of the KNN transfer weightings."""
sorted_tasks = sorted((r.weight, task, i)
for i, (r, task) in enumerate(zip(results, task_keys)))
headers = ['Weight (higher is better)', 'Source Task', 'Original Index']
logging.info(
'Computed transfer matrix in %s for task %s\n%s\n',
elapsed,
target_task,
_tabulate(reversed(sorted_tasks), headers),
)
def _tabulate(elements: Iterable[Sequence[Any]], headers: Iterable[Any]) -> str:
"""Builds an ASCII table of results."""
# TODO: Work out why we can't easily depend on tabulate, due to
# visibility issues and global tap failures.
headers = list(headers)
elements = list(elements)
max_length_by_col = [len(str(h)) for h in headers]
for row in elements:
for i, r in enumerate(row):
max_length_by_col[i] = max(max_length_by_col[i], len(str(r)))
def format_line(values, max_length_by_col):
line = '|'
for v, n in zip(values, max_length_by_col):
line += f' {v} '.ljust(n + 3) + '|'
return line
length = (sum(max_length_by_col) + 1 + 4 * len(max_length_by_col))
lines = []
lines.append('-' * length)
lines.append(format_line(headers, max_length_by_col))
lines.append('-' * length)
for row in elements:
lines.append(format_line(row, max_length_by_col))
lines.append('-' * length)
return '\n'.join(lines)
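# Illustrative sketch (not part of the original module): computing a
# one-column transfer matrix and publishing it. All arguments here are
# hypothetical placeholders supplied by the caller; the batch size and the
# extra context are arbitrary examples.
def _example_compute_and_publish(embedding_fn, tasks_and_train_states,
                                 train_dataset, test_dataset, metrics_writer):
  matrix = compute_transfer_matrix_using_knn_classifier(
      embedding_fn,
      tasks_and_train_states,
      train_dataset,
      test_dataset,
      batch_size=32,
      preprocessing_fn=lambda batch: batch)
  publish_transfer_matrix(
      metrics_writer, matrix, extra_context={'stage': 'example'})
  return matrix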
|
dm_nevis-master
|
experiments_jax/training/transfer_matrix_from_knn.py
|
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Precomputed transfer matrix."""
import itertools
import json
from typing import Any, Dict, List, Sequence, Tuple
from dm_nevis.benchmarker.datasets import tasks
import numpy as np
class TransferMatrix:
"""Represents a transfer matrix between TaskKeys.
The transfer matrix is represented as an R^(n, m) matrix, where n is the
number of source tasks (tasks that can be transferred from) and m is the
number of target tasks.
Given the transfer matrix M, we take
M[i,j] := e_j(p_i) - c(i, j).
Where e_j(p_i) is an estimated error achieved when fitting a model to solve
the target task j using parameters p_i, trained using source task i.
c(i, j) is a computed normalizing offset.
The normalizing c(i, j) offset may be zero, if no extra information is known,
otherwise c(i, j) represents e_j(p_z), where p_z represents a randomly
initialized set of parameters.
This means that when selecting tasks to transfer, smaller values correspond
to the best source tasks.
TODO: unfortunately, given this convention, it appears to not be
possible to distinguish the case where the value c(.,.) is known, and it is
not advantageous to transfer from any existing task, from the case where no
offsetting constants are known. We should find a way to improve this.
"""
def __init__(self, source_tasks: Sequence[tasks.TaskKey],
target_tasks: Sequence[tasks.TaskKey], matrix: np.ndarray):
"""Represents a transfer matrix from a source-tasks to target tasks."""
assert isinstance(matrix, np.ndarray)
assert matrix.shape == (len(source_tasks), len(target_tasks))
self.source_tasks = source_tasks
self.target_tasks = target_tasks
self.transfer_matrix = matrix
@classmethod
def from_file(cls, filename: str) -> "TransferMatrix":
"""Loads a transfer matrix from the given file.
The file has to be a .json file with three fields: "source_tasks" and
"target_tasks" are lists of TaskKeys describing the rows and columns
of the transfer matrix, respectively.
"matrix" contains a numpy array. The rows correspond to source tasks and
the columns to target tasks. The upper triangular part describes causally
admissible pairs when sequentially ingesting tasks.
See colabs/transfer_matrix.ipynb to recreate a transfer matrix.
Args:
filename: The filename for a transfer-matrix JSON file. If the supplied
filename comes without a directory component, it is assumed to be relative
to the ./transfer_matrices/ resource directory.
Returns:
A TransferMatrix instance.
"""
with open(filename, "rb") as f:
return cls.from_dict(json.load(f))
@classmethod
def from_dict(cls, d: Dict[str, Any]) -> "TransferMatrix":
return cls(
source_tasks=[tasks.TaskKey.from_dict(e) for e in d["source_tasks"]],
target_tasks=[tasks.TaskKey.from_dict(e) for e in d["target_tasks"]],
matrix=np.asarray(d["matrix"]))
def to_dict(self) -> Dict[str, Any]:
return dict(
source_tasks=[task.to_dict() for task in self.source_tasks],
target_tasks=[task.to_dict() for task in self.target_tasks],
matrix=self.transfer_matrix.tolist())
def task_key_by_name(self, name: str) -> tasks.TaskKey:
"""Returns the task_key for the given task name."""
for tk in itertools.chain(self.target_tasks, self.source_tasks):
if tk.name == name:
return tk
raise ValueError(f"Unknown task_name '{name}'")
def transfer_tasks(self,
task: tasks.TaskKey) -> List[Tuple[tasks.TaskKey, float]]:
"""Returns a list of source transfer tasks, ordered by their usefulness.
Args:
task: a target task to transfer to.
Returns:
a list of (task_key, transfer)-tuples, ordered with the best transfer first (i.e. the smallest transfer values first).
"""
if task not in self.target_tasks:
return []
col_idx = self.target_tasks.index(task)
transfer_column = self.transfer_matrix[:, col_idx]
return [(self.source_tasks[i], transfer_column[i])
for i in transfer_column.argsort()]
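# Illustrative sketch (not part of the original module): building a tiny
# transfer matrix by hand and querying it. The task names and matrix values
# below are made up; smaller entries indicate better transfer, so "source_a"
# is returned ahead of "source_b".
def _example_transfer_matrix() -> TransferMatrix:
  source_a = tasks.TaskKey("source_a", tasks.TaskKind.CLASSIFICATION,
                           tasks.ClassificationMetadata(num_classes=10))
  source_b = tasks.TaskKey("source_b", tasks.TaskKind.CLASSIFICATION,
                           tasks.ClassificationMetadata(num_classes=10))
  target = tasks.TaskKey("target", tasks.TaskKind.CLASSIFICATION,
                         tasks.ClassificationMetadata(num_classes=5))
  matrix = np.array([[0.1], [0.4]])  # One column, for the single target task.
  transfer_matrix = TransferMatrix([source_a, source_b], [target], matrix)
  # transfer_matrix.transfer_tasks(target) -> [(source_a, 0.1), (source_b, 0.4)]
  return transfer_matrix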
|
dm_nevis-master
|
experiments_jax/training/transfer_oracle.py
|
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for experiments_jax.training.models."""
import functools
from absl.testing import absltest
from absl.testing import parameterized
from dm_nevis.benchmarker.datasets import tasks
from experiments_jax.training import models
from experiments_jax.training import modules
import haiku as hk
import jax.numpy as jnp
SUPPORTED_TASKS = frozenset({
tasks.TaskKey("task1", tasks.TaskKind.CLASSIFICATION,
tasks.ClassificationMetadata(num_classes=10)),
tasks.TaskKey("task2", tasks.TaskKind.CLASSIFICATION,
tasks.ClassificationMetadata(num_classes=20)),
})
class ModelsTest(parameterized.TestCase):
def test_modelbuilder(self):
batch_size = 2
image_size = 4
model = models.build_model(
functools.partial(modules.MLP, output_sizes=[16]),
supported_tasks=SUPPORTED_TASKS,
image_resolution=image_size)
prng = hk.PRNGSequence(0)
params, state = model.init(next(prng))
num_expected_modules = 1 + len(SUPPORTED_TASKS)
self.assertLen(params, num_expected_modules)
images = jnp.zeros([batch_size, image_size, image_size, 3])
labels = jnp.zeros([batch_size], dtype=jnp.int32)
for task in SUPPORTED_TASKS:
_, _ = model.loss_and_metrics[task](params, state, next(prng), images,
labels, True)
_ = model.predict[task](params, state, next(prng), images, True)
if __name__ == "__main__":
absltest.main()
|
dm_nevis-master
|
experiments_jax/training/models_test.py
|
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for evaluate_embeddings."""
from absl.testing import absltest
from absl.testing import parameterized
from dm_nevis.benchmarker.datasets import tasks
from dm_nevis.benchmarker.datasets.builders import test_dataset as test_dataset_builder
from experiments_jax.training import evaluate_embeddings
import numpy as np
EMBEDDING_DIMS = 2
IMAGE_SIZE = 8
class EvaluateEmbeddingsTest(parameterized.TestCase):
@parameterized.parameters([
tasks.TaskKind.CLASSIFICATION,
tasks.TaskKind.MULTI_LABEL_CLASSIFICATION,
])
def test_evaluate_embeddings_classification(self, task_kind):
train_dataset = test_dataset_builder.get_dataset(
split='train',
image_size=IMAGE_SIZE,
task_kind=task_kind,
)
test_dataset = test_dataset_builder.get_dataset(
split='test',
image_size=IMAGE_SIZE,
task_kind=task_kind,
)
def random_projection_embed(state, batch):
rng = np.random.default_rng(seed=state)
weights = rng.uniform(size=[IMAGE_SIZE * IMAGE_SIZE * 3, EMBEDDING_DIMS])
feats = np.reshape(batch.image, (batch.image.shape[0], -1))
return feats @ weights
# The states are used as seeds for the random projection embedding.
states = [1, 2, 3]
results = evaluate_embeddings.evaluate(
random_projection_embed,
states,
train_dataset=train_dataset,
test_dataset=test_dataset,
batch_size=10,
)
# The test task is easy to solve, and the KNN classifier should have less
# than 0.1 error rate.
max_weight = max(result.weight for result in results)
self.assertGreater(max_weight, 0.9)
def test_multilabel_knn_classifier(self):
"""Test the edge case that one of the labels is all the same value."""
classifier = evaluate_embeddings._MultiLabelKNNClassifier()
x = np.array([
[0],
[1],
[2],
[3],
[4],
])
# Note that the first column is all 0, and the final column is all 1 - this
# exercises the edge case in sklearn that attempts to guess the number of
# classes from the number of distinct values, where we know there are two.
y = np.array(
[
[0, 1, 0, 1],
[0, 1, 1, 1],
[0, 0, 0, 1],
[0, 0, 1, 1],
[0, 1, 0, 1],
],
dtype=np.int32,
)
classifier.fit(x, y)
result = classifier.predict([[0], [1], [2]])
expected = np.array([
[0., 0.6, 0.4, 1.],
[0., 0.6, 0.4, 1.],
[0., 0.6, 0.4, 1.],
])
np.testing.assert_allclose(expected, result)
if __name__ == '__main__':
absltest.main()
|
dm_nevis-master
|
experiments_jax/training/evaluate_embeddings_test.py
|
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for experiments_jax.training.trainer."""
import functools
from typing import Set
from absl.testing import absltest
from absl.testing import parameterized
from dm_nevis.benchmarker.datasets import datasets
from dm_nevis.benchmarker.datasets import tasks
from experiments_jax.training import models
from experiments_jax.training import modules
from experiments_jax.training import trainer
import haiku as hk
import jax.numpy as jnp
import optax
SUPPORTED_TASKS = [
tasks.TaskKey("task1", tasks.TaskKind.CLASSIFICATION,
tasks.ClassificationMetadata(num_classes=10)),
tasks.TaskKey("task2", tasks.TaskKind.CLASSIFICATION,
tasks.ClassificationMetadata(num_classes=20)),
]
def modules_in_params(params: hk.Params) -> Set[str]:
return {m for m, _, _ in hk.data_structures.traverse(params)}
class TrainerTest(parameterized.TestCase):
def test_no_frozen_parameters(self):
batch_size = 2
image_size = 4
prng = hk.PRNGSequence(0)
model = models.build_model(
functools.partial(modules.MLP, output_sizes=[16]),
supported_tasks=SUPPORTED_TASKS,
image_resolution=image_size)
optimizer = optax.sgd(0.1)
train_state = trainer.init_train_state(next(prng), model, optimizer)
expected_modules = {
"backbone/~/mlp/~/linear_0",
"task1_head/~/linear",
"task2_head/~/linear"}
self.assertSetEqual(
modules_in_params(train_state.trainable_params), expected_modules)
self.assertSetEqual(
modules_in_params(train_state.frozen_params), set())
update_fn = trainer.build_update_fn(SUPPORTED_TASKS[0], model, optimizer)
# Fake Data
batch = datasets.MiniBatch(
image=jnp.zeros([batch_size, image_size, image_size, 3]),
label=jnp.zeros([batch_size]), multi_label_one_hot=None)
train_state, _ = update_fn(batch, train_state)
self.assertSetEqual(
modules_in_params(train_state.trainable_params), expected_modules)
self.assertSetEqual(
modules_in_params(train_state.frozen_params), set())
def test_frozen_parameters(self):
batch_size = 2
image_size = 4
prng = hk.PRNGSequence(0)
model = models.build_model(
functools.partial(modules.MLP, output_sizes=[16], name="mlp"),
supported_tasks=SUPPORTED_TASKS,
image_resolution=image_size)
optimizer = optax.sgd(0.1)
def load_params_fn(params, state):
# Consider parameters in the backbone frozen, heads are trainable:
train_params, frozen_params = hk.data_structures.partition(
lambda module_name, _1, _2: not module_name.startswith("backbone"),
params)
return train_params, frozen_params, state
train_state = trainer.init_train_state(
next(prng), model, optimizer, load_params_fn)
self.assertSetEqual(
modules_in_params(train_state.trainable_params),
{"task1_head/~/linear", "task2_head/~/linear"})
self.assertSetEqual(
modules_in_params(train_state.frozen_params),
{"backbone/~/mlp/~/linear_0"})
update_fn = trainer.build_update_fn(SUPPORTED_TASKS[0], model, optimizer)
# Fake Data
batch = datasets.MiniBatch(
image=jnp.zeros([batch_size, image_size, image_size, 3]),
label=jnp.zeros([batch_size]), multi_label_one_hot=None)
train_state, _ = update_fn(batch, train_state)
self.assertSetEqual(
modules_in_params(train_state.trainable_params),
{"task1_head/~/linear", "task2_head/~/linear"})
self.assertSetEqual(
modules_in_params(train_state.frozen_params),
{"backbone/~/mlp/~/linear_0"})
if __name__ == "__main__":
absltest.main()
|
dm_nevis-master
|
experiments_jax/training/trainer_test.py
|
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Multihead models."""
from typing import Any, Callable, Dict, Iterable, Mapping, Optional, Tuple
import chex
import distrax
from dm_nevis.benchmarker.datasets import tasks
from experiments_jax.training import heads
import haiku as hk
import jax
import numpy as np
PredictFn = Callable[[hk.Params, hk.State, chex.PRNGKey, chex.ArrayTree, bool],
Tuple[chex.ArrayTree, hk.State]]
LossAndMetricsFn = Callable[
[hk.Params, hk.State, chex.PRNGKey, chex.ArrayTree, chex.ArrayTree, bool],
Tuple[Tuple[chex.ArrayTree, chex.ArrayTree], hk.State]]
InitFn = Callable[[chex.PRNGKey], Tuple[hk.Params, hk.State]]
@chex.dataclass
class Model:
init: InitFn
predict: Mapping[tasks.TaskKey, PredictFn]
loss_and_metrics: Mapping[tasks.TaskKey, LossAndMetricsFn]
def build_model(model_ctor: Callable[..., Any],
supported_tasks: Iterable[tasks.TaskKey],
image_resolution: int,
head_kwargs: Optional[Dict[str, Any]] = None) -> Model:
"""Constructs a model with a backbone and multiple task heads.
Args:
model_ctor: Constructor for the backbone.
supported_tasks: The tasks that the returned model supports training on.
image_resolution: The supported image resolution of the returned model.
head_kwargs: kwargs for head constructor.
Returns:
A model implementing the independent baseline strategy.
"""
head_kwargs = head_kwargs or {}
init_fns = {}
predict_fns = {}
loss_and_metrics_fns = {}
for task in set(supported_tasks):
@hk.transform_with_state
def predict(img, is_training, task=task):
backbone = model_ctor(name="backbone")
head = heads.build_head({task}, **head_kwargs)
feats = backbone(img, is_training)
distributions = head.predict(feats, is_training)
probs = []
for distribution in distributions:
assert (isinstance(distribution, distrax.Categorical) or
isinstance(distribution, distrax.Bernoulli))
probs.append(distribution.probs)
return probs
@hk.transform_with_state
def loss_and_metrics(img, labels, is_training, task=task):
backbone = model_ctor(name="backbone")
head = heads.build_head({task}, **head_kwargs)
feats = backbone(img, is_training)
return head.loss_and_metrics(feats, labels, is_training)
init_fns[task] = loss_and_metrics.init
predict_fns[task] = jax.jit(predict.apply, static_argnums=[4])
loss_and_metrics_fns[task] = jax.jit(
loss_and_metrics.apply, static_argnums=[5])
def init(rng_key):
images = np.zeros([1, image_resolution, image_resolution, 3])
all_params = None
all_states = None
for task_key, fn in init_fns.items():
if task_key.kind == tasks.TaskKind.MULTI_LABEL_CLASSIFICATION:
num_classes = task_key.metadata.num_classes
labels = np.zeros([1, num_classes], dtype=np.int32)
else:
labels = np.zeros([1], dtype=np.int32)
params, state = fn(rng_key, images, labels, True)
if all_params is None:
all_params = params
all_states = state
else:
all_params = hk.data_structures.merge(all_params, params)
all_states = hk.data_structures.merge(all_states, state)
return all_params, all_states
return Model(
init=init,
predict=predict_fns,
loss_and_metrics=loss_and_metrics_fns,
)
def size_summary(params: chex.ArrayTree) -> str:
"""Returns a string summarizing the size of `params`."""
num_params = hk.data_structures.tree_size(params)
byte_size = hk.data_structures.tree_bytes(params)
return f"{num_params} params ({byte_size / 1e6:.2f}MB)"
def param_summary(params: chex.ArrayTree) -> str:
"""Returns a string with a detailed parameter breakdown."""
return "\n".join([
f" {m}/{n}: {v.shape} [{v.dtype}]"
for m, n, v in hk.data_structures.traverse(params)
])
|
dm_nevis-master
|
experiments_jax/training/models.py
|
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for experiments_jax.training.optimizers."""
from absl.testing import absltest
from absl.testing import parameterized
from experiments_jax.training import optimizers
import haiku as hk
import jax
import jax.numpy as jnp
class OptimizersTest(parameterized.TestCase):
def test_default_weight_decay_mask(self):
@hk.transform_with_state
def f(x, is_training):
x = hk.Linear(10, name='layer_1')(x)
x = hk.BatchNorm(
create_scale=True,
create_offset=True,
decay_rate=1e-3,
)(x, is_training=is_training)
x = hk.Linear(10, name='layer_2')(x)
return x
params, _ = f.init(jax.random.PRNGKey(0), jnp.zeros([2, 10]), True)
masked_params = optimizers.default_weight_decay_mask(params)
self.assertEqual(
masked_params,
{
'batch_norm': {
'offset': False,
'scale': False
},
'layer_1': {
'b': False,
'w': True
},
'layer_2': {
'b': False,
'w': True
}
},
)
if __name__ == '__main__':
absltest.main()
|
dm_nevis-master
|
experiments_jax/training/optimizers_test.py
|
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A module to split and load training data."""
from typing import Callable, Iterator
from absl import logging
from dm_nevis.benchmarker.datasets import datasets
import tensorflow as tf
import tensorflow_datasets as tfds
BatchIterator = Iterator[datasets.MiniBatch]
PreprocFn = Callable[[datasets.MiniBatch], datasets.MiniBatch]
# For datasets containing fewer than this number of elements, we cache the
# dataset in memory, before preprocessing is applied. This avoids problematic
# cases where very small datasets require many requests to the underlying
# file storage.
DATASET_SIZE_TO_CACHE = 5_000
def build_train_iterator(dataset: datasets.Dataset, preproc_fn: PreprocFn,
batch_size: int) -> Callable[[], BatchIterator]:
"""Builds a function that returns an iterator over the training data."""
def build_iterator() -> BatchIterator:
ds = dataset.builder_fn(shuffle=True)
if dataset.num_examples < DATASET_SIZE_TO_CACHE:
logging.info("Caching dataset with %d elements", dataset.num_examples)
ds = ds.cache()
buffer_size = min(DATASET_SIZE_TO_CACHE, dataset.num_examples)
ds = ds.shuffle(buffer_size, reshuffle_each_iteration=True)
ds = ds.repeat()
ds = ds.map(
preproc_fn,
num_parallel_calls=tf.data.AUTOTUNE) #, deterministic=False)
ds = ds.batch(batch_size)
ds = ds.prefetch(10)
return iter(tfds.as_numpy(ds))
return build_iterator
def build_prediction_iterator(dataset: datasets.Dataset, preproc_fn: PreprocFn,
batch_size: int) -> Callable[[], BatchIterator]:
"""Builds an iterator over batches for use in prediction."""
def build_iterator():
ds = dataset.builder_fn(shuffle=False)
ds = ds.map(preproc_fn, num_parallel_calls=tf.data.AUTOTUNE)
ds = ds.batch(batch_size)
ds = ds.prefetch(10)
return iter(tfds.as_numpy(ds))
return build_iterator
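# Illustrative sketch (not part of the original module): consuming the builder
# returned by build_train_iterator. The dataset and preprocessing function are
# supplied by the caller; the batch size of 32 is an arbitrary example.
def _example_first_batch(dataset: datasets.Dataset,
                         preproc_fn: PreprocFn) -> datasets.MiniBatch:
  build_iterator = build_train_iterator(dataset, preproc_fn, batch_size=32)
  iterator = build_iterator()
  return next(iterator)  # A datasets.MiniBatch of numpy arrays.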
|
dm_nevis-master
|
experiments_jax/training/dataloaders.py
|
dm_nevis-master
|
experiments_jax/training/__init__.py
|
|
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A module for evaluating embeddings using a KNN classifier."""
import dataclasses
from typing import Any, Callable, Sequence, Tuple
from absl import logging
import chex
from dm_nevis.benchmarker.datasets import datasets
from dm_nevis.benchmarker.datasets import tasks
import numpy as np
import sklearn.metrics
import sklearn.neighbors
import tensorflow as tf
import tensorflow_datasets as tfds
EmbeddingFnState = Any
EmbeddingFn = Callable[[EmbeddingFnState, datasets.MiniBatch], np.ndarray]
DEFAULT_BATCH_SIZE = 32
DEFAULT_MAX_TRAIN_DATASET_SIZE = 10_000
DEFAULT_MAX_TEST_DATASET_SIZE = 5_000
@dataclasses.dataclass(frozen=True)
class EvaluationResult:
weight: float
state: EmbeddingFnState
def evaluate(
embedding_fn: EmbeddingFn,
states: Sequence[EmbeddingFnState],
*,
train_dataset: datasets.Dataset,
test_dataset: datasets.Dataset,
batch_size: int = DEFAULT_BATCH_SIZE,
max_train_size: int = DEFAULT_MAX_TRAIN_DATASET_SIZE,
max_test_size: int = DEFAULT_MAX_TEST_DATASET_SIZE,
preprocessing_fn: Callable[[datasets.MiniBatch],
datasets.MiniBatch] = lambda x: x,
) -> Sequence[EvaluationResult]:
"""Given an embedding function, computes the embedding function quality.
Args:
embedding_fn: An embedding function, parameterized by a "state".
states: The states to use for the embedding function. The evaluation will be
computed over all of the given states. This makes it possible to compare
the embedding quality for multiple parameter sets.
train_dataset: The dataset to train a KNN classifier with.
test_dataset: A dataset of test examples to evaluate against.
batch_size: The maximum batch size to use when computing embeddings.
max_train_size: Limit the train dataset to this number of examples.
max_test_size: Limit the test dataset to this number of examples.
preprocessing_fn: A function to apply to the minibatches before embeddings
are computed. This will be called inside a tf.data pipeline, and so must be a
valid TensorFlow function that can operate in graph mode.
Returns:
A sequence of results, in the same order as the input states, each of which
corresponds to one of the states in the `states` argument. Each result has a
weight (larger weights are better), and the associated state that achieved
it. For classification tasks, the weight represents the accuracy on the test
set. For multi-label classification tasks, the weight represents the mAP
computed on the test dataset.
"""
task = train_dataset.task_key
train_ds, test_ds = _create_datasets(train_dataset, test_dataset, batch_size,
preprocessing_fn, max_train_size,
max_test_size)
result = []
for state in states:
if task.kind is tasks.TaskKind.MULTI_LABEL_CLASSIFICATION:
weight = _evaluate_multilabel_classification_embedding(
embedding_fn,
state,
train_ds,
test_ds,
)
elif task.kind is tasks.TaskKind.CLASSIFICATION:
weight = _evaluate_classification_embedding(
embedding_fn,
state,
train_ds,
test_ds,
)
else:
raise ValueError(f'Unsupported task kind: {task}')
result.append(EvaluationResult(weight, state))
return result
def _evaluate_multilabel_classification_embedding(
embedding_fn: EmbeddingFn,
state: EmbeddingFnState,
train_ds: tf.data.Dataset,
test_ds: tf.data.Dataset,
) -> float:
"""Evaluates multilabel classification tasks."""
train_embeddings, train_labels = _compute_multilabel_classification_embeddings(
embedding_fn,
state,
train_ds,
)
test_embeddings, test_labels = _compute_multilabel_classification_embeddings(
embedding_fn,
state,
test_ds,
)
classifier = _MultiLabelKNNClassifier()
classifier.fit(train_embeddings, train_labels)
predictions = classifier.predict(test_embeddings)
chex.assert_equal_shape([predictions, test_labels])
mean_average_precision = sklearn.metrics.average_precision_score(
test_labels,
predictions,
)
return mean_average_precision
def _evaluate_classification_embedding(
embedding_fn: EmbeddingFn,
state: EmbeddingFnState,
train_ds: tf.data.Dataset,
test_ds: tf.data.Dataset,
) -> float:
"""Evaluates an embedding function."""
train_embeddings, train_labels = _compute_classification_embeddings(
embedding_fn,
state,
train_ds,
)
test_embeddings, test_labels = _compute_classification_embeddings(
embedding_fn,
state,
test_ds,
)
classifier = sklearn.neighbors.KNeighborsClassifier()
classifier.fit(train_embeddings, train_labels)
predictions = classifier.predict(test_embeddings)
chex.assert_equal_shape([predictions, test_labels])
accuracy = (predictions == test_labels).mean()
return accuracy
def _compute_multilabel_classification_embeddings(
embedding_fn: EmbeddingFn,
state: EmbeddingFnState,
ds: tf.data.Dataset,
) -> Tuple[np.ndarray, np.ndarray]:
"""Computes embeddings for multilabel classification tasks."""
embeddings = []
labels = []
total_examples_seen = 0
for batch in tfds.as_numpy(ds):
logging.log_every_n_seconds(logging.INFO, 'Completed %d embeddings...',
10, total_examples_seen)
embeddings.append(embedding_fn(state, batch))
labels.append(batch.multi_label_one_hot.astype(np.int32))
total_examples_seen += batch.image.shape[0]
logging.info('Completed %d embeddings [done].', total_examples_seen)
embeddings = np.concatenate(embeddings, axis=0)
labels = np.concatenate(labels, axis=0)
chex.assert_rank(embeddings, 2)
chex.assert_rank(labels, 2)
return embeddings, labels
def _compute_classification_embeddings(
embedding_fn: EmbeddingFn,
state: EmbeddingFnState,
ds: tf.data.Dataset,
) -> Tuple[np.ndarray, np.ndarray]:
"""Computes embeddings for classification tasks."""
embeddings = []
labels = []
total_examples_seen = 0
for batch in tfds.as_numpy(ds):
logging.log_every_n_seconds(logging.INFO,
'Completed %d embeddings...', 10,
total_examples_seen)
embeddings.append(embedding_fn(state, batch))
labels.append(batch.label)
total_examples_seen += batch.image.shape[0]
logging.info('Completed %d embeddings [done].', total_examples_seen)
embeddings = np.concatenate(embeddings, axis=0)
labels = np.concatenate(labels, axis=0)
chex.assert_rank(embeddings, 2)
chex.assert_rank(labels, 1)
return embeddings, labels
def _create_datasets(
train_dataset: datasets.Dataset,
test_dataset: datasets.Dataset,
batch_size: int,
preprocessing_fn: Callable[[datasets.MiniBatch], datasets.MiniBatch],
max_train_size: int,
max_test_size: int,
) -> Tuple[tf.data.Dataset, tf.data.Dataset]:
"""Creates the datasets for training and testing."""
train_ds = train_dataset.builder_fn(shuffle=True)
train_ds = train_ds.take(max_train_size)
train_ds = train_ds.map(preprocessing_fn)
train_ds = train_ds.batch(batch_size).cache()
test_ds = test_dataset.builder_fn(shuffle=True)
test_ds = test_ds.take(max_test_size)
test_ds = test_ds.map(preprocessing_fn)
test_ds = test_ds.batch(batch_size).cache()
return train_ds, test_ds
class _MultiLabelKNNClassifier:
"""A multi-label classifier for multi-label binary classification tasks."""
def __init__(self):
self._classifier = sklearn.neighbors.KNeighborsClassifier()
def fit(self, x: np.ndarray, y: np.ndarray) -> None:
"""Fits a knn classifier for features x and labels y.
It is assumed that y is a binary vector with final dimension num_labels.
Args:
x: The features with shape (n_examples, n_features).
y: The labels with shape (n_examples, n_labels).
"""
if y.dtype != np.int32:
raise ValueError(f'y should have type np.int32, got {y.dtype}')
if np.amax(y) > 1 or np.amin(y) < 0:
raise ValueError(
f'y must contain only 0s and 1s, got {np.amax(y), np.amin(y)}')
self._y0 = y[0]
self._classifier.fit(x, y)
def predict(self, x: np.ndarray) -> np.ndarray:
"""Computes predictions for features x.
Args:
x: The features to compute predictions for.
Returns:
An array of shape (n_examples, n_labels), where result[i, j] represents the
probability that the jth label is present for the ith example.
"""
predictions = self._classifier.predict_proba(x)
result = []
# This little dance is required since sklearn "automatically" computes the
# number of classes for each label. If any label is all 1 or all 0, sklearn
# will presume that this label only has a single class. We thus have to
# handle this case explicitly.
for i, prediction in enumerate(predictions):
if prediction.shape[-1] == 2:
result.append(prediction)
elif prediction.shape[-1] == 1:
predicted_class = int(self._y0[i])
prediction = np.zeros((prediction.shape[0], 2))
prediction[:, predicted_class] = 1
result.append(prediction)
else:
raise ValueError(f'Unexpected num classes: {prediction.shape[-1]}')
# The result returned by sklearn is a list of length n_labels, each of
# which contains an array of shape (n_examples, n_classes).
# This is because sklearn supports the case that each label has a variable
# number of classes. In our case, we know they are all binary.
result = np.stack(result, axis=0)
result = np.transpose(result, (1, 0, 2))
result = result[:, :, 1]
return result
|
dm_nevis-master
|
experiments_jax/training/evaluate_embeddings.py
|
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A module for custom optimizers and masks."""
from typing import Any, FrozenSet, Optional
import chex
import optax
import tree
DEFAULT_NAMES_TO_DECAY = frozenset(['w'])
def sgdw(learning_rate: Any,
*,
weight_decay: float,
nesterov: Optional[float] = None,
momentum: Optional[float] = None,
mask: Optional[Any] = None) -> optax.GradientTransformation:
"""SGD with l2 weight decay."""
# Decay is applied before scaling by l.r., so it is close to the classical
# L2-regularized loss optimization.
return optax.chain(
optax.add_decayed_weights(weight_decay, mask),
optax.sgd(
learning_rate=learning_rate,
nesterov=nesterov,
momentum=momentum,
),
)
def default_weight_decay_mask(
updates: chex.ArrayTree,
*,
names_to_decay: FrozenSet[str] = DEFAULT_NAMES_TO_DECAY) -> chex.ArrayTree:
"""Masks all updates in the tree that don't have a name in the given list.
Args:
updates: The updates to mask.
names_to_decay: The names of the parameters to apply weight decay to.
Returns:
A tree of the same structure as updates, with the value True for all leaves
whose name is in the given set (and False otherwise).
"""
def mask(path, _):
return path[-1] in names_to_decay
return tree.map_structure_with_path(mask, updates)
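# Illustrative sketch (not part of the original module): combining sgdw with
# the default mask so that only parameters named 'w' receive weight decay.
# The hyperparameter values are arbitrary examples.
def _example_optimizer() -> optax.GradientTransformation:
  return sgdw(
      learning_rate=0.1,
      weight_decay=1e-4,
      momentum=0.9,
      mask=default_weight_decay_mask,
  )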
|
dm_nevis-master
|
experiments_jax/training/optimizers.py
|
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Hyperparameter tuner library.
This library is intended for executing multiple long running cost functions
within workers. Each time a cost function completes, the resulting cost is
compared to the values that have been computed so far, and ultimately the lowest
cost solution is returned.
This library contains the minimum interface required to distribute computation,
and currently uses concurrent.Executor instances to manage tasks. To generalize
to distributed compute, a failure-robust executor with the ability to schedule
on remote machines would be needed.
The cost function being executed by each worker supports checkpointing. These
checkpoints are written to a checkpoint store that is independent from the main
thread maintaining the minimum cost results found so far. We can safely do this
due to the idempotency of the workers - calls to the cost function are
stateless, and so the fact that they start from scratch or resume from a
checkpoint can be totally hidden from the code that is scheduling the job. From
the perspective of the top-level search, tasks are scheduled and eventually
complete. To ensure that the tasks and their checkpoints are all kept in sync
with the learner, each search has a unique `search_id` that is stored along with
all checkpoints (both in the top level search process, and in the workers).
"""
from concurrent import futures
import dataclasses
import json
import os
from typing import Any, Callable, Dict, NamedTuple, Optional, Sequence, Set, Protocol
import uuid
from absl import logging
from dm_nevis.benchmarker.environment import checkpointer_interface
from dm_nevis.benchmarker.learners import learner_interface
DEFAULT_TOPIC_NAME = 'default'
DEEPMIND_A100_DEVICES_PER_NODE = 16
DEFAULT_RETRIES_ON_FAILURE = 3
Context = Any
Outputs = Any
Cost = Any
Checkpoint = Any
CheckpointFn = Callable[[Checkpoint], None]
Overrides = Dict[str, Any]
# Note(rhemsley): Checkpointing can take several minutes and blocks the
# receiving of new tasks, so we disable checkpointing of responses by default,
# since it can delay the running of the searcher by a very long time.
# It may make sense to enable this for the case that jobs take very different
# amounts of time.
SHOULD_CHECKPOINT_RESPONSES = False
class CostFn(Protocol):
def __call__(self,
context: Context,
overrides: Overrides,
*,
write_checkpoint: CheckpointFn = lambda _: None,
checkpoint_to_resume: Optional[Checkpoint] = None) -> Cost:
...
CostFunctionBuilder = Callable[[], CostFn]
CheckpointerBuilder = Callable[[str], checkpointer_interface.Checkpointer]
@dataclasses.dataclass
class SearchResult:
cost: Cost
outputs: Outputs
overrides: Overrides
resources_used: learner_interface.ResourceUsage
class HyperParameterTunerWorkers(NamedTuple):
queue: Any
class JobKey(NamedTuple):
"""A key to uniquely identify a job in a hyperparameter search."""
search_id: str
job_index: int
def to_str(self) -> str:
return json.dumps({
'search_id': self.search_id,
'job_index': self.job_index,
})
@classmethod
def from_str(cls, v: str) -> 'JobKey':
dct = json.loads(v)
return cls(**dct)
@dataclasses.dataclass(frozen=True)
class Task:
context: Context
overrides: Overrides
@dataclasses.dataclass(frozen=True)
class HyperParameterTunerState:
search_id: str
min_cost: Optional[float]
min_cost_overrides: Optional[Overrides]
min_cost_outputs: Optional[Outputs]
completed_job_keys: Set[JobKey]
search_space: Sequence[Overrides]
total_resources_used: learner_interface.ResourceUsage
class HyperparameterTuner:
"""A hyperparameter tuner using a pool of workers."""
def __init__(self, workers: HyperParameterTunerWorkers):
self._queue = workers.queue
def minimize(self,
context: Context,
search_space: Sequence[Overrides],
*,
checkpoint_to_resume: Optional[Checkpoint] = None,
write_checkpoint: CheckpointFn = lambda _: None) -> SearchResult:
"""Minimizes the cost function over the search space using workers.
The tuner supports checkpointing and resuming the cost function tasks
using 1) the write_checkpoint() callback, which allows the running
hyperparameter tuner to store checkpointed state, and 2) the
checkpoint_to_resume value, which can be set to the value most recently
written to write_checkpoint before a failure occurred.
Args:
context: An arbitrary (picklable) state object to pass to the cost
function. This is for passing values to the cost function that do not
change in the search space.
search_space: The space over which to search. All configuration to be
passed to the cost function must be passed through here. The search space
must be deterministic, otherwise preemptions would cause all progress
made running training jobs to be lost.
checkpoint_to_resume: Resume a previously checkpointed hyperparameter
search.
write_checkpoint: A callable to checkpoint the progress of this search.
Returns:
The minimum cost obtained over the results. Note that the result is
non-deterministic.
"""
state = HyperParameterTunerState(
search_id=uuid.uuid4().hex,
min_cost=None,
min_cost_overrides=None,
min_cost_outputs=None,
completed_job_keys=set(),
search_space=tuple(search_space),
total_resources_used=learner_interface.ResourceUsage(
floating_point_operations=0,
peak_parameter_count=0,
peak_parameter_size_bytes=0),
)
if checkpoint_to_resume:
state = checkpoint_to_resume
logging.info('Resuming checkpointed search `%s`...', state.search_id)
assert isinstance(state, HyperParameterTunerState)
else:
# We checkpoint the initial state, to ensure a deterministic search space.
logging.info('Starting new search `%s`...', state.search_id)
write_checkpoint(state)
futures_to_job_key = {}
for i, overrides in enumerate(search_space):
job_key = _make_job_key(state.search_id, i)
if job_key in state.completed_job_keys:
logging.info('Skipping previously completed job: %s', job_key)
continue
logging.info('Enqueueing job: %s, overrides: %s', job_key, overrides)
task = Task(context, overrides)
future = self._queue.enqueue_task(job_key, task)
futures_to_job_key[future] = job_key
for future in futures.as_completed(futures_to_job_key):
job_key = futures_to_job_key[future]
result: SearchResult = future.result()
logging.info(
'Received result for task: %s (%d/%d), cost: %s',
job_key,
len(state.completed_job_keys) + 1,
len(futures_to_job_key),
result.cost,
)
state = dataclasses.replace(
state,
total_resources_used=state.total_resources_used.combine(
result.resources_used),
completed_job_keys=state.completed_job_keys | set([job_key]),
)
if state.min_cost is None or result.cost < state.min_cost:
state = dataclasses.replace(
state,
min_cost=result.cost,
min_cost_outputs=result.outputs,
min_cost_overrides=result.overrides,
)
if SHOULD_CHECKPOINT_RESPONSES:
write_checkpoint(state)
assert isinstance(state, HyperParameterTunerState)
logging.info('Minimum of %s achieved with %s', state.min_cost,
state.min_cost_overrides)
return SearchResult(
cost=state.min_cost,
outputs=state.min_cost_outputs,
overrides=state.min_cost_overrides,
resources_used=state.total_resources_used,
)
def default_checkpointer_builder(
job_namespace: str) -> checkpointer_interface.Checkpointer:
"""Builds a checkpointer that does nothing."""
class NoOpCheckpointer:
def write(self, state):
del state
def restore(self):
return None
del job_namespace
return NoOpCheckpointer()
def build_local_executor_workers(
cost_function_builder: CostFunctionBuilder,
*,
executor: Callable[..., futures.Executor],
num_workers: int,
checkpointer_builder: CheckpointerBuilder = default_checkpointer_builder,
) -> HyperParameterTunerWorkers:
"""Constructs workers that operate locally in a pool of processes."""
class LocalProcessPoolQueue:
"""Wrap a process pool to give a local task queue."""
def __init__(self, max_workers: int,
cost_function_builder: CostFunctionBuilder,
checkpointer_builder: CheckpointerBuilder):
self._pool = executor(max_workers=max_workers)
self._cost_function_builder = cost_function_builder
self._checkpointer_builder = checkpointer_builder
def close(self):
self._pool.shutdown()
def enqueue_task(self, job_key, task):
return self._pool.submit(_worker, job_key, task,
self._cost_function_builder,
self._checkpointer_builder)
return HyperParameterTunerWorkers(
LocalProcessPoolQueue(
max_workers=num_workers,
cost_function_builder=cost_function_builder,
checkpointer_builder=checkpointer_builder,
))
def _worker(
job_key: JobKey,
task: Task,
cost_function_builder: CostFunctionBuilder,
checkpointer_builder: CheckpointerBuilder,
) -> SearchResult:
"""Worker function to be executed on worker nodes."""
cost_function = cost_function_builder()
logging.info('[Process %s] Received task %s: %s', os.getpid(), job_key,
task.overrides)
job_namespace = _job_key_to_namespace(job_key)
checkpointer = checkpointer_builder(job_namespace)
cost, outputs, resources_used = cost_function(
task.context,
task.overrides,
checkpoint_to_resume=checkpointer.restore(),
write_checkpoint=checkpointer.write)
logging.info('Completed task %s: cost: %s', job_key, cost)
return SearchResult(cost, outputs, task.overrides, resources_used)
def _make_job_key(search_id: str, job_index: int) -> JobKey:
return JobKey(
search_id=search_id,
job_index=job_index,
)
def _job_key_to_namespace(job_key: JobKey) -> str:
return f'hyperparameter_search_{job_key.search_id}_job_{job_key.job_index}'
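# Illustrative sketch (not part of the original module): running the tuner
# locally over a toy quadratic cost. The override key 'x' is a hypothetical
# example; the cost function returns the (cost, outputs, resources_used)
# triple that _worker above unpacks.
def _example_cost_function_builder() -> CostFn:
  def cost_fn(context,
              overrides,
              *,
              write_checkpoint=lambda _: None,
              checkpoint_to_resume=None):
    del context, write_checkpoint, checkpoint_to_resume
    cost = (overrides['x'] - 3.0)**2
    resources = learner_interface.ResourceUsage(
        floating_point_operations=0,
        peak_parameter_count=0,
        peak_parameter_size_bytes=0)
    return cost, None, resources
  return cost_fn
def _example_search() -> SearchResult:
  workers = build_local_executor_workers(
      _example_cost_function_builder,
      executor=futures.ThreadPoolExecutor,
      num_workers=2)
  tuner = HyperparameterTuner(workers)
  return tuner.minimize(
      context=None, search_space=[{'x': 1.0}, {'x': 2.0}, {'x': 4.0}])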
|
dm_nevis-master
|
experiments_jax/training/hyperparameter_searcher.py
|
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implements customized ResNets that return embeddings and logits."""
from typing import Dict, Mapping, Optional, Sequence, Union
import haiku as hk
import jax
import jax.numpy as jnp
FloatStrOrBool = Union[str, float, bool]
def check_length(length, value, name):
if len(value) != length:
raise ValueError(f"`{name}` must be of length {length} not {len(value)}")
class ResNet(hk.nets.ResNet):
"""Original Resnet model that returns embeddings."""
def __init__(self, num_classes=10, **kwargs):
# haiku expects num_classes, but the top layer won't be instantiated.
super().__init__(num_classes=num_classes, **kwargs)
def __call__(self,
inputs: jnp.ndarray,
is_training: bool,
test_local_stats: bool = False) -> jnp.ndarray:
out = inputs
out = self.initial_conv(out)
if not self.resnet_v2:
out = self.initial_batchnorm(out, is_training, test_local_stats)
out = jax.nn.relu(out)
out = hk.max_pool(out,
window_shape=(1, 3, 3, 1),
strides=(1, 2, 2, 1),
padding="SAME")
for block_group in self.block_groups:
out = block_group(out, is_training, test_local_stats)
if self.resnet_v2:
out = self.final_batchnorm(out, is_training, test_local_stats)
out = jax.nn.relu(out)
out = jnp.mean(out, axis=(1, 2))
return out
class ResNet18(ResNet):
"""ResNet18 model."""
def __init__(
self,
bn_config: Optional[Mapping[str, FloatStrOrBool]] = None,
resnet_v2: bool = False,
name: Optional[str] = None,
initial_conv_config: Optional[Mapping[str, FloatStrOrBool]] = None,
strides: Sequence[int] = (1, 2, 2, 2),
):
"""Constructs a ResNet18 model that returns embeddings.
Args:
bn_config: A dictionary of two elements, ``decay_rate`` and ``eps`` to be
passed on to the :class:`~haiku.BatchNorm` layers.
resnet_v2: Whether to use the v1 or v2 ResNet implementation. Defaults to
``False``.
name: Name of the module.
initial_conv_config: Keyword arguments passed to the constructor of the
initial :class:`~haiku.Conv2D` module.
strides: A sequence of length 4 that indicates the size of stride of
convolutions for each block in each group.
"""
super().__init__(bn_config=bn_config,
initial_conv_config=initial_conv_config,
resnet_v2=resnet_v2,
strides=strides,
name=name,
**hk.nets.ResNet.CONFIGS[18])
class ResNet50(ResNet):
"""ResNet50 model."""
def __init__(
self,
bn_config: Optional[Mapping[str, FloatStrOrBool]] = None,
resnet_v2: bool = False,
name: Optional[str] = None,
initial_conv_config: Optional[Mapping[str, FloatStrOrBool]] = None,
strides: Sequence[int] = (1, 2, 2, 2),
):
"""Constructs a ResNet50 model that returns embeddings.
Args:
bn_config: A dictionary of two elements, ``decay_rate`` and ``eps`` to be
passed on to the :class:`~haiku.BatchNorm` layers.
resnet_v2: Whether to use the v1 or v2 ResNet implementation. Defaults
to ``False``.
name: Name of the module.
initial_conv_config: Keyword arguments passed to the constructor of the
initial :class:`~haiku.Conv2D` module.
strides: A sequence of length 4 that indicates the size of stride
of convolutions for each block in each group.
"""
super().__init__(num_classes=1, # fake head
bn_config=bn_config,
initial_conv_config=initial_conv_config,
resnet_v2=resnet_v2,
strides=strides,
name=name,
**hk.nets.ResNet.CONFIGS[50])
class CifarResNet(hk.Module):
"""ResNet model for CIFAR10 (almost) following original ResNet paper.
This is different from the haiku.nets.Resnet implementation in two ways:
  1. Initial convolution is 3x3 with stride 1 (instead of 7x7 with stride 2),
2. No max-pool before the block groups.
Note: the haiku.nets.Resnet implementation fits larger inputs better
(e.g., Imagenet).
  The original ResNet paper (arXiv:1512.03385) also uses fewer channels for its
  CIFAR variant, but we do not implement that here.
"""
def __init__(
self,
blocks_per_group: Sequence[int],
bn_config: Optional[Mapping[str, float]] = None,
resnet_v2: bool = False,
bottleneck: bool = True,
channels_per_group: Sequence[int] = (256, 512, 1024, 2048),
use_projection: Sequence[bool] = (True, True, True, True),
name: Optional[str] = None,
):
"""Constructs a ResNet model.
Args:
blocks_per_group: A sequence of length 4 that indicates the number of
blocks created in each group.
bn_config: A dictionary of two elements, ``decay_rate`` and ``eps`` to be
passed on to the :class:`~haiku.BatchNorm` layers. By default the
``decay_rate`` is ``0.9`` and ``eps`` is ``1e-5``.
resnet_v2: Whether to use the v1 or v2 ResNet implementation. Defaults to
``False``.
bottleneck: Whether the block should bottleneck or not. Defaults to
``True``.
channels_per_group: A sequence of length 4 that indicates the number
of channels used for each block in each group.
use_projection: A sequence of length 4 that indicates whether each
residual block should use projection.
name: Name of the module.
"""
super().__init__(name=name)
self.resnet_v2 = resnet_v2
bn_config = dict(bn_config or {})
bn_config.setdefault("decay_rate", 0.9)
bn_config.setdefault("eps", 1e-5)
bn_config.setdefault("create_scale", True)
bn_config.setdefault("create_offset", True)
# Number of blocks in each group for ResNet.
check_length(4, blocks_per_group, "blocks_per_group")
check_length(4, channels_per_group, "channels_per_group")
self.initial_conv = hk.Conv2D(
output_channels=64,
kernel_shape=3,
stride=1,
with_bias=False,
padding="SAME",
name="initial_conv")
if not self.resnet_v2:
self.initial_batchnorm = hk.BatchNorm(name="initial_batchnorm",
**bn_config)
self.block_groups = []
strides = (1, 2, 2, 2)
for i in range(4):
self.block_groups.append(
hk.nets.ResNet.BlockGroup(channels=channels_per_group[i],
num_blocks=blocks_per_group[i],
stride=strides[i],
bn_config=bn_config,
resnet_v2=resnet_v2,
bottleneck=bottleneck,
use_projection=use_projection[i],
name="block_group_%d" % (i)))
if self.resnet_v2:
self.final_batchnorm = hk.BatchNorm(name="final_batchnorm", **bn_config)
def __call__(self,
inputs: jnp.ndarray,
is_training: bool,
               test_local_stats: bool = False) -> jnp.ndarray:
out = inputs
out = self.initial_conv(out)
if not self.resnet_v2:
out = self.initial_batchnorm(out, is_training, test_local_stats)
out = jax.nn.relu(out)
for block_group in self.block_groups:
out = block_group(out, is_training, test_local_stats)
if self.resnet_v2:
out = self.final_batchnorm(out, is_training, test_local_stats)
out = jax.nn.relu(out)
out = jnp.mean(out, axis=(1, 2))
return out
class CifarResNet18(CifarResNet):
"""CifarResNet18."""
def __init__(self,
bn_config: Optional[Mapping[str, float]] = None,
resnet_v2: bool = False,
name: Optional[str] = None):
"""Constructs a ResNet model.
Args:
bn_config: A dictionary of two elements, ``decay_rate`` and ``eps`` to be
passed on to the :class:`~haiku.BatchNorm` layers.
resnet_v2: Whether to use the v1 or v2 ResNet implementation. Defaults
to ``False``.
name: Name of the module.
"""
super().__init__(blocks_per_group=(2, 2, 2, 2),
bn_config=bn_config,
resnet_v2=resnet_v2,
bottleneck=False,
channels_per_group=(64, 128, 256, 512),
use_projection=(False, True, True, True),
name=name)
class CifarResNet34(CifarResNet):
"""CifarResNet34."""
def __init__(self,
bn_config: Optional[Mapping[str, float]] = None,
resnet_v2: bool = False,
name: Optional[str] = None):
"""Constructs a ResNet model.
Args:
bn_config: A dictionary of two elements, ``decay_rate`` and ``eps`` to be
passed on to the :class:`~haiku.BatchNorm` layers.
resnet_v2: Whether to use the v1 or v2 ResNet implementation. Defaults
to ``False``.
      name: Name of the module.
"""
super().__init__(blocks_per_group=(3, 4, 6, 3),
bn_config=bn_config,
resnet_v2=resnet_v2,
bottleneck=False,
channels_per_group=(64, 128, 256, 512),
use_projection=(False, True, True, True),
name=name)
class CifarResNet50(CifarResNet):
"""CifarResNet50."""
def __init__(self,
bn_config: Optional[Mapping[str, float]] = None,
resnet_v2: bool = False,
name: Optional[str] = None):
"""Constructs a ResNet model.
Args:
bn_config: A dictionary of two elements, ``decay_rate`` and ``eps`` to be
passed on to the :class:`~haiku.BatchNorm` layers.
resnet_v2: Whether to use the v1 or v2 ResNet implementation. Defaults
to ``False``.
name: Name of the module.
"""
super().__init__(blocks_per_group=(3, 4, 6, 3),
bn_config=bn_config,
resnet_v2=resnet_v2,
bottleneck=True,
name=name)
class CifarResNet101(CifarResNet):
"""CifarResNet101."""
def __init__(self,
bn_config: Optional[Mapping[str, float]] = None,
resnet_v2: bool = False,
name: Optional[str] = None):
"""Constructs a ResNet model.
Args:
bn_config: A dictionary of two elements, ``decay_rate`` and ``eps`` to be
passed on to the :class:`~haiku.BatchNorm` layers.
resnet_v2: Whether to use the v1 or v2 ResNet implementation. Defaults
to ``False``.
name: Name of the module.
"""
super().__init__(blocks_per_group=(3, 4, 23, 3),
bn_config=bn_config,
resnet_v2=resnet_v2,
bottleneck=True,
name=name)
class CifarResNet152(CifarResNet):
"""CifarResNet152."""
def __init__(self,
bn_config: Optional[Mapping[str, float]] = None,
resnet_v2: bool = False,
name: Optional[str] = None):
"""Constructs a ResNet model.
Args:
bn_config: A dictionary of two elements, ``decay_rate`` and ``eps`` to be
passed on to the :class:`~haiku.BatchNorm` layers.
resnet_v2: Whether to use the v1 or v2 ResNet implementation. Defaults
to ``False``.
name: Name of the module.
"""
super().__init__(blocks_per_group=(3, 8, 36, 3),
bn_config=bn_config,
resnet_v2=resnet_v2,
bottleneck=True,
name=name)
class CifarResNet200(CifarResNet):
"""CifarResNet200."""
def __init__(self,
bn_config: Optional[Mapping[str, float]] = None,
resnet_v2: bool = False,
name: Optional[str] = None):
"""Constructs a ResNet model.
Args:
bn_config: A dictionary of two elements, ``decay_rate`` and ``eps`` to be
passed on to the :class:`~haiku.BatchNorm` layers.
resnet_v2: Whether to use the v1 or v2 ResNet implementation. Defaults
to ``False``.
name: Name of the module.
"""
super().__init__(blocks_per_group=(3, 24, 36, 3),
bn_config=bn_config,
resnet_v2=resnet_v2,
bottleneck=True,
name=name)
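if __name__ == "__main__":
  # Usage sketch (illustrative batch shape and RNG seed): wrap CifarResNet18
  # with hk.transform_with_state and run a forward pass on a CIFAR-sized batch.
  def forward_fn(images, is_training):
    return CifarResNet18()(images, is_training=is_training)

  forward = hk.transform_with_state(forward_fn)
  images = jnp.zeros([8, 32, 32, 3])
  rng = jax.random.PRNGKey(0)
  params, state = forward.init(rng, images, is_training=True)
  embeddings, _ = forward.apply(params, state, rng, images, is_training=False)
  print(embeddings.shape)  # (8, 512) for CifarResNet18.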
|
dm_nevis-master
|
experiments_jax/training/resnet.py
|
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for experiments_jax.training.modules."""
from absl.testing import absltest
from absl.testing import parameterized
from experiments_jax.training import modules
import haiku as hk
import jax.numpy as jnp
class ModelsTest(parameterized.TestCase):
def test_flatten_only(self):
batch_size = 4
height, width = 8, 8
image = jnp.zeros([batch_size, height, width, 3])
rng = hk.PRNGSequence(1)
def forward_fn(image, is_training):
model = modules.FlattenOnly()
return model(image, is_training=is_training)
forward_t = hk.transform_with_state(forward_fn)
params, state = forward_t.init(next(rng), image, is_training=True)
h_train, _ = forward_t.apply(
params, state, next(rng), image, is_training=True)
h_test, _ = forward_t.apply(
params, state, next(rng), image, is_training=False)
self.assertSequenceEqual(h_train.shape, [batch_size, height*width*3])
self.assertSequenceEqual(h_test.shape, [batch_size, height*width*3])
def test_mlp(self):
batch_size = 4
height, width = 8, 8
image = jnp.zeros([batch_size, height, width, 3])
rng = hk.PRNGSequence(1)
def forward_fn(image, is_training):
model = modules.MLP(output_sizes=[16, 16])
return model(image, is_training=is_training)
forward_t = hk.transform_with_state(forward_fn)
params, state = forward_t.init(next(rng), image, is_training=True)
h_train, _ = forward_t.apply(
params, state, next(rng), image, is_training=True)
h_test, _ = forward_t.apply(
params, state, next(rng), image, is_training=False)
self.assertSequenceEqual(h_train.shape, [batch_size, 16])
self.assertSequenceEqual(h_test.shape, [batch_size, 16])
def test_convnet(self):
batch_size = 4
height, width = 8, 8
image = jnp.zeros([batch_size, height, width, 3])
rng = hk.PRNGSequence(1)
def forward_fn(image, is_training):
model = modules.ConvNet()
return model(image, is_training=is_training)
forward_t = hk.transform_with_state(forward_fn)
params, state = forward_t.init(next(rng), image, is_training=True)
h_train, _ = forward_t.apply(
params, state, next(rng), image, is_training=True)
h_test, _ = forward_t.apply(
params, state, next(rng), image, is_training=False)
self.assertLen(h_train.shape, 2)
self.assertLen(h_test.shape, 2)
if __name__ == '__main__':
absltest.main()
|
dm_nevis-master
|
experiments_jax/training/modules_test.py
|
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for hyperparameter_searcher."""
from concurrent import futures
from absl.testing import absltest
from absl.testing import parameterized
from dm_nevis.benchmarker.learners import learner_interface
from experiments_jax.training import hyperparameter_searcher
DEFAULT_NUM_WORKERS = 3
class HyperparameterSearcherTest(parameterized.TestCase):
def test_hyperparameter_searcher(self):
def cost_function_builder():
def f(state, overrides, **kwargs):
del state, overrides, kwargs
return 0, None, learner_interface.ResourceUsage()
return f
workers = hyperparameter_searcher.build_local_executor_workers(
cost_function_builder,
num_workers=DEFAULT_NUM_WORKERS,
executor=futures.ThreadPoolExecutor)
_test_searcher(workers)
def _test_searcher(workers):
searcher = hyperparameter_searcher.HyperparameterTuner(workers)
# All config is passed through the search space. Fixed config is set
# using a search space with a single value.
search_space = _product([
_sweep('task_key', [0]),
_sweep('learning_rate', [1, 2, 3]),
])
result = searcher.minimize(None, search_space)
assert result.cost == 0
workers.queue.close()
def _product(sweeps):
dcts, *sweeps = sweeps
for sweep_dcts in sweeps:
new_dcts = []
for sweep_dct in sweep_dcts:
new_dcts.extend({**dct, **sweep_dct} for dct in dcts)
dcts = new_dcts
return dcts
def _sweep(key, values):
return [{key: value} for value in values]
if __name__ == '__main__':
absltest.main()
|
dm_nevis-master
|
experiments_jax/training/hyperparameter_searcher_test.py
|
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for transfer_matrix_from_knn."""
from absl.testing import absltest
from absl.testing import parameterized
from dm_nevis.benchmarker.datasets import tasks
from dm_nevis.benchmarker.datasets.builders import test_dataset as test_dataset_builder
from experiments_jax.training import transfer_matrix_from_knn
import numpy as np
class TransferMatrixFromKnnTest(parameterized.TestCase):
  def test_compute_transfer_matrix(self):
train_dataset = test_dataset_builder.get_dataset('train', start=0, end=10)
test_dataset = test_dataset_builder.get_dataset('test', start=0, end=10)
def embedding_fn(state, batch):
del state
return np.zeros([batch.image.shape[0], 32])
t1 = tasks.TaskKey(
'task_1',
tasks.TaskKind.CLASSIFICATION,
tasks.ClassificationMetadata(2),
)
t2 = tasks.TaskKey(
'task_2',
tasks.TaskKind.CLASSIFICATION,
tasks.ClassificationMetadata(2),
)
tasks_and_train_states = [(t1, 0), (t2, 1)]
m = transfer_matrix_from_knn.compute_transfer_matrix_using_knn_classifier(
embedding_fn,
tasks_and_train_states,
train_dataset=train_dataset,
test_dataset=test_dataset,
batch_size=2,
preprocessing_fn=lambda x: x,
)
self.assertEqual(m.source_tasks, [t1, t2])
self.assertEqual(m.target_tasks, [train_dataset.task_key])
if __name__ == '__main__':
absltest.main()
|
dm_nevis-master
|
experiments_jax/training/transfer_matrix_from_knn_test.py
|
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A module for estimating resource usage of jax functions."""
from typing import Any, Callable
import jax
def estimate_flops(fn: Callable[..., Any], *args, **kwargs) -> float:
"""Runs the given jitable JAX function with parameters and return #FLOPs."""
xe = jax.lib.xla_client._xla # pylint: disable=protected-access
xla_backend = jax.lib.xla_bridge.get_backend("cpu")
static_argnums = kwargs.pop("static_argnums", ())
c = jax.xla_computation(fn, static_argnums=static_argnums)(*args, **kwargs)
e = xla_backend.compile(c)
m, = e.hlo_modules()
analysis = xe.hlo_module_cost_analysis(xla_backend, m)
return float(analysis["flops"])
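if __name__ == "__main__":
  # Usage sketch (illustrative input): estimate the FLOPs of a small matrix
  # multiply. The exact count depends on the XLA backend in use.
  import jax.numpy as jnp

  x = jnp.ones((8, 8))
  print("Estimated FLOPs:", estimate_flops(lambda a: a @ a, x))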
|
dm_nevis-master
|
experiments_jax/training/resources.py
|
"""Tests for hype."""
from absl.testing import absltest
import chex
from experiments_jax.training import hype
class HypeTest(absltest.TestCase):
def test_search_space_consistent(self):
"""Test that the search space generated is deterministic."""
expected = [{
'lr': 0.0002529,
'ls': 0.2868102
}, {
'lr': 0.0348578,
'ls': 0.2843482
}, {
'lr': 0.0195579,
'ls': 0.0169654
}, {
'lr': 0.0005823,
'ls': 0.0254615
}, {
'lr': 0.0030641,
'ls': 0.2506496
}, {
'lr': 0.0022308,
'ls': 0.2207909
}, {
'lr': 0.0090111,
'ls': 0.2009191
}]
sweep = hype.zipit(
hype.sweep(
'lr',
hype.log_uniform_random_values(1e-4, 1e-1, seed=1, n=7),
), hype.sweep(
'ls',
hype.uniform_random_values(0.0, 0.3, seed=2, n=7),
))
chex.assert_trees_all_close(list(sweep), expected, atol=1e-4)
if __name__ == '__main__':
absltest.main()
|
dm_nevis-master
|
experiments_jax/training/hype_test.py
|
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Model architectures."""
import inspect
from typing import Any, Callable, Iterable, Optional, Sequence, Type
import chex
import haiku as hk
import jax
import jax.numpy as jnp
import numpy as np
class FlattenOnly(hk.Module):
"""Null-model: Just flatten the image.
The linear layer in the head will perform logistic regression.
"""
def __call__(self, image: chex.Array, is_training: bool) -> chex.Array:
del is_training
return hk.Flatten()(image)
class MLP(hk.Module):
"""Flatten the image and apply a naive MLP with dropout."""
def __init__(self,
output_sizes: Sequence[int] = (4096, 4096),
dropout_rate: Optional[float] = 0.5,
name: Optional[str] = None):
super().__init__(name)
self._dropout_rate = dropout_rate
self._model = hk.nets.MLP(output_sizes=output_sizes, activate_final=True)
def __call__(self, image: chex.Array, is_training: bool) -> chex.Array:
h = hk.Flatten()(image)
if is_training and self._dropout_rate:
h = self._model(h, dropout_rate=self._dropout_rate, rng=hk.next_rng_key())
else:
h = self._model(h)
return h
class ConvNet(hk.Module):
"""A naive ConvNet."""
def __init__(self, top_reduce: str = "mean", name: Optional[str] = None):
super().__init__(name=name)
self._top_reduce = top_reduce
self._initial_conv = hk.Conv2D(128, [5, 5], stride=3)
self._channels_per_stage = [
[128, 128],
[256, 256],
]
def __call__(self, image: chex.Array, is_training: bool) -> chex.Array:
del is_training
h = self._initial_conv(image)
h = jax.nn.relu(h)
for stage in self._channels_per_stage:
for channels in stage:
        h = hk.Conv2D(channels, [3, 3])(h)
h = jax.nn.relu(h)
h = hk.MaxPool([2, 2], [2, 2], padding="SAME", channel_axis=-1)(h)
if self._top_reduce == "flatten":
h = hk.Flatten()(h)
elif self._top_reduce == "mean":
h = jnp.mean(h, axis=[1, 2])
elif self._top_reduce == "max":
h = jnp.max(h, axis=[1, 2])
else:
raise ValueError(f"Unknown reduction-type {self._top_reduce}")
return h
VGG_DEFAULT_CHANNELS = (64, 64, 128, 128, 128, 256, 256, 256, 512, 512, 512)
VGG_DEFAULT_STRIDES = (1, 1, 2, 1, 1, 2, 1, 1, 2, 1, 1)
VGG_4X_CHANNELS = tuple(4*c for c in VGG_DEFAULT_CHANNELS)
class VGG(hk.Module):
"""VGG Network with dropout, batchnorm and without maxpool."""
def __init__(self,
output_channels: Sequence[int] = VGG_DEFAULT_CHANNELS,
strides: Sequence[int] = VGG_DEFAULT_STRIDES,
dropout_rate: float = 0.0,
name: Optional[str] = None):
super().__init__(name=name)
self._dropout_rate = dropout_rate
self._output_channels = output_channels
self._strides = strides
self._kernel_shapes = [[3, 3]] * len(self._output_channels)
num_channels = len(self._output_channels)
self._conv_modules = [
hk.Conv2D( # pylint: disable=g-complex-comprehension
output_channels=self._output_channels[i],
kernel_shape=self._kernel_shapes[i],
stride=self._strides[i],
name=f"conv_2d_{i}") for i in range(num_channels)
]
self._bn_modules = [
hk.BatchNorm( # pylint: disable=g-complex-comprehension
create_offset=True,
create_scale=False,
decay_rate=0.999,
name=f"batch_norm_{i}") for i in range(num_channels)
]
def __call__(self, inputs, is_training, test_local_stats=True):
h = inputs
for conv_layer, bn_layer in zip(self._conv_modules, self._bn_modules):
h = conv_layer(h)
h = bn_layer(
h, is_training=is_training, test_local_stats=test_local_stats)
if self._dropout_rate > 0 and is_training:
h = hk.dropout(hk.next_rng_key(), rate=self._dropout_rate, x=h)
h = jax.nn.relu(h)
# Avg pool along axis 1 and 2
h = jnp.mean(h, axis=[1, 2], keepdims=False, dtype=jnp.float64)
return h
class DropConnect(hk.Module):
"""Batchwise Dropout used in EfficientNet."""
def __init__(self, rate: float, name: Optional[str] = None):
"""Constructs a DropConnect module.
Args:
rate: Probability that each element of x is discarded. Must be a scalar in
the range `[0, 1)`.
name: (Optional) Name for this module.
"""
super().__init__(name=name)
self._rate = rate
def __call__(self, x: chex.Array, is_training: bool) -> chex.Array:
if not is_training:
return x
batch_size = x.shape[0]
r = jax.random.uniform(
hk.next_rng_key(), [batch_size] + [1] * (x.ndim - 1), dtype=x.dtype)
keep_prob = 1. - self._rate
binary_tensor = jnp.floor(keep_prob + r)
return (x / keep_prob) * binary_tensor
class Dropout(hk.Module):
"""Dropout as a module."""
def __init__(self, rate: float, name: Optional[str] = None):
"""Constructs a Dropout module.
Args:
rate: Probability that each element of x is discarded. Must be a scalar in
the range `[0, 1)`.
name: (Optional) Name for this module.
"""
super().__init__(name=name)
self._rate = rate
def __call__(self, x: chex.Array, is_training: bool) -> chex.Array:
if not is_training:
return x
return hk.dropout(hk.next_rng_key(), self._rate, x)
# We copy and adapt the attention component of Flax as Haiku's version does
# slightly different computations and prevents us from using pretrained
# checkpoints.
def _dot_product_attention(query, key, value, dtype=jnp.float32, axis=None):
"""Computes dot-product attention given query, key, and value."""
assert key.shape[:-1] == value.shape[:-1]
assert (query.shape[0:1] == key.shape[0:1] and
query.shape[-1] == key.shape[-1])
if axis is None:
axis = tuple(range(1, key.ndim - 2))
if not isinstance(axis, Iterable):
axis = (axis,)
assert key.ndim == query.ndim
assert key.ndim == value.ndim
for ax in axis:
if not (query.ndim >= 3 and 1 <= ax < query.ndim - 2):
raise ValueError("Attention axis must be between the batch "
"axis and the last-two axes.")
depth = query.shape[-1]
n = key.ndim
# `batch_dims` is <bs, <non-attention dims>, num_heads>.
batch_dims = tuple(np.delete(range(n), axis + (n - 1,)))
# q & k -> (bs, <non-attention dims>, num_heads, <attention dims>, channels).
qk_perm = batch_dims + axis + (n - 1,)
key = key.transpose(qk_perm)
query = query.transpose(qk_perm)
# v -> (bs, <non-attention dims>, num_heads, channels, <attention dims>).
v_perm = batch_dims + (n - 1,) + axis
value = value.transpose(v_perm)
query = query / jnp.sqrt(depth).astype(dtype)
batch_dims_t = tuple(range(len(batch_dims)))
attn_weights = jax.lax.dot_general(query, key, (((n - 1,), (n - 1,)),
(batch_dims_t, batch_dims_t)))
# Normalize the attention weights.
norm_dims = tuple(range(attn_weights.ndim - len(axis), attn_weights.ndim))
attn_weights = jax.nn.softmax(attn_weights, axis=norm_dims)
attn_weights = attn_weights.astype(dtype)
# Compute the new values given the attention weights.
wv_contracting_dims = (norm_dims, range(value.ndim - len(axis), value.ndim))
y = jax.lax.dot_general(attn_weights, value,
(wv_contracting_dims, (batch_dims_t, batch_dims_t)))
# Back to (bs, dim1, dim2, ..., dimN, num_heads, channels).
def _invert_perm(perm):
perm_inv = [0] * len(perm)
for i, j in enumerate(perm):
perm_inv[j] = i
return tuple(perm_inv)
perm_inv = _invert_perm(qk_perm)
y = y.transpose(perm_inv)
return y
# Adapted from `hk.MultiHeadAttention` but using the Flax attention function.
class MultiHeadAttention(hk.Module):
"""Multi-headed attention mechanism."""
def __init__(self,
num_heads: int,
key_size: int,
w_init: Optional[hk.initializers.Initializer] = None,
query_size: Optional[int] = None,
value_size: Optional[int] = None,
model_size: Optional[int] = None,
name: Optional[str] = None):
super().__init__(name=name)
self.num_heads = num_heads
self.key_size = key_size
self.query_size = query_size or key_size
self.value_size = value_size or key_size
self.model_size = model_size or key_size * num_heads
if w_init is None:
self.w_init = hk.initializers.VarianceScaling(1., "fan_avg", "uniform")
else:
self.w_init = w_init
def __call__(self, query: chex.Array, key: chex.Array,
value: chex.Array) -> chex.Array:
"""Compute MHA with queries, keys & values."""
query_heads = self._linear_projection(query, self.query_size, "query")
key_heads = self._linear_projection(key, self.key_size, "key")
value_heads = self._linear_projection(value, self.value_size, "value")
attention_vec = _dot_product_attention(
query_heads, key_heads, value_heads, dtype=query.dtype, axis=1)
attention_vec = jnp.reshape(attention_vec, (*query.shape[:2], -1))
return hk.Linear(self.model_size, w_init=self.w_init)(attention_vec)
@hk.transparent
def _linear_projection(self,
x: chex.Array,
head_size: int,
name: Optional[str] = None) -> chex.Array:
y = hk.Linear(self.num_heads * head_size, w_init=self.w_init, name=name)(x)
return y.reshape((*x.shape[:2], self.num_heads, head_size))
class SelfAttention(MultiHeadAttention):
"""Self-attention mechanism."""
def __call__(self, x: chex.Array) -> chex.Array:
return super().__call__(x, x, x)
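# Example usage of `SelfAttention` (a sketch; shapes and hyperparameters are
# illustrative):
#
#   def forward(x):  # x: [batch, seq_len, features]
#     return SelfAttention(num_heads=4, key_size=16)(x)
#
#   f = hk.transform(forward)
#   x = jnp.zeros([2, 10, 64])
#   params = f.init(jax.random.PRNGKey(0), x)
#   y = f.apply(params, None, x)  # [2, 10, 64], since model_size defaults
#                                 # to num_heads * key_size = 64.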
def filter_kwargs(fn_or_class: Callable[..., Any]) -> Callable[..., Any]:
"""Argument cleaner for functions and class constructers."""
method_fn = (fn_or_class.__init__ if isinstance(fn_or_class, Type) else
fn_or_class)
if isinstance(method_fn, hk.Module):
# Haiku wraps `__call__` and destroys the `argspec`. However, it does
# preserve the signature of the function.
fn_args = list(inspect.signature(method_fn.__call__).parameters.keys())
else:
fn_args = inspect.getfullargspec(method_fn).args
if fn_args and "self" == fn_args[0]:
fn_args = fn_args[1:]
def wrapper(*args, **kwargs):
common_kwargs = {}
if len(args) > len(fn_args):
raise ValueError("Too many positional arguments.")
for k, v in zip(fn_args, args):
common_kwargs[k] = v
for k, v in kwargs.items():
if k in common_kwargs:
raise ValueError(
"{} already specified as a positional argument".format(k))
if k in fn_args:
common_kwargs[k] = v
return fn_or_class(**common_kwargs)
return wrapper
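if __name__ == "__main__":
  # Usage sketch for `filter_kwargs` (illustrative function and argument
  # names): keyword arguments the wrapped callable does not accept are dropped.
  def scale(image, factor=2.0):
    return factor * image

  wrapped = filter_kwargs(scale)
  # `unused_option` is not an argument of `scale`, so it is filtered out.
  print(wrapped(np.ones((2, 2)), factor=3.0, unused_option=True))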
|
dm_nevis-master
|
experiments_jax/training/modules.py
|
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A module implementing primitives for training by gradient descent."""
import dataclasses
import functools
import time
from typing import Any, Callable, Iterator, Mapping, Optional, Tuple
from absl import logging
import chex
from dm_nevis.benchmarker.datasets import datasets
from dm_nevis.benchmarker.datasets import tasks
from dm_nevis.benchmarker.environment import datawriter_interface
from experiments_jax.environment import pickle_checkpointer
from experiments_jax.training import models
from experiments_jax.training import resources
import haiku as hk
import jax
import optax
LOG_INTERVAL = 10
DEFAULT_MOVING_AVERAGE_ALPHA = 0.8
UpdateFn = Any
LoadParamsFn = Callable[[hk.Params, hk.State], Tuple[hk.Params, hk.Params,
hk.State]]
@chex.dataclass
class TrainState:
rng: chex.PRNGKey
trainable_params: hk.Params
frozen_params: hk.Params
state: hk.State
opt_state: optax.OptState
def init_train_state(
rng: chex.PRNGKey,
model: models.Model,
opt: optax.GradientTransformation,
load_params_fn: Optional[LoadParamsFn] = None,
*,
log_model_summary: bool = True,
) -> TrainState:
"""Initializes model parameter and state.
Args:
rng: random seed.
model: the model.
opt: the optimizer.
load_params_fn: Optional function to load pre-trained parameters and/or to
freeze a subset of the parameters. The function takes the models randomly
initialized parameters and state structures, and returns a
(trainable_params, frozen_params, state) tuple.
log_model_summary: When True, logs information about the initialized
parameters and state.
Returns:
A TrainState structure.
"""
init_rng, rng = jax.random.split(rng)
params, state = model.init(init_rng)
if load_params_fn:
trainable_params, frozen_params, state = load_params_fn(params, state)
else:
trainable_params, frozen_params = params, {}
opt_state = opt.init(trainable_params)
if log_model_summary:
logging.info("Model parameters: \n%s",
models.param_summary(trainable_params))
logging.info("Frozen parameters: \n%s", models.param_summary(frozen_params))
logging.info("Model state: \n%s", models.param_summary(state))
logging.info("Model size (train-params/frozen-params/state): %s / %s / %s",
models.size_summary(trainable_params),
models.size_summary(frozen_params), models.size_summary(state))
return TrainState(
rng=rng,
trainable_params=trainable_params,
frozen_params=frozen_params,
state=state,
opt_state=opt_state)
def build_update_fn(task_key: tasks.TaskKey, model: models.Model,
opt: optax.GradientTransformation) -> UpdateFn:
"""Builds an update function for updating train state using an optimizer."""
def update_fn(batch: datasets.MiniBatch,
train_state: TrainState,
lr_scale: float = 1.) -> Tuple[TrainState, Mapping[str, Any]]:
@functools.partial(jax.value_and_grad, has_aux=True)
def grad_fn(trainable_params, frozen_params, state, rng, batch):
params = hk.data_structures.merge(trainable_params, frozen_params)
fn = model.loss_and_metrics[task_key]
label = None
if task_key.kind == tasks.TaskKind.MULTI_LABEL_CLASSIFICATION:
label = batch.multi_label_one_hot
else:
label = batch.label
(loss, metrics), state = fn(params, state, rng, batch.image, label, True)
return loss, (metrics, state)
step_rng, next_rng = jax.random.split(train_state.rng)
((loss, (metrics, state)),
grad) = grad_fn(train_state.trainable_params, train_state.frozen_params,
train_state.state, step_rng, batch)
updates, opt_state = opt.update(grad, train_state.opt_state,
train_state.trainable_params)
updates = jax.tree_map(lambda x: lr_scale * x, updates)
trainable_params = optax.apply_updates(train_state.trainable_params,
updates)
metrics = {"loss": loss, **metrics}
metrics = jax.tree_map(lambda x: x.mean(), metrics)
train_state = dataclasses.replace(
train_state,
rng=next_rng,
trainable_params=trainable_params,
state=state,
opt_state=opt_state)
return train_state, metrics
return update_fn
def restore_train_state(train_state_checkpoint_path: str) -> TrainState:
"""Load train state from checkpoint path if it has been saved to disk."""
if train_state_checkpoint_path is None:
return None
checkpointer = pickle_checkpointer.PickleCheckpointer(
train_state_checkpoint_path)
train_state = checkpointer.restore()
return train_state
def save_train_state(checkpoint_file_path: str, task_key: tasks.TaskKey,
train_state: TrainState):
logging.info("Saving train state for train task %s to %s", task_key.name,
checkpoint_file_path)
checkpointer = pickle_checkpointer.PickleCheckpointer(checkpoint_file_path)
checkpointer.write(train_state)
def fit_params(
train_state: TrainState,
train_iter: Iterator[datasets.MiniBatch],
update_fn: UpdateFn,
num_steps: int,
on_complete_step: Callable[[int, TrainState], None],
metrics_writer: datawriter_interface.DataWriter,
initial_global_step: Optional[int] = None) -> Tuple[TrainState, float]:
"""Runs gradient descent+optimizer step for the given number of steps."""
global_step = initial_global_step or 0
estimated_flops = None
step_counter = StepCountEstimator()
while global_step < num_steps:
t = time.monotonic()
batch = next(train_iter)
logging.log_every_n(logging.INFO, "Step: %d/%d, Batch %s", LOG_INTERVAL,
global_step + 1, num_steps, batch)
train_state, metrics = update_fn(batch, train_state)
global_step += 1
on_complete_step(global_step, train_state)
metrics = {
"global_step": global_step,
"steps_per_second": step_counter.estimated_steps_per_second(),
**metrics
}
metrics_writer.write(metrics)
if estimated_flops is None:
estimated_flops = resources.estimate_flops(update_fn, batch, train_state)
step_counter.add_measurement(time.monotonic() - t)
return train_state, (estimated_flops or 0.0) * num_steps
class StepCountEstimator:
"""Estimates how many steps per second are achieved during trainnig."""
def __init__(self, alpha: float = DEFAULT_MOVING_AVERAGE_ALPHA) -> None:
self._ema = None
self._alpha = alpha
def add_measurement(self, elapsed_seconds: float):
if self._ema is None:
self._ema = elapsed_seconds
else:
self._ema = self._alpha * self._ema + (1 - self._alpha) * elapsed_seconds
def estimated_steps_per_second(self) -> float:
if not self._ema:
return float("nan")
return 1 / self._ema
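if __name__ == "__main__":
  # Usage sketch (illustrative timings): the estimator keeps an exponential
  # moving average of per-step durations and reports steps per second.
  estimator = StepCountEstimator()
  for elapsed_seconds in (0.50, 0.40, 0.45):
    estimator.add_measurement(elapsed_seconds)
  logging.info("Estimated steps/second: %.2f",
               estimator.estimated_steps_per_second())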
|
dm_nevis-master
|
experiments_jax/training/trainer.py
|
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for experiments_jax.training.heads."""
from absl.testing import absltest
from absl.testing import parameterized
from experiments_jax.training import heads
import haiku as hk
import jax
import numpy as np
class HeadsTest(parameterized.TestCase):
def test_metrics_multi_label(self):
num_labels = 17
num_examples = 29
@hk.transform_with_state
def f(inputs, targets):
head = heads.MultiLabelHead(num_classes=num_labels)
return head.loss_and_metrics(inputs, targets, is_training=False)
gen = np.random.default_rng(0)
inputs = gen.normal(size=(num_examples, num_labels))
targets = np.ones((num_examples, num_labels))
params, state = f.init(jax.random.PRNGKey(0), inputs, targets)
(_, diagnostics), _ = f.apply(params, state, None, inputs, targets)
error = diagnostics['error']
self.assertLessEqual(np.max(error), 1.0)
self.assertGreaterEqual(np.min(error), 0.0)
    # Each prediction is correct with probability 0.5 independently, so the
    # mean error over many i.i.d. trials is close to 0.5.
expected_value = 0.5
self.assertAlmostEqual(expected_value, error.mean(), delta=0.1)
if __name__ == '__main__':
absltest.main()
|
dm_nevis-master
|
experiments_jax/training/heads_test.py
|
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for experiments_jax.training.augmentations."""
import functools
from typing import Any, Mapping, Sequence, Tuple
from absl.testing import absltest
from absl.testing import parameterized
from dm_nevis.benchmarker.datasets import datasets
from experiments_jax.training import augmentations
import numpy as np
import tensorflow as tf
class AugmentationsTest(parameterized.TestCase):
def test_chain(self):
ds = _fixture_dataset([((30, 60, 3), 0), ((30, 60, 1), 0)])
augmentation_fn = functools.partial(
augmentations.chain,
augmentation_ctors_with_kwargs=[
(augmentations.normalize, {}),
(augmentations.resize, {
'size': (30, 30)
}),
(augmentations.random_crop, {
'size': (20, 20)
}),
])
ds = ds.map(augmentation_fn)
items = list(ds)
self.assertLen(items, 2)
for item in items:
# Grayscale images should be converted to color.
self.assertEqual(3, item.image.shape[-1])
self.assertEqual((20, 20, 3), item.image.shape)
def test_normalize(self):
ds = _fixture_dataset([((30, 60, 3), 0), ((30, 60, 1), 0)])
ds = ds.map(augmentations.normalize)
items = list(ds)
self.assertLen(items, 2)
for item in items:
# Grayscale images should be converted to color.
self.assertEqual(3, item.image.shape[-1])
def test_standardize_per_image(self):
ds = _fixture_dataset([((30, 60, 3), 0), ((30, 60, 1), 0)])
ds = ds.map(augmentations.standardize_per_image)
items = list(ds)
# We only test whether this does compile.
self.assertLen(items, 2)
def test_random_flip(self):
ds = _fixture_dataset([((30, 60, 3), 0), ((30, 60, 1), 0)])
ds = ds.map(augmentations.random_flip)
items = list(ds)
# We only test whether this does compile.
self.assertLen(items, 2)
def test_resize(self):
ds = _fixture_dataset([((30, 30, 3), 0)])
ds = ds.map(functools.partial(augmentations.resize, size=(20, 20)))
items = list(ds)
self.assertLen(items, 1)
for item in items:
self.assertEqual((20, 20, 3), item.image.shape)
def test_central_crop_via_cropped_window_and_resize(self):
ds = _fixture_dataset([((30, 30, 3), 0)])
ds = ds.map(
functools.partial(
augmentations.central_crop_via_cropped_window_and_resize,
size=(20, 20)))
items = list(ds)
self.assertLen(items, 1)
for item in items:
self.assertEqual((20, 20, 3), item.image.shape)
def test_random_crop_via_cropped_window_and_resize(self):
ds = _fixture_dataset([((30, 30, 3), 0)])
ds = ds.map(
functools.partial(
augmentations.random_crop_via_cropped_window_and_resize,
size=(20, 20)))
items = list(ds)
self.assertLen(items, 1)
for item in items:
self.assertEqual((20, 20, 3), item.image.shape)
def test_central_crop_via_cropped_window_and_resize_small_image(self):
ds = _fixture_dataset([((3, 3, 3), 0)])
ds = ds.map(
functools.partial(
augmentations.central_crop_via_cropped_window_and_resize,
size=(2, 2)))
items = list(ds)
self.assertLen(items, 1)
for item in items:
self.assertEqual((2, 2, 3), item.image.shape)
def test_random_crop_via_cropped_window_and_resize_small_image(self):
ds = _fixture_dataset([((3, 3, 3), 0)])
ds = ds.map(
functools.partial(
augmentations.random_crop_via_cropped_window_and_resize,
size=(2, 2)))
items = list(ds)
self.assertLen(items, 1)
for item in items:
self.assertEqual((2, 2, 3), item.image.shape)
def test_central_crop(self):
ds = _fixture_dataset([((30, 30, 3), 0)])
ds = ds.map(functools.partial(augmentations.central_crop, size=(20, 20)))
items = list(ds)
self.assertLen(items, 1)
for item in items:
self.assertEqual((20, 20, 3), item.image.shape)
def test_random_crop(self):
ds = _fixture_dataset([((30, 30, 3), 0)])
ds = ds.map(functools.partial(augmentations.random_crop, size=(20, 20)))
items = list(ds)
self.assertLen(items, 1)
for item in items:
self.assertEqual((20, 20, 3), item.image.shape)
@parameterized.parameters([
dict(image_shape=(224, 300), padding=0, expected=(0, 38, 224, 224)),
dict(image_shape=(300, 224), padding=0, expected=(38, 0, 224, 224)),
dict(image_shape=(224, 300), padding=16, expected=(16, 54, 192, 192)),
dict(image_shape=(300, 224), padding=16, expected=(54, 16, 192, 192)),
dict(image_shape=(32 + 1, 32 + 1), padding=16, expected=(16, 16, 1, 1)),
])
def test_central_crop_window(self, image_shape, padding, expected):
image_shape = tf.constant(image_shape, dtype=tf.int32)
bbox = augmentations.central_crop_window(image_shape, padding)
np.testing.assert_allclose(expected, bbox)
@parameterized.parameters([
dict(image_shape=(224, 300, 3)),
dict(image_shape=(224, 224, 3)),
dict(image_shape=(100, 10, 3)),
])
def test_random_sample_crop_window(self, image_shape):
windows = []
for i in range(100):
crop_window = augmentations.sample_random_crop_window(
tf.constant(image_shape), seed=i)
windows.append(tuple(w.numpy() for w in crop_window))
# Test that we see plenty of variety in the samples.
different_samples = set(windows)
assert len(different_samples) > 50
image_area = image_shape[0] * image_shape[1]
(min_area, max_area) = augmentations.AREA_RANGE
(min_aspect_ratio, max_aspect_ratio) = augmentations.ASPECT_RATIO_RANGE
sampled_min_area = min(w[2] * w[3] for w in windows)
sampled_max_area = max(w[2] * w[3] for w in windows)
sampled_min_aspect_ratio = min(w[3] / w[2] for w in windows)
    sampled_max_aspect_ratio = max(w[3] / w[2] for w in windows)
self.assertLess(sampled_max_area / image_area, max_area + 1e-4)
self.assertGreater(sampled_min_area / image_area, min_area - 1e-4)
self.assertLess(sampled_max_aspect_ratio, max_aspect_ratio + 1e-4)
self.assertGreater(sampled_min_aspect_ratio, min_aspect_ratio - 1e-4)
def _fixture_dataset(
shapes_and_labels: Sequence[Tuple[Tuple[int, int, int], int]]
) -> tf.data.Dataset:
"""Constructs a fixture containing minibatches.
We round-trip the data via pngs, since this will result in shape tensors
that are not determined at graph compile time. This ensures that the tested
mappable functions work in graph mode, which is enforced by
tf.data.Dataset.map(...).
Args:
shapes_and_labels: The image shapes and label values to use for the
fixtures.
Returns:
A tensorflow dataset.
"""
def gen():
for shape, label in shapes_and_labels:
yield _encode_example(image=np.zeros(shape, dtype=np.uint8), label=label)
ds = tf.data.Dataset.from_generator(
gen,
output_signature={
'image': tf.TensorSpec(shape=(), dtype=tf.string),
'label': tf.TensorSpec(shape=(), dtype=tf.int32),
})
def to_minibatch(example) -> datasets.MiniBatch:
return datasets.MiniBatch(
image=tf.io.decode_png(example['image']),
label=example['label'],
multi_label_one_hot=None,
)
return ds.map(to_minibatch, deterministic=True)
def _encode_example(image: np.ndarray, label: int) -> Mapping[str, Any]:
"""Create a tf example using named fields."""
return {
'image': _encoded_png_feature(image),
'label': label,
}
def _encoded_png_feature(image: np.ndarray) -> bytes:
return tf.io.encode_png(image).numpy()
if __name__ == '__main__':
absltest.main()
|
dm_nevis-master
|
experiments_jax/training/augmentations_test.py
|
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Learning rate schedules for Nevis benchmarker."""
from typing import Any, Dict, NamedTuple, Sequence
from absl import logging
import optax
class ProgressAndScale(NamedTuple):
"""Combines a progress and scale.
Attributes:
progress: the progress (in the range [0, 1] through the full train loop).
scale: The learning rate scaling to apply once the given progress has been
completed.
"""
progress: float
scale: float
def piecewise_constant_progress_aligned_learning_rate_schedule(
init_value: float,
max_steps: int,
learning_progress_boundaries_and_scales: Sequence[ProgressAndScale],
) -> optax.Schedule:
"""Piece-wise constant learning rate depending on learning progress.
Args:
init_value: Initial value of the learning rate.
max_steps: Maximum number of training steps (batched weight updates).
learning_progress_boundaries_and_scales: A sequence of tuples `(progress,
      scale)`, where `progress` indicates the portion (in the [0, 1] range) of
      `max_steps` at which the learning rate is scaled by `scale`.
Returns:
Learning rate schedule function.
"""
boundaries_and_scales = {}
for (progress, scale) in learning_progress_boundaries_and_scales:
step = int(progress * max_steps)
boundaries_and_scales[step] = scale
  logging.info('Using piecewise constant schedule.\n Boundaries: \n%s',
               boundaries_and_scales)
return optax.piecewise_constant_schedule(
init_value, boundaries_and_scales=boundaries_and_scales)
def constant_learning_rate_schedule(init_value: float) -> optax.Schedule:
"""Constant learning rate schedule."""
return lambda s: init_value
def warmup_cosine_decay_learning_rate_schedule(
initial_learning_rate: float, steps_per_epoch: int, max_steps: int,
warmup_epochs: int, final_learning_rate: float) -> optax.Schedule:
"""Warmup cosine learning rate schedule."""
# The warmup steps must be strictly less than the number of overall steps.
warmup_steps = min(max_steps - 1, warmup_epochs * steps_per_epoch)
logging.info(
'Cosine decay schedule: warmup: %d, max steps: %d',
warmup_steps,
max_steps,
)
return optax.warmup_cosine_decay_schedule(
init_value=0.0,
peak_value=initial_learning_rate,
warmup_steps=warmup_steps,
end_value=final_learning_rate,
decay_steps=max_steps)
def build_learning_rate_schedule(
learning_rate_schedule_name: str, initial_learning_rate: float,
steps_per_epoch: int, max_steps: int,
learning_rate_schedule_kwargs: Dict[str, Any]) -> optax.Schedule:
"""Creates a learning_rate_schedule function for given arguments.
  This function assumes that `steps_per_epoch` and `max_steps` are not contained
  in `learning_rate_schedule_kwargs`, because these arguments may be dynamically
  recomputed on the learner side depending on which dataset is used.
Args:
learning_rate_schedule_name: A name of a learning rate schedule.
initial_learning_rate: An initial value of the learning rate.
steps_per_epoch: Number of batched weight updates per epoch.
max_steps: Maximum number of batched weight updates for the training run.
learning_rate_schedule_kwargs: Dictionary containing additional arguments
for a given learning rate schedule.
Returns:
Learning rate schedule.
"""
if 'steps_per_epoch' in learning_rate_schedule_kwargs:
raise ValueError(
'`steps_per_epoch` must not be in `learning_rate_schedule_kwargs`.')
if 'max_steps' in learning_rate_schedule_kwargs:
raise ValueError(
'`max_steps` must not be in `learning_rate_schedule_kwargs`.')
if learning_rate_schedule_name == 'constant':
return constant_learning_rate_schedule(initial_learning_rate)
elif learning_rate_schedule_name == 'piecewise_constant_progress_aligned':
return piecewise_constant_progress_aligned_learning_rate_schedule(
initial_learning_rate, max_steps, **learning_rate_schedule_kwargs)
elif learning_rate_schedule_name == 'warmup_cosine_decay':
return warmup_cosine_decay_learning_rate_schedule(
initial_learning_rate, steps_per_epoch, max_steps,
**learning_rate_schedule_kwargs)
else:
raise ValueError(
f'Unsupported `learning_rate_schedule_name` = `{learning_rate_schedule_name}`'
)
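if __name__ == '__main__':
  # Usage sketch (illustrative hyperparameters): build a warmup cosine decay
  # schedule via the factory above and query it at a few step counts.
  schedule = build_learning_rate_schedule(
      learning_rate_schedule_name='warmup_cosine_decay',
      initial_learning_rate=0.1,
      steps_per_epoch=100,
      max_steps=1000,
      learning_rate_schedule_kwargs={
          'warmup_epochs': 2,
          'final_learning_rate': 1e-4,
      })
  for step in (0, 200, 500, 1000):
    logging.info('Learning rate at step %d: %s', step, schedule(step))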
|
dm_nevis-master
|
experiments_jax/training/learning_rate_schedules.py
|
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Data augmentations.
Augmentations are intended to be used in the context of a map function in a
tf.data.Dataset. This means that the functions must be usable in tensorflow
graph mode [1]. To achieve this, any run-time variable shape must be managed
strictly using tensorflow functions (such as tf.cond(...)).
This can be tested using the test fixtures in the tests for this module.
[1]: https://www.tensorflow.org/api_docs/python/tf/data/Dataset#map
"""
import dataclasses
import functools
from typing import Any, Callable, Dict, Optional, Sequence, Tuple
from dm_nevis.benchmarker.datasets import datasets
import tensorflow as tf
AugmentFn = Callable[[datasets.MiniBatch], datasets.MiniBatch]
Kwargs = Dict[str, Any]
DEFAULT_PADDING = 0
# offset_y, offset_x, crop_height, crop_width
CropWindow = Tuple[int, int, int, int]
AREA_RANGE = (0.08, 1.0)
MAX_ATTEMPTS = 10
ASPECT_RATIO_RANGE = (3 / 4, 4 / 3)
MIN_OBJECT_COVERED = 0.1
def chain(
example: datasets.MiniBatch,
augmentation_ctors_with_kwargs: Sequence[Tuple[AugmentFn, Kwargs]]
) -> datasets.MiniBatch:
"""Applies data augmentations to example sequentially."""
for (ctor, kwargs) in augmentation_ctors_with_kwargs:
augmentation_fn = functools.partial(ctor, **kwargs)
example = augmentation_fn(example)
return example
def standardize_per_image(example: datasets.MiniBatch) -> datasets.MiniBatch:
"""Standardizes each image."""
image = tf.image.per_image_standardization(example.image)
return dataclasses.replace(example, image=image)
def random_flip(example: datasets.MiniBatch) -> datasets.MiniBatch:
"""Randomly flips each image."""
image = tf.image.random_flip_left_right(example.image)
return dataclasses.replace(example, image=image)
def normalize(example: datasets.MiniBatch) -> datasets.MiniBatch:
"""Ensures the images have 3 channels and are in range -1..1."""
# Images from nevis datasets are 0..255, however stored as int64.
# This confuses the other image-preprocessing functions => cast to uint8.
image = example.image
def true_fn():
# no-op for grayscale, results in correct shape for RGB
image_sliced = image[:, :, :1]
return tf.image.grayscale_to_rgb(image_sliced)
is_grayscale = tf.equal(tf.shape(image)[-1], 1)
image = tf.cond(
pred=is_grayscale,
true_fn=true_fn,
false_fn=lambda: image)
# Convert to range -1..1
image = tf.cast(image, dtype=tf.uint8)
image = 2 * tf.image.convert_image_dtype(image, dtype=tf.float32) - 1
return dataclasses.replace(example, image=image)
def _distorted_bounding_box_crop(
image_shape: tf.Tensor,
*,
bbox: tf.Tensor,
min_object_covered: float,
aspect_ratio_range: Tuple[float, float],
area_range: Tuple[float, float],
max_attempts: int,
seed: Optional[int] = None,
) -> CropWindow:
"""Generates cropped_image using one of the bboxes randomly distorted."""
bbox_begin, bbox_size, _ = tf.image.sample_distorted_bounding_box(
image_shape,
bounding_boxes=bbox,
min_object_covered=min_object_covered,
aspect_ratio_range=aspect_ratio_range,
area_range=area_range,
max_attempts=max_attempts,
use_image_if_no_bounding_boxes=True,
seed=seed)
# Crop the image to the specified bounding box.
offset_y, offset_x, _ = tf.unstack(bbox_begin)
target_height, target_width, _ = tf.unstack(bbox_size)
return (offset_y, offset_x, target_height, target_width)
def sample_random_crop_window(image_shape: tf.Tensor,
seed: Optional[int] = None) -> CropWindow:
"""Randomly samples a crop window, given an image size and config.
It may be that the random sampler is unable to satisfy the constraints given
(within an acceptable number of iterations). In this case, the sampler
  falls back to returning the result of `central_crop_window`, with the
  default padding set.
Args:
image_shape: A tensor containing [image_height, image_width, channels].
seed: If specified, a random seed for sampling cropping window.
Returns:
A crop window [min y, min x, height, width] in image coordinates.
"""
bbox = tf.constant([0.0, 0.0, 1.0, 1.0], dtype=tf.float32, shape=[1, 1, 4])
crop_window = _distorted_bounding_box_crop(
image_shape,
bbox=bbox,
min_object_covered=MIN_OBJECT_COVERED,
aspect_ratio_range=ASPECT_RATIO_RANGE,
area_range=AREA_RANGE,
max_attempts=MAX_ATTEMPTS,
seed=seed)
# If the random crop failed, return the center crop.
if tf.reduce_all(tf.equal(image_shape[:2], crop_window[2:])):
crop_window = central_crop_window(image_shape)
return crop_window
def central_crop_window(image_shape: tf.Tensor,
padding: int = DEFAULT_PADDING) -> CropWindow:
"""Computes a crop window for a padded center crop of the given image shape.
Args:
image_shape: The shape of the jpeg [height, width, channels], or [height,
width].
    padding: The padding between the input image and the resulting image. The
      padding represents the distance between the input image and the output
      image at each edge (so that the total number of pixels removed from the
      smallest edge is 2x the padding value).
Returns:
A crop window [y, x, image_size, image_size],
where image_size = min(height, width) - 2 * padding, and y and x are
    chosen so that the resulting crop falls in the center of the input image.
"""
# Scrub the channels size, if it was provided.
image_shape = image_shape[:2]
min_image_side = tf.math.reduce_min(image_shape)
image_height = image_shape[0]
image_width = image_shape[1]
# If the padding is larger than the image, no pixels will be returned.
tf.debugging.assert_greater(min_image_side, 2 * padding)
offset_y = tf.cast((image_height - min_image_side) / 2, dtype=tf.int32)
offset_x = tf.cast((image_width - min_image_side) / 2, dtype=tf.int32)
image_size = tf.cast(min_image_side - 2 * padding, tf.int32)
return (offset_y + padding, offset_x + padding, image_size, image_size)
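# Worked example (values match the unit tests for this function): for an image
# of shape (300, 224) with padding=16, min_image_side is 224, so
# offset_y = (300 - 224) // 2 = 38, offset_x = 0 and
# image_size = 224 - 2 * 16 = 192, giving the crop window
# (38 + 16, 0 + 16, 192, 192) == (54, 16, 192, 192).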
def central_crop_via_cropped_window_and_resize(
example: datasets.MiniBatch, size: Tuple[int, int]) -> datasets.MiniBatch:
"""Extract the central region of the image and resize to the given size."""
crop_window = central_crop_window(tf.shape(example.image))
cropped_image = tf.image.crop_to_bounding_box(example.image, *crop_window)
cropped_image = tf.image.resize(cropped_image, size=size)
return dataclasses.replace(example, image=cropped_image)
def random_crop_via_cropped_window_and_resize(
example: datasets.MiniBatch, size: Tuple[int, int]) -> datasets.MiniBatch:
"""Randomly samples a crop from the image and resize to the given size."""
crop_window = sample_random_crop_window(tf.shape(example.image))
cropped_image = tf.image.crop_to_bounding_box(example.image, *crop_window)
cropped_image = tf.image.resize(cropped_image, size=size)
return dataclasses.replace(example, image=cropped_image)
def central_crop(example: datasets.MiniBatch,
size: Tuple[int, int]) -> datasets.MiniBatch:
"""Extracts the central region of the image."""
height = tf.shape(example.image)[0]
width = tf.shape(example.image)[1]
tf.debugging.assert_equal(height, width)
fraction = size[0] / height
image = tf.image.central_crop(example.image, fraction)
return dataclasses.replace(example, image=image)
def random_crop(example: datasets.MiniBatch,
size: Tuple[int, int]) -> datasets.MiniBatch:
"""Randomly samples crops with `size`."""
height, width = size
n_channels = tf.shape(example.image)[-1]
image = tf.image.random_crop(example.image, (height, width, n_channels))
return dataclasses.replace(example, image=image)
def resize(example: datasets.MiniBatch, size: Tuple[int,
int]) -> datasets.MiniBatch:
"""Resizes the image to the given size."""
image = tf.image.resize(example.image, size)
return dataclasses.replace(example, image=image)
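if __name__ == '__main__':
  # Usage sketch (synthetic image; the MiniBatch fields mirror those used in
  # this module's tests): apply `normalize` followed by `resize` to an example.
  example = datasets.MiniBatch(
      image=tf.zeros([30, 60, 1], dtype=tf.uint8),
      label=tf.constant(0, dtype=tf.int32),
      multi_label_one_hot=None,
  )
  example = chain(
      example,
      augmentation_ctors_with_kwargs=[
          (normalize, {}),
          (resize, {'size': (20, 20)}),
      ])
  print(example.image.shape)  # Expected: (20, 20, 3).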
|
dm_nevis-master
|
experiments_jax/training/augmentations.py
|
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Prediction heads."""
from typing import Dict, List, Optional, Set, Tuple, Protocol
import chex
import distrax
from dm_nevis.benchmarker.datasets import tasks
import haiku as hk
import jax
import jax.numpy as jnp
Loss = chex.Array
Metrics = Dict[str, chex.Array]
TaskKey = tasks.TaskKey
class Head(Protocol):
"""A Prediction head.
Heads combine a prediction layer to map some representation to a prediction
together with a loss function and other diagnostics appropriate for the
kind of prediction.
"""
def predict(
self,
inputs: chex.Array,
is_training: bool,
) -> List[distrax.Distribution]:
"""Generates a prediction given the representation `h`.
Args:
inputs: representation to derive predictions from.
is_training: bool
Returns:
A list over distribution objects representing the predictions (one for
each label).
"""
def loss_and_metrics(
self,
inputs: chex.Array,
targets: chex.Array,
is_training: bool = False,
) -> Tuple[Loss, Metrics]:
"""Evaluates the predictions given representations and ground-truth targets.
Args:
inputs: representation to derive predictions from.
targets: ground-truth to evaluate against.
is_training: bool
Returns:
A dictionary with per-example metrics and a scalar "loss".
"""
class CategoricalHead(hk.Module):
"""A categorical prediction head.
Encapsulates a linear layer to predict logits given a representation
and computes relevant metrics such as xent, error, expected-calibration-error
given ground truth labels.
"""
def __init__(self,
num_classes: int,
label_smoothing: float = 0.,
name: Optional[str] = None):
super().__init__(name=name)
self._num_classes = num_classes
self._label_smoothing = label_smoothing
self._logit_layer = hk.Linear(num_classes)
def predict(
self,
inputs: chex.Array,
is_training: bool = False,
) -> List[distrax.Categorical]:
"""Computes class probabilities given representations."""
del is_training
return [distrax.Categorical(logits=self._logit_layer(inputs))]
def loss_and_metrics(
self,
inputs: chex.Array,
targets: chex.Array,
is_training: bool = False,
) -> Tuple[Loss, Metrics]:
"""Computes loss and metrics given representations and target labels."""
chex.assert_rank(targets, 1) # [batch_size]
# Categorical distribution
predictive_distribution = self.predict(inputs, is_training=is_training)[0]
if self._label_smoothing != 0 and is_training:
one_hot_targets = jax.nn.one_hot(targets, self._num_classes)
smoothed_targets = (one_hot_targets * (1 - self._label_smoothing) +
self._label_smoothing / self._num_classes)
neg_log_probs = -jax.nn.log_softmax(predictive_distribution.logits)
chex.assert_rank(neg_log_probs, 2) # [batch_size, num_classes]
xent = jnp.sum(smoothed_targets * neg_log_probs, axis=1)
else:
xent = -predictive_distribution.log_prob(targets)
predicted_labels = predictive_distribution.mode()
error = jnp.not_equal(predicted_labels, targets).astype(jnp.float32)
loss = jnp.mean(xent)
return (loss, {"loss": loss, "xent": xent, "error": error})
class MultiLabelHead(hk.Module):
"""A binary multi-label prediction head.
Encapsulates a linear layer to predict logits given a representation
and computes relevant metrics such as cross entropy, error,
expected-calibration-error given ground truth labels.
"""
def __init__(self,
num_classes: int,
label_smoothing: float = 0.,
name: Optional[str] = None):
super().__init__(name=name)
self._num_classes = num_classes
self._label_smoothing = label_smoothing
self._logit_layer = hk.Linear(num_classes)
def predict(self,
inputs: chex.Array,
is_training: bool = False) -> List[distrax.Bernoulli]:
"""Computes class logits given representations."""
del is_training
logits = self._logit_layer(inputs)
output_distributions = []
for i in range(self._num_classes):
output_distributions.append(distrax.Bernoulli(logits=logits[:, i]))
return output_distributions
def loss_and_metrics(
self,
inputs: chex.Array,
targets: chex.Array,
is_training: bool = False,
) -> Tuple[Loss, Metrics]:
"""Computes loss and metrics given representations and target labels."""
chex.assert_rank(targets, 2) # [batch_size, num_classes]
# Product of independent Bernoulli.
predictive_distributions = self.predict(inputs, is_training=is_training)
cross_entropies = []
predicted_labels = []
errors = []
for i, predictive_distribution in enumerate(predictive_distributions):
expected_label = targets[:, i]
if self._label_smoothing != 0 and is_training:
smoothed_targets = (expected_label * (1 - self._label_smoothing) +
self._label_smoothing / 2)
cross_entropies.append(
-predictive_distribution.log_prob(smoothed_targets))
else:
cross_entropies.append(
-predictive_distribution.log_prob(expected_label))
predicted_label = predictive_distribution.mode()
predicted_labels.append(predicted_label)
error = jnp.not_equal(predicted_label, expected_label).astype(jnp.float32)
errors.append(error)
cross_entropies = jnp.stack(cross_entropies, axis=-1)
error = jnp.mean(jnp.stack(errors, axis=-1), axis=-1)
loss = jnp.mean(cross_entropies)
return (loss, {"loss": loss, "xent": cross_entropies, "error": error})
def build_head(task_keys: Set[TaskKey], **kwargs) -> Head:
"""Builds an appropriate head for the given task."""
assert len(task_keys) == 1
task_key = list(task_keys)[0]
task_kind = task_key.kind
if task_kind == tasks.TaskKind.CLASSIFICATION:
return CategoricalHead(
num_classes=task_key.metadata.num_classes, name=f"{task_key.name}_head",
**kwargs)
elif task_kind == tasks.TaskKind.MULTI_LABEL_CLASSIFICATION:
return MultiLabelHead(
num_classes=task_key.metadata.num_classes, name=f"{task_key.name}_head",
**kwargs)
else:
raise ValueError(f"Unsupported task kind: {task_kind}")
|
dm_nevis-master
|
experiments_jax/training/heads.py
|
dm_nevis-master
|
experiments_jax/learners/__init__.py
|
|
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A learner implemented for the baseline."""
import dataclasses
import functools
from typing import Iterable, Optional, Tuple
from absl import logging
from dm_nevis.benchmarker.datasets import streams
from dm_nevis.benchmarker.datasets import tasks
from dm_nevis.benchmarker.learners import learner_interface
from experiments_jax import experiment
import ml_collections
import numpy as np
import tensorflow_datasets as tfds
def learner_builder(
dataset_lookup_builder: experiment.DatasetLookupBuilderFn,
learner_config: ml_collections.ConfigDict
) -> Tuple[experiment.LearnerBuilderFn, experiment.ProgramStopper]:
"""Prepares the learner to run on launchpad."""
del learner_config
def _learner_builder():
dataset_lookup, _ = dataset_lookup_builder()
return build_example_learner(dataset_lookup)
def _stopper():
return
return _learner_builder, _stopper
@dataclasses.dataclass(frozen=True)
class ExampleLearnerState:
"""The state for the example learner."""
def build_example_learner(
dataset_lookup: experiment.DatasetLookupFn) -> learner_interface.Learner:
return learner_interface.Learner(
init=init,
train=functools.partial(train, dataset_lookup=dataset_lookup),
predict=functools.partial(predict, dataset_lookup=dataset_lookup),
)
def init() -> ExampleLearnerState:
return ExampleLearnerState()
def train(
event: streams.TrainingEvent,
state: ExampleLearnerState,
write_checkpoint: learner_interface.CheckpointFn,
*,
checkpoint_to_resume: Optional[learner_interface.Checkpoint] = None,
dataset_lookup: experiment.DatasetLookupFn,
) -> Tuple[ExampleLearnerState, learner_interface.ResourceUsage]:
"""Trains the learner given the given dataset."""
del write_checkpoint, checkpoint_to_resume
dataset = dataset_lookup(event.train_dataset_key)
logging.info("Got train task: %s with %s examples", dataset.task_key,
dataset.num_examples)
return state, learner_interface.ResourceUsage(
floating_point_operations=0.0,
peak_parameter_count=0,
peak_parameter_size_bytes=0)
def predict(
event: streams.PredictionEvent,
state: ExampleLearnerState,
*,
dataset_lookup: experiment.DatasetLookupFn,
) -> Iterable[learner_interface.Predictions]:
"""Computes predictions for each example in the referenced dataset."""
del state
dataset = dataset_lookup(event.dataset_key)
logging.info("Got predict task: %s with %s examples", dataset.task_key,
dataset.num_examples)
batch_size = 1
ds = dataset.builder_fn(shuffle=False).batch(batch_size=batch_size)
for batch in tfds.as_numpy(ds):
# For now, we make empty predictions.
if dataset.task_key.kind == tasks.TaskKind.MULTI_LABEL_CLASSIFICATION:
output = [
np.zeros((batch_size,))
for _ in range(dataset.task_key.metadata.num_classes)
]
elif dataset.task_key.kind == tasks.TaskKind.CLASSIFICATION:
output = [np.zeros((batch_size, dataset.task_key.metadata.num_classes))]
else:
raise ValueError(f"Unknown task kind: {dataset.task_key.kind}")
yield learner_interface.Predictions(batch=batch, output=output)
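# Illustrative sketch (not part of the original module): the output convention
# used above, shown for a hypothetical 5-class task with batch_size=1. A
# classification task yields a single [batch_size, num_classes] array, while a
# multi-label task yields one [batch_size] array per label.
def _example_output_shapes():
  """Hypothetical demonstration of the per-task-kind output shapes."""
  classification_output = [np.zeros((1, 5))]
  multi_label_output = [np.zeros((1,)) for _ in range(5)]
  return classification_output, multi_label_output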
|
dm_nevis-master
|
experiments_jax/learners/example/example_learner.py
|
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
|
dm_nevis-master
|
experiments_jax/learners/example/__init__.py
|
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A finetuning learner.
This learner supports a number of strategies for initializing the train state
for each sequential training task. One such strategy is "independent". In this
case, each model is trained independently.
"""
import dataclasses
import enum
import functools
import os
import time
from typing import Any, Callable, Dict, Iterable, Iterator, List, Optional, Sequence, Tuple
from absl import logging
import chex
from dm_nevis.benchmarker.datasets import datasets
from dm_nevis.benchmarker.datasets import streams
from dm_nevis.benchmarker.datasets import tasks
from dm_nevis.benchmarker.environment import datawriter_interface
from dm_nevis.benchmarker.learners import learner_interface
from experiments_jax import experiment
from experiments_jax.training import dataloaders
from experiments_jax.training import learning_rate_schedules
from experiments_jax.training import models
from experiments_jax.training import resources
from experiments_jax.training import trainer
import haiku as hk
import jax
import jax.numpy as jnp
import ml_collections
import numpy as np
import tensorflow as tf
CHECKPOINT_INTERVAL = 10_000
LOG_INTERVAL_SECONDS = 5
MAX_LR_DECAY_STEPS = 4
FINETUNING_DATAFRAME_NAME = "finetuning"
DUMMY_TASK_NAME_RANDOM_PARAMS = "randomly_initialized_params"
SUPPORTED_TASK_KINDS = frozenset([
tasks.TaskKind.CLASSIFICATION,
tasks.TaskKind.MULTI_LABEL_CLASSIFICATION,
])
class FinetuningStrategy(enum.Enum):
INDEPENDENT = 0 # Randomly initialize the state for each model.
PREVIOUS = 1  # Always initialize from the train state of the previous task.
def learner_builder(
dataset_lookup_builder: experiment.DatasetLookupBuilderFn,
learner_config: ml_collections.ConfigDict
) -> Tuple[experiment.LearnerBuilderFn, experiment.ProgramStopper]:
"""Prepares the learner to run on launchpad."""
def stopper():
logging.info("Closing program")
def build_learner():
dataset_lookup, task_keys = dataset_lookup_builder()
return build_finetuning_learner(learner_config, dataset_lookup, task_keys)
return build_learner, stopper
@dataclasses.dataclass(frozen=True)
class TrainingContext:
train_task_index: int
config: ml_collections.ConfigDict
event: streams.TrainingEvent
prng_seed: chex.PRNGKey = dataclasses.field(repr=False)
initial_train_state: Optional[trainer.TrainState] = dataclasses.field(
repr=False)
@dataclasses.dataclass(frozen=True)
class FinetuningLearnerState:
"""A dataclass to hold the state of the learner.
Attributes:
rng: The state of the PRNG.
seen_train_events: The (ordered) sequence of train events encountered by
this learner.
train_states: The sequence of tasks and the achieved final train states or
the checkpoint paths to the train states.
"""
rng: chex.PRNGKey
seen_train_events: List[streams.TrainingEvent]
train_states: List[Tuple[tasks.TaskKey, str]]
def build_finetuning_learner(
config: ml_collections.ConfigDict,
dataset_lookup: experiment.DatasetLookupFn,
task_keys: Sequence[tasks.TaskKey],
) -> learner_interface.Learner:
"""Builds the learner.
Args:
config: The configuration to use for this learner.
dataset_lookup: A function used to construct train and predict datasets.
task_keys: The tasks that the returned learner should support.
Returns:
A learner satisfying the learner_interface.Learner interface.
"""
_verify_all_tasks_supported(task_keys)
model = _build_model(config, task_keys)
finetuning_metrics = config.get_metrics_writer("finetuning")
cost_function = _cost_function_builder(dataset_lookup, task_keys)
return learner_interface.Learner(
init=functools.partial(
init,
config=config,
),
train=functools.partial(
train,
dataset_lookup=dataset_lookup,
config=config,
cost_function=cost_function,
finetuning_metrics=finetuning_metrics,
),
predict=functools.partial(
predict,
config=config,
model=model,
dataset_lookup=dataset_lookup,
),
)
def init(
*,
config: ml_collections.ConfigDict,
) -> learner_interface.LearnerState:
"""A function to initialize the train state for the learner.
Args:
config: The learner configuration.
Returns:
The initial learner state, before the learner has seen any training data.
"""
return FinetuningLearnerState(
rng=jax.random.PRNGKey(config.prng_seed),
seen_train_events=[],
train_states=[],
)
def train(
event: streams.TrainingEvent,
state: learner_interface.LearnerState,
write_checkpoint: learner_interface.CheckpointFn,
checkpoint_to_resume: learner_interface.Checkpoint = None,
*,
dataset_lookup: experiment.DatasetLookupFn,
config: ml_collections.ConfigDict,
cost_function: Any,
finetuning_metrics: datawriter_interface.DataWriter,
) -> Tuple[learner_interface.LearnerState, learner_interface.ResourceUsage]:
"""Trains the learner given the given dataset.
Args:
event: The training event that the learner should read training data from.
state: The learner's state before training began.
write_checkpoint: A function to write intermediate checkpoints during this
training event.
checkpoint_to_resume: If this training event was previously interrupted,
then this training event may be initialized from a checkpoint that was
previously written by the write_checkpoint function.
dataset_lookup: A lookup function for fetching the dataset by key.
config: The learner config.
cost_function: The function optimizing the model.
finetuning_metrics: A metrics writer for recording which state (if any) was
selected to finetune from.
Returns:
A new learner state, containing the knowledge accrued during training, along
with the resources used during training.
"""
del checkpoint_to_resume
task_key = dataset_lookup(event.train_dataset_key).task_key
initial_train_state = _get_train_state_for_finetuning(config, task_key, state,
finetuning_metrics)
rng, key = jax.random.split(state.rng)
context = TrainingContext(
train_task_index=len(state.seen_train_events),
config=config,
event=event,
prng_seed=key,
initial_train_state=initial_train_state,
)
_, train_state_np, resources_used = cost_function(
context, write_checkpoint=write_checkpoint)
train_state_checkpoint_path = os.path.join(
config.train_states_checkpoint_path,
f"train_task_index_{len(state.seen_train_events)}_{task_key.name}.pkl")
trainer.save_train_state(train_state_checkpoint_path, task_key,
train_state_np)
return (
dataclasses.replace(
state,
rng=rng,
train_states=[
*state.train_states, (task_key, train_state_checkpoint_path)
],
seen_train_events=[*state.seen_train_events, event],
),
resources_used,
)
def predict(
event: streams.PredictionEvent,
state: learner_interface.LearnerState,
*,
config: ml_collections.ConfigDict,
model: models.Model,
dataset_lookup: experiment.DatasetLookupFn,
) -> Iterable[learner_interface.Predictions]:
"""Compute predictions for each example in the referenced dataset.
Args:
event: An event containing a dataset key to compute predictions for.
state: The state of the learner, containing all knowledge accrued by the
learner as it was exposed to previous training events.
config: The config of the learner.
model: A model implementing the underlying architecture of the learner.
dataset_lookup: A function to fetch datasets by key.
Yields:
Batches of predictions from the model, given the learner state, over the
dataset loaded from the event.
"""
dataset = dataset_lookup(event.dataset_key)
task_key = dataset.task_key
eval_augment_fn = functools.partial(config.augmentations.eval.ctor,
**config.augmentations.eval.kwargs)
batch_iter = dataloaders.build_prediction_iterator(dataset, eval_augment_fn,
config.batch_size)
train_state = _get_latest_train_state_for_predictions(state, task_key)
params = hk.data_structures.merge(train_state.trainable_params,
train_state.frozen_params)
@jax.jit
def compute_predictions(rng_key, image):
return model.predict[task_key](params, train_state.state, rng_key, image,
False)[0]
rng_seq = hk.PRNGSequence(train_state.rng)
completed = 0
for batch in batch_iter():
logging.log_every_n_seconds(logging.INFO, "Completed predictions: %d/%d",
10, completed, dataset.num_examples)
completed += batch.image.shape[0]
output = compute_predictions(next(rng_seq), batch.image)
output = jax.tree_map(np.array, output)
yield learner_interface.Predictions(batch=batch, output=output)
@dataclasses.dataclass
class FitWithEarlyStoppingState:
step: int
train_state: trainer.TrainState
best_age: int
best_metric: np.number
best_train_state: Optional[trainer.TrainState]
lr_decay_steps: int
lr_decay_scale: np.number
validation_metric: str
def _cost_function_builder(
dataset_lookup_fn: experiment.DatasetLookupFn,
task_keys: Sequence[tasks.TaskKey],
) -> Any:
"""Construct the cost function used in the hyper search."""
def cost_function(train_context: TrainingContext,
*,
write_checkpoint,
checkpoint_to_resume=None):
logging.info("Detected devices: %s", jax.devices())
config = train_context.config
logging.info("Computing cost function with learner config: %s", config)
tf.config.set_visible_devices([], "GPU")
model = _build_model(config, task_keys)
prng = hk.PRNGSequence(train_context.prng_seed)
# Data for work-unit
train_dataset = dataset_lookup_fn(train_context.event.train_dataset_key)
valid_dataset = dataset_lookup_fn(train_context.event.dev_dataset_key)
task_key = train_dataset.task_key
train_augment_fn = functools.partial(config.augmentations.train.ctor,
**config.augmentations.train.kwargs)
eval_augment_fn = functools.partial(config.augmentations.eval.ctor,
**config.augmentations.eval.kwargs)
train_iter_fn = dataloaders.build_train_iterator(train_dataset,
train_augment_fn,
config.batch_size)
valid_iter_fn = dataloaders.build_prediction_iterator(
valid_dataset, eval_augment_fn, config.batch_size)
steps_per_epoch = train_dataset.num_examples // config.batch_size + 1
# If learning rate schedule is provided, we use it.
learning_rate_schedule = learning_rate_schedules.build_learning_rate_schedule(
config.optimization.learning_rate_schedule.name,
config.optimization.learning_rate_schedule.init_learning_rate,
steps_per_epoch, config.max_steps,
config.optimization.learning_rate_schedule.kwargs)
if "learning_rate" in config.optimization.optimizer.kwargs:
raise ValueError(
"`learning_rate` argument must not be specified in the optimizer as"
" it would be overridden by the learning rate schedule.")
optimizer = config.optimization.optimizer.ctor(
learning_rate=learning_rate_schedule,
**config.optimization.optimizer.kwargs)
update_fn = trainer.build_update_fn(task_key, model, optimizer)
initial_train_state = _initialize_train_from_context(
train_context, config, prng, model, optimizer)
opt_state = optimizer.init(initial_train_state.trainable_params)
initial_train_state = dataclasses.replace(
initial_train_state, opt_state=opt_state)
train_metric_writer = config.get_metrics_writer(
"learner_train", index_of_training_event=train_context.train_task_index)
eval_metric_writer = config.get_metrics_writer(
"learner_eval", index_of_training_event=train_context.train_task_index)
cost, _, train_state, flops_used = fit_with_early_stopping(
initial_train_state=initial_train_state,
update_fn=jax.jit(update_fn),
loss_and_metrics_fn=model.loss_and_metrics[task_key],
train_iter_fn=train_iter_fn,
valid_iter_fn=valid_iter_fn,
validation_metric=config.validation_metric,
run_validation_every_n_steps=config.run_validation_every_n_steps,
early_stopping_grace=config.early_stopping_grace,
max_steps=config.max_steps,
train_metrics_writer=train_metric_writer,
validation_metrics_writer=eval_metric_writer,
write_checkpoint=write_checkpoint,
checkpoint_to_resume=checkpoint_to_resume)
resources_used = learner_interface.ResourceUsage(
floating_point_operations=flops_used)
train_metric_writer.flush()
train_metric_writer.close()
eval_metric_writer.flush()
eval_metric_writer.close()
# train states are converted to numpy before returning, since JAX arrays
# automatically get sent to GPU / TPU memory when they are unpickled, which
# can cause devices to run out of memory.
train_state_np = jax.tree_map(np.asarray, train_state)
return cost, train_state_np, resources_used
return cost_function
def _initialize_train_from_context(train_context, config, prng, model,
optimizer):
"""Initialize trainer state based on the context."""
if train_context.initial_train_state is not None:
logging.info("Initializing train state from a previous state")
return train_context.initial_train_state
else:
logging.info("Initializing a new train state")
load_params_fun = config.load_params_fn
if "load_params_fn_with_kwargs" in config:
load_params_fun = functools.partial(
config.load_params_fn_with_kwargs.fun,
**config.load_params_fn_with_kwargs.kwargs)
return trainer.init_train_state(
next(prng), model, optimizer, load_params_fun)
def _run_validation(
state: FitWithEarlyStoppingState,
valid_data_iter: Iterator[datasets.MiniBatch],
loss_and_metrics_fn: models.LossAndMetricsFn,
additional_diagnostics: Optional[Dict[str, Any]] = None,
) -> Tuple[float, Dict[str, Any]]:
"""Runs validation and returns the cost and metrics."""
start_time = time.monotonic()
metrics = _validate_batches(state.train_state, loss_and_metrics_fn,
valid_data_iter)
elapsed = time.monotonic() - start_time
metrics = jax.tree_map(np.mean, metrics)
metrics.update(
step=state.step,
validation_runtime_seconds=elapsed,
lr_decay_scale=state.lr_decay_scale,
lr_decay_steps=state.lr_decay_steps,
)
if additional_diagnostics:
metrics.update(additional_diagnostics)
logging.info(
"Validation completed in %.3f seconds.\n"
"Validation metrics for step %d:\n%s", elapsed, state.step,
"\n".join(f" {k}: {_prettify_value(v)}" for k, v in metrics.items()))
return float(metrics[state.validation_metric]), metrics
def _validate_batches(
train_state: trainer.TrainState,
loss_and_metrics_fn: models.LossAndMetricsFn,
batch_iter: dataloaders.BatchIterator,
) -> Dict[str, float]:
"""Perform a validation run and report the metrics computed."""
rng = jax.random.PRNGKey(0)
params = hk.data_structures.merge(train_state.trainable_params,
train_state.frozen_params)
all_diagnostics = []
for batch in batch_iter:
# If the task has a single label, then batch.label points to an array. If
# the task is binary multinomial, then this slot is not set. In that case,
# we get the label from batch.multi_label_one_hot which is a matrix with
# binary values.
targets = batch.label
if targets is None:
targets = batch.multi_label_one_hot
(_, diagnostics), _ = loss_and_metrics_fn(params, train_state.state, rng,
batch.image, targets, False)
diagnostics = jax.tree_map(lambda x: x.mean(), diagnostics)
all_diagnostics.append(diagnostics)
return jax.tree_map(lambda *x: np.array(x).mean(), *all_diagnostics)
def fit_with_early_stopping(
initial_train_state: trainer.TrainState,
update_fn: trainer.UpdateFn,
loss_and_metrics_fn: models.LossAndMetricsFn,
train_iter_fn: Callable[[], Iterator[datasets.MiniBatch]],
valid_iter_fn: Callable[[], Iterator[datasets.MiniBatch]],
validation_metric: str,
run_validation_every_n_steps: int,
early_stopping_grace: int,
max_steps: int,
train_metrics_writer: datawriter_interface.DataWriter,
validation_metrics_writer: datawriter_interface.DataWriter,
write_checkpoint: Callable[[FitWithEarlyStoppingState], None],
checkpoint_to_resume: Optional[FitWithEarlyStoppingState] = None,
additional_diagnostics: Optional[Dict[str, Any]] = None,
) -> Tuple[float, Dict[str, Any], trainer.TrainState, float]:
"""Fit model with early stopping and dynamic LR schduling."""
additional_diagnostics = additional_diagnostics or {}
if checkpoint_to_resume is None:
logging.info("Starting new train loop...")
state = FitWithEarlyStoppingState( # pytype: disable=wrong-arg-types # mapping-is-not-sequence
step=0,
best_age=0,
best_metric=np.inf,
train_state=initial_train_state,
best_train_state=None,
lr_decay_steps=0,
lr_decay_scale=jnp.ones([]),
validation_metric=validation_metric,
)
else:
logging.info("Resuming train loop from checkpoint...")
state: FitWithEarlyStoppingState = checkpoint_to_resume
step_timer = trainer.StepCountEstimator()
train_iter = train_iter_fn()
while state.step < max_steps:
start_time = time.monotonic()
batch = next(train_iter)
logging.log_every_n_seconds(logging.INFO,
"Step: %d/%d, Batch %s, Steps per second: %f",
LOG_INTERVAL_SECONDS, state.step + 1, max_steps,
batch, step_timer.estimated_steps_per_second())
state.train_state, train_metrics = update_fn(batch, state.train_state,
state.lr_decay_scale)
train_metrics = jax.tree_map(jnp.mean, train_metrics)
train_metrics.update(
step=state.step,
steps_per_second=step_timer.estimated_steps_per_second(),
lr_decay_scale=state.lr_decay_scale,
lr_decay_steps=state.lr_decay_steps,
)
train_metrics.update(additional_diagnostics)
train_metrics_writer.write(train_metrics)
if state.step % CHECKPOINT_INTERVAL == 0 and state.step != 0:
logging.info("Writing checkpoint at step %d", state.step)
write_checkpoint(state)
if state.step % run_validation_every_n_steps == 0:
validation_metric, valid_metrics = _run_validation(
state, valid_iter_fn(), loss_and_metrics_fn)
validation_metrics_writer.write(valid_metrics)
if validation_metric < state.best_metric:
state.best_metric = validation_metric
state.best_train_state = state.train_state
state.best_age = 0
else:
state.best_age += 1
if state.best_age >= early_stopping_grace:
if state.lr_decay_steps <= MAX_LR_DECAY_STEPS:
logging.info("Validation metrics plateaued, halfing learning rate.")
state.best_age = 0
state.lr_decay_steps += 1
state.lr_decay_scale /= 2
else:
logging.info("Validation metrics plateaued, stopping training.")
break
step_timer.add_measurement(time.monotonic() - start_time)
state.step += 1
logging.info("Running final validation.")
validation_metric, valid_metrics = _run_validation(state, valid_iter_fn(),
loss_and_metrics_fn)
validation_metrics_writer.write(valid_metrics)
if validation_metric < state.best_metric:
state.best_metric = validation_metric
state.best_train_state = state.train_state
state.best_age = 0
# TODO: Take validation FLOPs into account
train_flops = state.step * resources.estimate_flops(update_fn, batch,
state.train_state)
return validation_metric, valid_metrics, state.best_train_state, train_flops
def _get_train_state_for_finetuning(
config: ml_collections.ConfigDict,
task_key: tasks.TaskKey,
state: FinetuningLearnerState,
finetuning_metrics: datawriter_interface.DataWriter,
) -> Optional[trainer.TrainState]:
"""Optionally returns a train state to fine tune from."""
if config.finetuning.strategy is FinetuningStrategy.INDEPENDENT:
logging.info("For independent training, no initial train state is used %s",
task_key)
_write_finetuning_entry(finetuning_metrics, state, task_key, None)
return None
elif config.finetuning.strategy is FinetuningStrategy.PREVIOUS:
if not state.train_states:
logging.info(
"Finetuning enabled for %s, but there are no previous tasks.",
task_key)
_write_finetuning_entry(finetuning_metrics, state, task_key, None)
return None
else:
source_task, train_state_checkpoint_path = state.train_states[-1]
logging.info("Finetuning %s from previous task: %s.", task_key,
source_task)
train_state = trainer.restore_train_state(train_state_checkpoint_path)
_write_finetuning_entry(finetuning_metrics, state, task_key, source_task)
return train_state
raise ValueError(f"Unsupported strategy: {config.finetuning_strategy}")
def _verify_all_tasks_supported(task_keys: Iterable[tasks.TaskKey]) -> None:
unsupported_tasks = set(key.kind for key in task_keys) - SUPPORTED_TASK_KINDS
if unsupported_tasks:
raise NotImplementedError(
f"Got unsupported tasks: {unsupported_tasks}. "
"If required, you may use streams.FilteredStream "
"to construct a stream that removes cetain tasks.")
def _get_latest_train_state_for_predictions(
state: FinetuningLearnerState,
task_key: tasks.TaskKey) -> trainer.TrainState:
for key, train_state_checkpoint_path in reversed(state.train_states):
if key == task_key:
return trainer.restore_train_state(train_state_checkpoint_path)
raise ValueError(
f"Cannot compute predicions for task that has not been trained: {task_key}"
)
def _build_model(config: ml_collections.ConfigDict,
task_keys: Sequence[tasks.TaskKey]) -> models.Model:
"""Constructs the parameterized, trainable model."""
# In this learner, every task has its own set of parameters, and
# so the backbone should be identical for all heads.
return models.build_model(
functools.partial(config.model.ctor, **config.model.kwargs),
supported_tasks=task_keys,
image_resolution=config.image_resolution,
head_kwargs={"label_smoothing": config.label_smoothing})
def _write_finetuning_entry(
finetuning_metrics: datawriter_interface.DataWriter,
state: FinetuningLearnerState,
current_task: tasks.TaskKey,
finetune_from_task: Optional[tasks.TaskKey],
) -> None:
"""Write the selected task to finetune from."""
if finetune_from_task:
finetune_from_task_name = finetune_from_task.name
else:
finetune_from_task_name = None
finetuning_metrics.write({
"index_of_train_event": len(state.train_states),
"current_task": current_task.name,
"finetune_from_task": finetune_from_task_name,
})
def _prettify_value(value):
try:
return f"{value:.2f}"
except (ValueError, TypeError):
return f"{value}"
|
dm_nevis-master
|
experiments_jax/learners/finetuning/finetuning_learner.py
|
dm_nevis-master
|
experiments_jax/learners/finetuning/__init__.py
|
|
dm_nevis-master
|
experiments_jax/learners/finetuning_dknn/__init__.py
|
|
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A finetuning learner.
This learner supports a number of strategies for initializing the train state
for each sequential training task. One such strategy is "independent". In this
case, each model is trained independently.
"""
import copy
import dataclasses
import enum
import functools
import os
import time
from typing import Any, Callable, Dict, Iterable, Iterator, List, Optional, Sequence, Tuple
from absl import logging
import chex
from dm_nevis.benchmarker.datasets import datasets
from dm_nevis.benchmarker.datasets import streams
from dm_nevis.benchmarker.datasets import tasks
from dm_nevis.benchmarker.environment import datawriter_interface
from dm_nevis.benchmarker.environment import logging_writer
from dm_nevis.benchmarker.learners import learner_interface
from experiments_jax import experiment
from experiments_jax.training import dataloaders
from experiments_jax.training import hyperparameter_searcher
from experiments_jax.training import learning_rate_schedules
from experiments_jax.training import models
from experiments_jax.training import resources
from experiments_jax.training import trainer
from experiments_jax.training import transfer_matrix_from_knn
from experiments_jax.training import transfer_oracle
import haiku as hk
import jax
import jax.numpy as jnp
import ml_collections
import numpy as np
import optax
import tensorflow as tf
CHECKPOINT_INTERVAL = 10_000
LOG_INTERVAL_SECONDS = 5
MAX_LR_DECAY_STEPS = 4
DUMMY_TASK_NAME_RANDOM_PARAMS = "randomly_initialized_params"
SUPPORTED_TASK_KINDS = frozenset([
tasks.TaskKind.CLASSIFICATION,
tasks.TaskKind.MULTI_LABEL_CLASSIFICATION,
])
SearchSpace = Iterable[hyperparameter_searcher.Overrides]
class FinetuningStrategy(enum.Enum):
INDEPENDENT = 0 # Randomly initialize the state for each model.
PREVIOUS = 1  # Always initialize from the train state of the previous task.
TRANSFER_MATRIX = 2 # Oracle based on pairwise transfer matrix
DYNAMIC_KNN_TRANSFER_MATRIX = 3 # Estimate task transfer matrix using a KNN.
class BatchSizeStrategy(enum.Enum):
"""The strategy for selecting the training batch size.
Attributes:
FIXED: The fixed batch size strategy always uses the size directly from the
config.
ADAPTIVE: The batch size is proportional to the dataset size.
"""
FIXED = "fixed"
ADAPTIVE = "adaptive"
def learner_builder(
dataset_lookup_builder: experiment.DatasetLookupBuilderFn,
learner_config: ml_collections.ConfigDict
) -> Tuple[experiment.LearnerBuilderFn, experiment.ProgramStopper]:
"""Prepares the learner to run on launchpad."""
def cost_function_builder():
return _cost_function_builder(dataset_lookup_builder)
workers = learner_config.distributed_worker_builder(
cost_function_builder,
num_workers=learner_config.num_workers,
)
def stopper():
logging.info("Exiting...")
def build_learner():
dataset_lookup, task_keys = dataset_lookup_builder()
return build_finetuning_learner(
learner_config,
dataset_lookup,
task_keys,
workers,
)
return build_learner, stopper
@dataclasses.dataclass(frozen=True)
class TrainingContext:
train_task_index: int
config: ml_collections.ConfigDict
event: streams.TrainingEvent
prng_seed: chex.PRNGKey = dataclasses.field(repr=False)
initial_train_state: Optional[trainer.TrainState] = dataclasses.field(
repr=False)
@dataclasses.dataclass(frozen=True)
class FinetuningLearnerState:
"""A dataclass to hold the state of the learner.
Attributes:
rng: The state of the PRNG.
seen_train_events: The (ordered) sequence of train events encountered by
this learner.
train_states: The sequence of tasks and the achieved final train states or
the checkpoint paths to the train states.
"""
rng: chex.PRNGKey
seen_train_events: List[streams.TrainingEvent]
train_states: List[Tuple[tasks.TaskKey, str]]
def build_finetuning_learner(
config: ml_collections.ConfigDict,
dataset_lookup: experiment.DatasetLookupFn,
task_keys: Sequence[tasks.TaskKey],
workers: hyperparameter_searcher.HyperParameterTunerWorkers,
) -> learner_interface.Learner:
"""Builds the learner.
Args:
config: The configuration to use for this learner.
dataset_lookup: A function used to construct train and predict datasets.
task_keys: The tasks that the returned learner should support.
workers: Workers for the hyper parameter tuner.
Returns:
A learner satisfying the learner_interface.Learner interface.
"""
_verify_all_tasks_supported(task_keys)
searcher = hyperparameter_searcher.HyperparameterTuner(workers)
model = _build_model(config, task_keys)
finetuning_metrics = _finetuning_metrics_writer()
return learner_interface.Learner(
init=functools.partial(
init,
config=config,
),
train=functools.partial(
train,
dataset_lookup=dataset_lookup,
config=config,
searcher=searcher,
model=model,
finetuning_metrics=finetuning_metrics,
),
predict=functools.partial(
predict,
config=config,
model=model,
dataset_lookup=dataset_lookup,
),
)
def init(
*,
config: ml_collections.ConfigDict,
) -> learner_interface.LearnerState:
"""A function to initialize the train state for the learner.
Args:
config: The learner configuration.
Returns:
The initial learner state, before the learner has seen any training data.
"""
return FinetuningLearnerState(
rng=jax.random.PRNGKey(config.prng_seed),
seen_train_events=[],
train_states=[],
)
def train(
event: streams.TrainingEvent,
state: learner_interface.LearnerState,
write_checkpoint: learner_interface.CheckpointFn,
*,
dataset_lookup: experiment.DatasetLookupFn,
checkpoint_to_resume: learner_interface.Checkpoint = None,
config: ml_collections.ConfigDict,
searcher: hyperparameter_searcher.HyperparameterTuner,
model: models.Model,
finetuning_metrics: datawriter_interface.DataWriter,
) -> Tuple[learner_interface.LearnerState, learner_interface.ResourceUsage]:
"""Trains the learner given the given dataset.
Args:
event: The training event that the learner should read training data from.
state: The learner's state before training began.
write_checkpoint: A function to write intermediate checkpoints during this
training event.
dataset_lookup: A lookup function for fetching the dataset by key.
checkpoint_to_resume: If this training event was previously interrupted,
then this training event may be initialized from a checkpoint that was
previously written by the write_checkpoint function.
config: The learner config.
searcher: A hyper parameter searcher.
model: The model.
finetuning_metrics: A metrics writer for recording which state (if any) was
selected to finetune from.
Returns:
A new learner state, containing the knowledge accrued during training, along
with the resources used during training.
"""
task_key = dataset_lookup(event.train_dataset_key).task_key
initial_train_state = _get_train_state_for_finetuning(config, task_key, state,
event, dataset_lookup,
model,
finetuning_metrics)
rng, key = jax.random.split(state.rng)
context = TrainingContext(
train_task_index=len(state.seen_train_events),
config=config,
event=event,
prng_seed=key,
initial_train_state=initial_train_state,
)
search_space = _build_search_space(
config.search_space_creator,
seed=len(state.seen_train_events),
num_trials=config.trials_per_task)
result = searcher.minimize(
context=context,
search_space=search_space,
checkpoint_to_resume=checkpoint_to_resume,
write_checkpoint=write_checkpoint,
)
logging.info("Min-cost solution: %s, %s", result.cost, result.overrides)
train_state_checkpoint_path = os.path.join(
config.train_states_checkpoint_path,
f"train_task_index_{len(state.seen_train_events)}_{task_key.name}.pkl")
trainer.save_train_state(train_state_checkpoint_path, task_key,
result.outputs)
return (
dataclasses.replace(
state,
rng=rng,
train_states=[
*state.train_states, (task_key, train_state_checkpoint_path)
],
seen_train_events=[*state.seen_train_events, event],
),
result.resources_used,
)
def predict(
event: streams.PredictionEvent,
state: learner_interface.LearnerState,
*,
config: ml_collections.ConfigDict,
model: models.Model,
dataset_lookup: experiment.DatasetLookupFn,
) -> Iterable[learner_interface.Predictions]:
"""Compute predictions for each example in the referenced dataset.
Args:
event: An event containing a dataset key to compute predictions for.
state: The state of the learner, containing all knowledge accrued by the
learner as it was exposed to previous training events.
config: The config of the learner.
model: A model implementing the underlying architecture of the learner.
dataset_lookup: A function to fetch datasets by key.
Yields:
Batches of predictions from the model, given the learner state, over the
dataset loaded from the event.
"""
dataset = dataset_lookup(event.dataset_key)
task_key = dataset.task_key
eval_augment_fn = functools.partial(config.augmentations.eval.ctor,
**config.augmentations.eval.kwargs)
batch_iter = dataloaders.build_prediction_iterator(
dataset, eval_augment_fn, config.batch.kwargs.batch_size)
train_state = _get_latest_train_state_for_predictions(state, task_key)
params = hk.data_structures.merge(train_state.trainable_params,
train_state.frozen_params)
@jax.jit
def compute_predictions(rng_key, image):
return model.predict[task_key](params, train_state.state, rng_key, image,
False)[0]
rng_seq = hk.PRNGSequence(train_state.rng)
completed = 0
for batch in batch_iter():
logging.log_every_n_seconds(logging.INFO, "Completed predictions: %d/%d",
10, completed, dataset.num_examples)
completed += batch.image.shape[0]
output = compute_predictions(next(rng_seq), batch.image)
output = jax.tree_map(np.array, output)
yield learner_interface.Predictions(batch=batch, output=output)
@dataclasses.dataclass
class FitWithEarlyStoppingState:
step: int
train_state: trainer.TrainState
best_age: int
best_metric: np.number
best_train_state: Optional[trainer.TrainState]
lr_decay_steps: int
lr_decay_scale: np.number
validation_metric: str
overrides: Sequence[hyperparameter_searcher.Overrides]
def _cost_function_builder(
dataset_lookup_builder: experiment.DatasetLookupBuilderFn) -> Any:
"""Construct the cost function used in the hyper search."""
def cost_function(context,
overrides,
*,
write_checkpoint,
checkpoint_to_resume=None):
logging.info("Detected devices: %s", jax.devices())
logging.info("Training model with overrides %s", overrides)
train_context: TrainingContext = context
del context
base_config = train_context.config
logging.info("Applying config overrides: %s", overrides)
config = _apply_overrides(base_config, overrides)
logging.info("Computing cost function with learner config: %s", config)
tf.config.set_visible_devices([], "GPU")
dataset_lookup_fn, task_keys = dataset_lookup_builder()
model = _build_model(config, task_keys)
prng = hk.PRNGSequence(train_context.prng_seed)
# Data for work-unit
train_dataset = dataset_lookup_fn(train_context.event.train_dataset_key)
valid_dataset = dataset_lookup_fn(train_context.event.dev_dataset_key)
task_key = train_dataset.task_key
train_augment_fn = functools.partial(config.augmentations.train.ctor,
**config.augmentations.train.kwargs)
eval_augment_fn = functools.partial(config.augmentations.eval.ctor,
**config.augmentations.eval.kwargs)
adapted_batch_size = _adapt_batch_size_to_dataset_size(
config, train_dataset.num_examples)
train_iter_fn = dataloaders.build_train_iterator(train_dataset,
train_augment_fn,
adapted_batch_size)
valid_iter_fn = dataloaders.build_prediction_iterator(
valid_dataset, eval_augment_fn, config.batch.kwargs.batch_size)
steps_per_epoch = train_dataset.num_examples // adapted_batch_size + 1
# If learning rate schedule is provided, we use it.
learning_rate_schedule = learning_rate_schedules.build_learning_rate_schedule(
config.optimization.learning_rate_schedule.name,
config.optimization.learning_rate_schedule.init_learning_rate,
steps_per_epoch, config.max_steps,
config.optimization.learning_rate_schedule.kwargs)
if "learning_rate" in config.optimization.optimizer.kwargs:
raise ValueError(
"`learning_rate` argument must not be specified in the optimizer as"
" it would be overridden by the learning rate schedule.")
optimizer = config.optimization.optimizer.ctor(
learning_rate=learning_rate_schedule,
**config.optimization.optimizer.kwargs)
update_fn = trainer.build_update_fn(task_key, model, optimizer)
initial_train_state = _initialize_train_from_context(
train_context, config, prng, model, optimizer)
opt_state = optimizer.init(initial_train_state.trainable_params)
initial_train_state = dataclasses.replace(
initial_train_state, opt_state=opt_state)
datawriter_train = config.get_metrics_writer(
"learner_train",
index_of_training_event=train_context.train_task_index,
overrides=overrides)
datawriter_eval = config.get_metrics_writer(
"learner_eval",
index_of_training_event=train_context.train_task_index,
overrides=overrides)
training_context_for_metrics = {"adapted_batch_size": adapted_batch_size}
cost, _, train_state, flops_used = fit_with_early_stopping(
initial_train_state=initial_train_state,
update_fn=jax.jit(update_fn),
loss_and_metrics_fn=model.loss_and_metrics[task_key],
train_iter_fn=train_iter_fn,
valid_iter_fn=valid_iter_fn,
validation_metric=config.validation_metric,
run_validation_every_n_steps=config.run_validation_every_n_steps,
early_stopping_grace=config.early_stopping_grace,
max_steps=config.max_steps,
training_context_for_metrics=training_context_for_metrics,
train_metrics_writer=datawriter_train,
validation_metrics_writer=datawriter_eval,
overrides=overrides,
write_checkpoint=write_checkpoint,
checkpoint_to_resume=checkpoint_to_resume)
resources_used = learner_interface.ResourceUsage(
floating_point_operations=flops_used)
datawriter_train.flush()
datawriter_train.close()
datawriter_eval.flush()
datawriter_eval.close()
# train states are converted to numpy before returning, since JAX arrays
# automatically get sent to GPU / TPU memory when they are unpickled, which
# can cause devices to run out of memory.
train_state_np = jax.tree_map(np.asarray, train_state)
return cost, train_state_np, resources_used
return cost_function
def _initialize_train_from_context(train_context, config, prng, model,
optimizer):
"""Initialize trainer state based on the context."""
if train_context.initial_train_state is not None:
logging.info("Initializing train state from a previous state")
return train_context.initial_train_state
else:
logging.info("Initializing a new train state")
load_params_fun = config.load_params_fn
if "load_params_fn_with_kwargs" in config:
load_params_fun = functools.partial(
config.load_params_fn_with_kwargs.fun,
**config.load_params_fn_with_kwargs.kwargs)
return trainer.init_train_state(
next(prng), model, optimizer, load_params_fun)
def _run_validation(
state: FitWithEarlyStoppingState,
valid_data_iter: Iterator[datasets.MiniBatch],
loss_and_metrics_fn: models.LossAndMetricsFn,
additional_diagnostics: Optional[Dict[str, Any]] = None,
) -> Tuple[float, Dict[str, Any]]:
"""Runs validation and returns the cost and metrics."""
start_time = time.monotonic()
metrics = _validate_batches(state.train_state, loss_and_metrics_fn,
valid_data_iter)
elapsed = time.monotonic() - start_time
metrics = jax.tree_map(np.mean, metrics)
metrics.update(
state.overrides,
step=state.step,
validation_runtime_seconds=elapsed,
lr_decay_scale=state.lr_decay_scale,
lr_decay_steps=state.lr_decay_steps,
)
if additional_diagnostics:
metrics.update(additional_diagnostics)
logging.info(
"Validation completed in %.3f seconds.\n"
"Validation metrics for step %d:\n%s", elapsed, state.step,
"\n".join(f" {k}: {_prettify_value(v)}" for k, v in metrics.items()))
return float(metrics[state.validation_metric]), metrics
def _validate_batches(
train_state: trainer.TrainState,
loss_and_metrics_fn: models.LossAndMetricsFn,
batch_iter: dataloaders.BatchIterator,
) -> Dict[str, float]:
"""Perform a validation run and report the metrics computed."""
rng = jax.random.PRNGKey(0)
params = hk.data_structures.merge(train_state.trainable_params,
train_state.frozen_params)
all_diagnostics = []
for batch in batch_iter:
# If the task has a single label, then batch.label points to an array. If
# the task is binary multinomial, then this slot is not set. In that case,
# we get the label from batch.multi_label_one_hot which is a matrix with
# binary values.
targets = batch.label
if targets is None:
targets = batch.multi_label_one_hot
(_, diagnostics), _ = loss_and_metrics_fn(params, train_state.state, rng,
batch.image, targets, False)
diagnostics = jax.tree_map(lambda x: x.mean(), diagnostics)
all_diagnostics.append(diagnostics)
return jax.tree_map(lambda *x: np.array(x).mean(), *all_diagnostics)
def fit_with_early_stopping(
initial_train_state: trainer.TrainState,
update_fn: trainer.UpdateFn,
loss_and_metrics_fn: models.LossAndMetricsFn,
train_iter_fn: Callable[[], Iterator[datasets.MiniBatch]],
valid_iter_fn: Callable[[], Iterator[datasets.MiniBatch]],
validation_metric: str,
run_validation_every_n_steps: int,
early_stopping_grace: int,
max_steps: int,
training_context_for_metrics: Dict[str, Any],
train_metrics_writer: datawriter_interface.DataWriter,
validation_metrics_writer: datawriter_interface.DataWriter,
write_checkpoint: Callable[[FitWithEarlyStoppingState], None],
overrides: Optional[hyperparameter_searcher.Overrides] = None,
checkpoint_to_resume: Optional[FitWithEarlyStoppingState] = None,
additional_diagnostics: Optional[Dict[str, Any]] = None,
) -> Tuple[float, Dict[str, Any], trainer.TrainState, float]:
"""Fit model with early stopping and dynamic LR schduling."""
overrides = overrides or {}
additional_diagnostics = additional_diagnostics or {}
# TODO: This is different from the agreed-upon plan for
# learning-rate decay and early stopping (http://shortn/_zxVC5Kbv6c).
#
# Currently implemented logic:
# * perform evaluation on valid_iter every `run_validation_every_n_steps`
# steps. Lower bounding it to MIN_VALIDATION_PERIOD ensures that even on
# very small datasets we perform a certain amount of gradient steps before
# stopping. Without this tweak we might stop too aggressively due to
# high noise from individual gradient-steps.
# * if no improvement in validation metric for >= `early_stopping_grace`
# iterations; either halve the learning rate, or stop training if the learning
# rate is already less than 1/10th of the initial LR.
#
# At the end of the run, the best model that *minimizes* the validation metric
# will be returned.
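# For illustration, with the defaults above (MAX_LR_DECAY_STEPS=4) the decay
# scale follows 1 -> 1/2 -> 1/4 -> 1/8 -> 1/16 -> 1/32 across successive
# plateaus, and the plateau after that stops training.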
if checkpoint_to_resume is None:
logging.info("Starting new train loop...")
state = FitWithEarlyStoppingState( # pytype: disable=wrong-arg-types # mapping-is-not-sequence
step=0,
best_age=0,
best_metric=np.inf,
train_state=initial_train_state,
best_train_state=None,
lr_decay_steps=0,
lr_decay_scale=jnp.ones([]),
validation_metric=validation_metric,
overrides=overrides,
)
else:
logging.info("Resuming train loop from checkpoint...")
state: FitWithEarlyStoppingState = checkpoint_to_resume
step_timer = trainer.StepCountEstimator()
train_iter = train_iter_fn()
while state.step < max_steps:
start_time = time.monotonic()
batch = next(train_iter)
logging.log_every_n_seconds(logging.INFO,
"Step: %d/%d, Batch %s, Steps per second: %f",
LOG_INTERVAL_SECONDS, state.step + 1, max_steps,
batch, step_timer.estimated_steps_per_second())
state.train_state, train_metrics = update_fn(batch, state.train_state,
state.lr_decay_scale)
train_metrics = jax.tree_map(jnp.mean, train_metrics)
train_metrics.update(
overrides,
step=state.step,
steps_per_second=step_timer.estimated_steps_per_second(),
lr_decay_scale=state.lr_decay_scale,
lr_decay_steps=state.lr_decay_steps,
)
train_metrics.update(training_context_for_metrics)
train_metrics.update(additional_diagnostics)
train_metrics_writer.write(train_metrics)
if state.step % CHECKPOINT_INTERVAL == 0 and state.step != 0:
logging.info("Writing checkpoint at step %d", state.step)
write_checkpoint(state)
if state.step % run_validation_every_n_steps == 0:
validation_metric, valid_metrics = _run_validation(
state, valid_iter_fn(), loss_and_metrics_fn)
validation_metrics_writer.write(valid_metrics)
if validation_metric < state.best_metric:
state.best_metric = validation_metric
state.best_train_state = state.train_state
state.best_age = 0
else:
state.best_age += 1
if state.best_age >= early_stopping_grace:
if state.lr_decay_steps <= MAX_LR_DECAY_STEPS:
logging.info("Validation metrics plateaued, halfing learning rate.")
state.best_age = 0
state.lr_decay_steps += 1
state.lr_decay_scale /= 2
else:
logging.info("Validation metrics plateaued, stopping training.")
break
step_timer.add_measurement(time.monotonic() - start_time)
state.step += 1
logging.info("Running final validation.")
validation_metric, valid_metrics = _run_validation(state, valid_iter_fn(),
loss_and_metrics_fn)
validation_metrics_writer.write(valid_metrics)
if validation_metric < state.best_metric:
state.best_metric = validation_metric
state.best_train_state = state.train_state
state.best_age = 0
# TODO: Take validation FLOPs into account
train_flops = state.step * resources.estimate_flops(update_fn, batch,
state.train_state)
return validation_metric, valid_metrics, state.best_train_state, train_flops
def _get_train_state_for_finetuning(
config: ml_collections.ConfigDict,
task_key: tasks.TaskKey,
state: FinetuningLearnerState,
event: streams.TrainingEvent,
dataset_lookup: experiment.DatasetLookupFn,
model: models.Model,
finetuning_metrics: datawriter_interface.DataWriter,
) -> Optional[trainer.TrainState]:
"""Optionally returns a train state to fine tune from."""
if config.finetuning.strategy is FinetuningStrategy.INDEPENDENT:
logging.info("For independent training, no initial train state is used %s",
task_key)
_write_finetuning_entry(finetuning_metrics, state, task_key, None)
return None
elif config.finetuning.strategy is FinetuningStrategy.PREVIOUS:
if not state.train_states:
logging.info(
"Finetuning enabled for %s, but there are no previous tasks.",
task_key)
_write_finetuning_entry(finetuning_metrics, state, task_key, None)
return None
else:
source_task, train_state_checkpoint_path = state.train_states[-1]
logging.info("Finetuning %s from previous task: %s.", task_key,
source_task)
train_state = trainer.restore_train_state(train_state_checkpoint_path)
_write_finetuning_entry(finetuning_metrics, state, task_key, source_task)
return train_state
elif config.finetuning.strategy is FinetuningStrategy.TRANSFER_MATRIX:
m = transfer_oracle.TransferMatrix.from_file(
config.finetuning.transfer_matrix_file)
train_state_checkpoint_path, source_task = _init_from_transfer_matrix(
m, task_key, state.train_states)
train_state = trainer.restore_train_state(train_state_checkpoint_path)
_write_finetuning_entry(finetuning_metrics, state, task_key, source_task)
return train_state
elif config.finetuning.strategy is FinetuningStrategy.DYNAMIC_KNN_TRANSFER_MATRIX:
train_state_checkpoint_path, source_task = _init_from_knn_transfer(
config, task_key, state, event, dataset_lookup, model)
train_state = trainer.restore_train_state(train_state_checkpoint_path)
_write_finetuning_entry(finetuning_metrics, state, task_key, source_task)
return train_state
raise ValueError(f"Unsupported strategy: {config.finetuning_strategy}")
def _init_from_knn_transfer(
config: ml_collections.ConfigDict,
task_key: tasks.TaskKey,
state: FinetuningLearnerState,
event: streams.TrainingEvent,
dataset_lookup: experiment.DatasetLookupFn,
model: models.Model,
) -> Tuple[Optional[str], Optional[tasks.TaskKey]]:
"""Computes a transfer matrix by evaluating embeddings with a KNN."""
# TODO: Return FLOPS used and include those.
logging.info("Using KNN finetuning strategy...")
# For some tasks, the best task to transfer from may actually be parameters
# initialized at random. This is by definition the case for the first
# task.
rng = jax.random.PRNGKey(config.prng_seed + len(state.train_states))
randomly_initialized_state = trainer.init_train_state(
rng,
model,
optax.identity(),
config.load_params_fn,
log_model_summary=False,
)
randomly_initialized_task = tasks.TaskKey(
DUMMY_TASK_NAME_RANDOM_PARAMS,
tasks.TaskKind.CLASSIFICATION,
tasks.ClassificationMetadata(1),
)
available_train_states = [
*state.train_states,
(randomly_initialized_task, randomly_initialized_state),
]
@hk.transform_with_state
def forward(image):
backbone = config.model.ctor(**config.model.kwargs, name="backbone")
# Note(rhemsley): we set is_training, since this ensures that the
# model will work even for randomly initialized models where, for example,
# no statistics for batch norm have yet been accumulated. If we were to
# use is_training=False in that case, we would get NaN errors.
return backbone(image, is_training=True)
def embedding_fn(train_state, batch):
if isinstance(train_state, str):
train_state = trainer.restore_train_state(train_state)
params = hk.data_structures.merge(train_state.trainable_params,
train_state.frozen_params)
result, _ = jax.jit(forward.apply)(
params,
train_state.state,
train_state.rng,
batch.image,
)
return np.array(result)
preprocessing_fn = functools.partial(config.augmentations.eval.ctor,
**config.augmentations.eval.kwargs)
m = transfer_matrix_from_knn.compute_transfer_matrix_using_knn_classifier(
embedding_fn,
available_train_states,
dataset_lookup(event.train_dataset_key),
dataset_lookup(event.dev_dataset_key),
preprocessing_fn=preprocessing_fn,
batch_size=config.finetuning.batch_size_embed_for_knn,
)
return _init_from_transfer_matrix(m, task_key, state.train_states)
def _init_from_transfer_matrix(
transfer_matrix: transfer_oracle.TransferMatrix,
task_key: tasks.TaskKey,
train_states: Sequence[Tuple[tasks.TaskKey, str]],
) -> Tuple[Optional[str], Optional[tasks.TaskKey]]:
"""Select the best train state to initialize from, given a transfer matrix."""
for source_key, _ in transfer_matrix.transfer_tasks(task_key):
# TODO: We might want to filter out source-tasks with
# negative transfer. But that information is not always available.
# A KNN based transfer matrix for example can generally only rank
# source-tasks, but does not provide a cut-off information.
for a_source_key, a_train_state_checkpoint_path in train_states:
if a_source_key == source_key:
logging.info("Transfer Matrix: Finetuning %s from previous task %s",
task_key.name, source_key.name)
return a_train_state_checkpoint_path, source_key
logging.info(
"Transfer Matrix: No source task for target %s, training from scratch.",
task_key)
return None, None
def _verify_all_tasks_supported(task_keys: Iterable[tasks.TaskKey]) -> None:
unsupported_tasks = set(key.kind for key in task_keys) - SUPPORTED_TASK_KINDS
if unsupported_tasks:
raise NotImplementedError(
f"Got unsupported tasks: {unsupported_tasks}. "
"If required, you may use streams.FilteredStream "
"to construct a stream that removes cetain tasks.")
def _get_latest_train_state_for_predictions(
state: FinetuningLearnerState,
task_key: tasks.TaskKey) -> trainer.TrainState:
for key, train_state_checkpoint_path in reversed(state.train_states):
if key == task_key:
return trainer.restore_train_state(train_state_checkpoint_path)
raise ValueError(
f"Cannot compute predicions for task that has not been trained: {task_key}"
)
def _build_model(config: ml_collections.ConfigDict,
task_keys: Sequence[tasks.TaskKey]) -> models.Model:
"""Constructs the parameterized, trainable model."""
  # In this learner, every task ends up with its own set of parameters, so a
  # single shared backbone architecture can serve all of the task heads.
return models.build_model(
functools.partial(config.model.ctor, **config.model.kwargs),
supported_tasks=task_keys,
image_resolution=config.image_resolution,
head_kwargs={"label_smoothing": config.label_smoothing})
def _apply_overrides(
base_config: ml_collections.ConfigDict,
overrides: Any,
) -> ml_collections.ConfigDict:
"""Creates all configs from a sweep."""
cfg = copy.deepcopy(base_config)
cfg.update_from_flattened_dict(overrides)
cfg = cfg.copy_and_resolve_references()
return cfg
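# Illustrative sketch (not used by the learner): `update_from_flattened_dict`
# accepts dotted-path keys into the nested config, which is the format the
# hyperparameter overrides arrive in. The config values below are invented for
# the example only.
def _example_apply_overrides() -> ml_collections.ConfigDict:
  base = ml_collections.ConfigDict(
      {"optimization": {"init_learning_rate": 0.1}})
  overridden = _apply_overrides(
      base, {"optimization.init_learning_rate": 0.01})
  assert overridden.optimization.init_learning_rate == 0.01
  return overridden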
def _build_search_space(
search_space_creator: Callable[[int, int], SearchSpace],
num_trials: int,
*,
seed: int,
) -> Sequence[hyperparameter_searcher.Overrides]:
"""Constructs the hyperparameter search space for an individual task.
The overrides are applied to the learner's config just before training begins.
This means that there are some values that could be overridden that would not
actually have any effect. For example, overriding the number of trials would
have no effect, since these overrides are applied _after_ that value has
already been used.
Unlike the case for XManager, it is crucial that the returned search space
  be a deterministic sequence. If the search space were nondeterministic, it
  would not be possible to resume after a preemption, since existing computed
  results could no longer be unified with new results unless they share the
  same search space.
Args:
search_space_creator: A callable of seed and num_trials producing an
iterable over overrides.
    num_trials: The number of points to sample from the search space
      distribution.
    seed: The random seed used for sampling. The output of this function must
      be deterministic given the values of the seed and num_trials.
Returns:
A deterministic sequence of key, value pairs for overriding the learner
config. Note that by default hyper returns an iterator, but we require a
sequence. Otherwise, the search space may only be traversed once.
"""
return list(search_space_creator(seed, num_trials))
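# Illustrative sketch of a creator with the required determinism property: the
# same (seed, num_trials) pair always yields the same sequence of overrides.
# The dotted override key and the mapping shape used here are assumptions for
# the example; real creators are built from the `hype` sweep helpers.
def _example_search_space_creator(seed: int, num_trials: int):
  rng = np.random.default_rng(seed)
  learning_rates = 10.0 ** rng.uniform(-4.0, -1.0, size=num_trials)
  return [
      {"optimization.learning_rate_schedule.init_learning_rate": float(lr)}
      for lr in learning_rates
  ]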
def _adapt_batch_size_to_dataset_size(config: ml_collections.ConfigDict,
dataset_size: int) -> int:
"""Returns the training batch size according to the requested strategy.
Args:
    config: The experiment config. The function specifically uses
      config.batch, which is assumed to have two fields: `type`, the batching
      strategy, and `kwargs`, its required arguments (`batch_size` for the
      fixed strategy; `batch_size`, `max_batch_size` and `size_proportion` for
      the adaptive strategy).
dataset_size: The dataset size used to compute the adaptive batch size when
the adaptive strategy is used.
Returns:
An integer corresponding to the batch size to use for training.
"""
if config.batch.type == BatchSizeStrategy.FIXED:
return config.batch.kwargs.batch_size
elif config.batch.type == BatchSizeStrategy.ADAPTIVE:
return min(
config.batch.kwargs.max_batch_size,
max(
16,
int(2**int(
np.log2(config.batch.kwargs.size_proportion * dataset_size)))))
raise ValueError("Unknown batch size type, should be fixed or adaptive.")
def _finetuning_metrics_writer() -> datawriter_interface.DataWriter:
"""Create a metrics writer to write information about selected tasks."""
return logging_writer.LoggingWriter("finetuning_metrics")
def _write_finetuning_entry(
finetuning_metrics: datawriter_interface.DataWriter,
state: FinetuningLearnerState,
current_task: tasks.TaskKey,
finetune_from_task: Optional[tasks.TaskKey],
) -> None:
"""Write to a dataframe the selected task to finetune from."""
if finetune_from_task:
finetune_from_task_name = finetune_from_task.name
else:
finetune_from_task_name = None
finetuning_metrics.write({
"index_of_train_event": len(state.train_states),
"current_task": current_task.name,
"finetune_from_task": finetune_from_task_name,
})
def _prettify_value(value):
try:
return f"{value:.2f}"
  except (ValueError, TypeError):
return f"{value}"
|
dm_nevis-master
|
experiments_jax/learners/finetuning_dknn/finetuning_dknn_learner.py
|
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Pretrain learner on only imagenet using a ResNet18 (cheap architecture).
The resulting checkpoint can then be taken as initialization for all subsequent
experiments using the pre-trained baseline (PT).
"""
import functools
import os
from dm_nevis.benchmarker.datasets import streams
from dm_nevis.benchmarker.datasets import tasks
from dm_nevis.benchmarker.datasets import test_stream
from dm_nevis.benchmarker.environment import logger_utils
from experiments_jax.learners.finetuning import finetuning_learner
from experiments_jax.training import augmentations
from experiments_jax.training import modules
from experiments_jax.training import optimizers
from experiments_jax.training import resnet
from dm_nevis.streams import nevis_stream
import ml_collections
IMAGE_SIZE = 64
DEFAULT_MAX_STEPS = 50_000
DEFAULT_WARMUP_EPOCHS = 7
DEFAULT_EARLY_STOPPING_GRACE = 500_000
DEFAULT_CHECKPOINT_DIR = os.environ.get('NEVIS_CHECKPOINT_DIR',
'/tmp/nevis_checkpoint_dir')
def get_config() -> ml_collections.ConfigDict:
"""The learner config, satisfying the `experiments.LearnerConfig` interface.
"""
tensorboard_log_root = logger_utils.generate_tensorboard_log_root()
metrics_logger_fn = functools.partial(logger_utils.get_metrics_writer,
tensorboard_log_root)
config = ml_collections.ConfigDict({
'experiment': {
'resume_from_checkpoint_path': None,
'stream': {
'ctor': streams.FilteredStream,
'kwargs': {
'stream_ctor':
nevis_stream.NevisStream,
'supported_task_kinds': [tasks.TaskKind.CLASSIFICATION],
'stream_variant':
nevis_stream.NevisStreamVariant.IMAGENET_ONLY,
'predict_event_splits': (
nevis_stream.Split.DEV,
nevis_stream.Split.DEV_TEST,
nevis_stream.Split.TEST,
),
},
},
'learner': {
'learner_builder': finetuning_learner.learner_builder,
'config': {
'train_states_checkpoint_path': DEFAULT_CHECKPOINT_DIR,
'finetuning': {
# The strategy for initializing train state for each task.
'strategy':
finetuning_learner.FinetuningStrategy.INDEPENDENT,
},
'augmentations': {
'eval': {
'ctor': augmentations.chain,
'kwargs': {
'augmentation_ctors_with_kwargs': [
(augmentations
.central_crop_via_cropped_window_and_resize,
{
'size': (IMAGE_SIZE, IMAGE_SIZE)
}),
(augmentations.normalize, {}),
],
},
},
'train': {
'ctor': augmentations.chain,
'kwargs': {
'augmentation_ctors_with_kwargs': [
(augmentations
.random_crop_via_cropped_window_and_resize, {
'size': (IMAGE_SIZE, IMAGE_SIZE)
}),
(augmentations.random_flip, {}),
(augmentations.normalize, {}),
],
},
},
},
'model': {
'ctor': resnet.ResNet18,
'kwargs': {},
},
# Optionally load and/or freeze pretrained parameters.
'load_params_fn': None,
'optimization': {
                      # Optimizer; must not have a `learning_rate` argument, as
                      # it is overridden by `learning_rate_schedule`.
# If `learning_rate_schedule` is off, then `learning_rate`
# can be used.
'optimizer': {
'ctor': optimizers.sgdw,
'kwargs': {
# Overridden by the per-task hyper-optimization.
# Learning rate is specified by the learning rate
# schedule.
'momentum': 0.9,
'nesterov': True,
'weight_decay': 1e-4,
'mask': optimizers.default_weight_decay_mask,
},
},
# Learning rate schedule.
'learning_rate_schedule': {
'name': 'warmup_cosine_decay',
'init_learning_rate':
0.1, # Can be overridden by the learner.
'kwargs': {
'warmup_epochs': DEFAULT_WARMUP_EPOCHS,
'final_learning_rate': 1e-8,
},
},
},
'validation_metric': 'error',
# Early-stopping configuration
'max_steps': DEFAULT_MAX_STEPS,
'early_stopping_grace': DEFAULT_MAX_STEPS,
'run_validation_every_n_steps': 1_000,
'image_resolution': IMAGE_SIZE,
'label_smoothing': 0.0,
'prng_seed': 1,
'batch_size': 256,
'get_metrics_writer': metrics_logger_fn,
},
},
}
})
return config
def get_test_config() -> ml_collections.ConfigDict:
"""A config suitable for tests, designed to complete quickly on CPU."""
base_config = get_config()
# Use the test stream.
base_config.experiment.stream.ctor = test_stream.TestStream
base_config.experiment.stream.kwargs = {}
# Use a linear model
base_config.experiment.learner.config.model.ctor = modules.FlattenOnly
base_config.experiment.learner.config.model.kwargs = {}
# Run at most one optimization step with batch size 2
base_config.experiment.learner.config.max_steps = 1
base_config.experiment.learner.config.batch_size = 2
# Use the constant l.r. schedule
base_config.experiment.learner.config.optimization.learning_rate_schedule = ml_collections.ConfigDict(
{
'name': 'constant',
          'init_learning_rate': 0.1,  # Can be overridden by the learner.
'kwargs': {},
})
return base_config
|
dm_nevis-master
|
experiments_jax/configs/pretrain_imagenet.py
|
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
|
dm_nevis-master
|
experiments_jax/configs/__init__.py
|
"""Finetuning learner configuration which forms the basis for most experiments.
"""
import concurrent.futures
import functools
import os
from typing import Any, Optional
from dm_nevis.benchmarker.datasets import test_stream
from dm_nevis.benchmarker.environment import logger_utils
from experiments_jax.learners.finetuning_dknn import finetuning_dknn_learner as finetuning_learner
from experiments_jax.training import augmentations
from experiments_jax.training import hype
from experiments_jax.training import hyperparameter_searcher
from experiments_jax.training import modules
from experiments_jax.training import optimizers
from experiments_jax.training import resnet
from dm_nevis.streams import example_stream
import ml_collections
IMAGE_SIZE = 64
DEFAULT_MAX_STEPS = 50_000
DEFAULT_WARMUP_EPOCHS = 7
DEFAULT_EARLY_STOPPING_GRACE = 10
DEFAULT_CHECKPOINT_DIR = os.environ.get('NEVIS_CHECKPOINT_DIR',
'/tmp/nevis_checkpoint_dir')
def get_config(sweep_name: Optional[str] = None) -> ml_collections.ConfigDict:
"""The learner config, satisfying the `experiments.LearnerConfig` interface.
"""
experiment_name = 'Finetuning'
if sweep_name:
experiment_name += f' ({sweep_name})'
def search_space_creator(seed, num_trials):
return hype.zipit(
hype.sweep(
'optimization.learning_rate_schedule.init_learning_rate',
hype.log_uniform_random_values(
1e-4,
1e-1,
seed=seed + 1,
n=num_trials,
),
),
hype.sweep(
'label_smoothing',
hype.uniform_random_values(
0.0,
0.3,
seed=seed + 2,
n=num_trials,
),
))
tensorboard_log_root = logger_utils.generate_tensorboard_log_root()
metrics_logger_fn = functools.partial(logger_utils.get_metrics_writer,
tensorboard_log_root)
config = ml_collections.ConfigDict({
'sweep': _get_sweep(sweep_name),
'experiment': {
'resume_from_checkpoint_path': None,
'stream': {
'ctor': example_stream.ExampleStream,
'kwargs': {},
},
'learner': {
'learner_builder': finetuning_learner.learner_builder,
'config': {
'train_states_checkpoint_path': DEFAULT_CHECKPOINT_DIR,
'finetuning': {
# The strategy for initializing train state for each task.
'strategy':
finetuning_learner.FinetuningStrategy.INDEPENDENT,
'transfer_matrix_file':
None,
'batch_size_embed_for_knn':
128,
},
'augmentations': {
'eval': {
'ctor': augmentations.chain,
'kwargs': {
'augmentation_ctors_with_kwargs': [
(augmentations
.central_crop_via_cropped_window_and_resize,
{
'size': (IMAGE_SIZE, IMAGE_SIZE)
}),
(augmentations.normalize, {}),
],
},
},
'train': {
'ctor': augmentations.chain,
'kwargs': {
'augmentation_ctors_with_kwargs': [
(augmentations
.random_crop_via_cropped_window_and_resize, {
'size': (IMAGE_SIZE, IMAGE_SIZE)
}),
(augmentations.random_flip, {}),
(augmentations.normalize, {}),
],
},
},
},
'model': {
'ctor': resnet.CifarResNet34,
'kwargs': {},
},
# Optionally load and/or freeze pretrained parameters.
'load_params_fn':
None,
'optimization': {
                      # Optimizer; must not have a `learning_rate` argument, as
                      # it is overridden by `learning_rate_schedule`.
# If `learning_rate_schedule` is off, then `learning_rate`
# can be used.
'optimizer': {
'ctor': optimizers.sgdw,
'kwargs': {
                              # Overridden by the per-task hyper-optimization.
# Learning rate is specified by the learning rate
# schedule.
'momentum': 0.9,
'nesterov': True,
'weight_decay': 1e-4,
'mask': optimizers.default_weight_decay_mask,
},
},
# Learning rate schedule.
'learning_rate_schedule': {
'name': 'warmup_cosine_decay',
'init_learning_rate':
0.1, # Can be overridden by the learner.
'kwargs': {
'warmup_epochs': DEFAULT_WARMUP_EPOCHS,
'final_learning_rate': 1e-8,
},
},
},
# Learner search space.
'search_space_creator':
search_space_creator,
# The hyperparameter searcher configuration
'distributed_worker_builder':
functools.partial(
hyperparameter_searcher.build_local_executor_workers,
executor=concurrent.futures.ThreadPoolExecutor),
'num_workers':
16,
# The parameter search-space is currently hard-coded.
'trials_per_task':
16,
'validation_metric':
'error',
# Early-stopping configuration
'max_steps':
DEFAULT_MAX_STEPS,
'early_stopping_grace':
DEFAULT_MAX_STEPS,
'run_validation_every_n_steps':
1_000,
'image_resolution':
IMAGE_SIZE,
'label_smoothing':
0.0,
'prng_seed':
1,
'batch': {
'type': finetuning_learner.BatchSizeStrategy.ADAPTIVE,
'kwargs': {
'size_proportion': 0.0025,
'batch_size': 256,
'max_batch_size': 512,
},
},
'get_metrics_writer': metrics_logger_fn
},
},
}
})
return config
def _get_sweep(sweep_name: Optional[str]) -> Any:
"""Returns a sweep by name."""
if sweep_name is None:
return hype.product([])
sweeps_to_include = set(sweep_name.split(','))
hyperparameter_iterators = []
if 'max_1000_steps' in sweeps_to_include:
hyperparameter_iterators.append(_max_1000_steps_sweep())
if 'max_10_000_steps' in sweeps_to_include:
hyperparameter_iterators.append(_max_10000_steps_sweep())
if 'run_validation_every_n_steps_ablation' in sweeps_to_include:
hyperparameter_iterators.append(
_run_validation_every_n_steps_ablation_sweep())
if 'number_of_steps' in sweeps_to_include or 'warmup_epochs' in sweeps_to_include:
hyperparameter_iterators.append(number_of_steps_sweep())
if 'number_of_trials' in sweeps_to_include:
hyperparameter_iterators.append(number_of_trials_sweep())
if 'seeds' in sweeps_to_include:
hyperparameter_iterators.append(_seed_sweep())
if 'models' in sweeps_to_include:
hyperparameter_iterators.append(
hype.sweep('experiment.learner.config.model.ctor',
[modules.VGG, resnet.CifarResNet34]))
strategy_sweep = _finetuning_strategy_sweep(sweeps_to_include)
if strategy_sweep is not None:
hyperparameter_iterators.append(strategy_sweep)
if not hyperparameter_iterators:
raise ValueError('Unrecognized sweep name.')
return hype.product(hyperparameter_iterators)
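# Usage note (illustrative): sweep names can be combined with commas, e.g.
# get_config(sweep_name='seeds,models') collects both the seed sweep and the
# model sweep. `hype.product` is assumed to combine the collected iterators as
# a Cartesian product, as its name suggests.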
def number_of_steps_sweep():
return hype.zipit([
hype.sweep(
'experiment.learner.config.max_steps',
[DEFAULT_MAX_STEPS // 2, DEFAULT_MAX_STEPS, DEFAULT_MAX_STEPS * 2]),
hype.sweep(
'experiment.learner.config.early_stopping_grace',
[DEFAULT_MAX_STEPS // 2, DEFAULT_MAX_STEPS, DEFAULT_MAX_STEPS * 2]),
hype.sweep(
'experiment.learner.config.optimization.learning_rate_schedule.kwargs.warmup_epochs',
[
DEFAULT_WARMUP_EPOCHS // 2, DEFAULT_WARMUP_EPOCHS,
DEFAULT_WARMUP_EPOCHS * 2
]),
])
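# Note on the sweep above: `hype.zipit` is assumed to pair the swept values
# element-wise, so each max_steps setting is matched with the corresponding
# early_stopping_grace and warmup_epochs setting rather than forming a full
# Cartesian product.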
def number_of_trials_sweep():
return hype.sweep('experiment.learner.config.trials_per_task', [16, 32, 64])
def _seed_sweep():
return hype.sweep('experiment.learner.config.prng_seed', [1, 2, 3, 4, 5])
def get_test_config() -> ml_collections.ConfigDict:
"""A config suitable for TAP, designed to complete quickly on CPU."""
base_config = get_config()
# Use the test stream.
base_config.experiment.stream.ctor = test_stream.TestStream
base_config.experiment.stream.kwargs = {}
# Use a linear model
base_config.experiment.learner.config.model.ctor = modules.FlattenOnly
base_config.experiment.learner.config.model.kwargs = {}
# Run at most one optimization step with batch size 2
base_config.experiment.learner.config.max_steps = 1
base_config.experiment.learner.config.batch_size = 2
# Use the constant l.r. schedule
base_config.experiment.learner.config.optimization.learning_rate_schedule = ml_collections.ConfigDict(
{
'name': 'constant',
          'init_learning_rate': 0.1,  # Can be overridden by the learner.
'kwargs': {},
})
# Sweep at most 2 models in the search space
base_config.experiment.learner.config.trials_per_task = 2
base_config.experiment.learner.config.num_workers = 1
# Use the Dynamic KNN strategy, as it exercises the most code paths.
base_config.experiment.learner.config.finetuning.strategy = finetuning_learner.FinetuningStrategy.DYNAMIC_KNN_TRANSFER_MATRIX
return base_config
def _max_1000_steps_sweep():
return hype.sweep('experiment.learner.config.max_steps', [1_000])
def _max_10000_steps_sweep():
return hype.sweep('experiment.learner.config.max_steps', [10_000])
def _run_validation_every_n_steps_ablation_sweep():
return hype.sweep('experiment.learner.config.run_validation_every_n_steps', [
10,
100,
1_000,
2_000,
])
def _finetuning_strategy_sweep(sweeps_to_include):
"""Constructs a sweep over the named finetuning strategies."""
strategies = []
paths = []
if 'independent' in sweeps_to_include:
strategies.append(finetuning_learner.FinetuningStrategy.INDEPENDENT)
paths.append('')
if 'previous' in sweeps_to_include:
strategies.append(finetuning_learner.FinetuningStrategy.PREVIOUS)
paths.append('')
if 'dynamic_knn_transfer_matrix' in sweeps_to_include:
strategies.append(
finetuning_learner.FinetuningStrategy.DYNAMIC_KNN_TRANSFER_MATRIX)
paths.append('')
if not strategies:
return None
return hype.zipit([
hype.sweep(
'experiment.learner.config.finetuning.strategy',
strategies,
),
hype.sweep(
'experiment.learner.config.finetuning.transfer_matrix_file',
paths,
),
])
|
dm_nevis-master
|
experiments_jax/configs/finetuning_dknn.py
|
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Finetuning learner configuration which is the basis for most experiments."""
import functools
import os
from dm_nevis.benchmarker.datasets import test_stream
from dm_nevis.benchmarker.environment import logger_utils
from experiments_jax.learners.finetuning import finetuning_learner
from experiments_jax.training import augmentations
from experiments_jax.training import modules
from experiments_jax.training import optimizers
from experiments_jax.training import resnet
from dm_nevis.streams import example_stream
import ml_collections
IMAGE_SIZE = 64
DEFAULT_MAX_STEPS = 50_000
DEFAULT_WARMUP_EPOCHS = 7
DEFAULT_EARLY_STOPPING_GRACE = 10
DEFAULT_CHECKPOINT_DIR = os.environ.get('NEVIS_CHECKPOINT_DIR',
'/tmp/nevis_checkpoint_dir')
def get_config() -> ml_collections.ConfigDict:
"""The learner config, satisfying the `experiments.LearnerConfig` interface.
"""
tensorboard_log_root = logger_utils.generate_tensorboard_log_root()
metrics_logger_fn = functools.partial(logger_utils.get_metrics_writer,
tensorboard_log_root)
config = ml_collections.ConfigDict({
'experiment': {
'resume_from_checkpoint_path': None,
'stream': {
'ctor': example_stream.ExampleStream,
'kwargs': {}
},
'learner': {
'learner_builder': finetuning_learner.learner_builder,
'config': {
'train_states_checkpoint_path': DEFAULT_CHECKPOINT_DIR,
'finetuning': {
# The strategy for initializing train state for each task.
'strategy':
finetuning_learner.FinetuningStrategy.INDEPENDENT,
},
'augmentations': {
'eval': {
'ctor': augmentations.chain,
'kwargs': {
'augmentation_ctors_with_kwargs': [
(augmentations
.central_crop_via_cropped_window_and_resize,
{
'size': (IMAGE_SIZE, IMAGE_SIZE)
}),
(augmentations.normalize, {}),
],
},
},
'train': {
'ctor': augmentations.chain,
'kwargs': {
'augmentation_ctors_with_kwargs': [
(augmentations
.random_crop_via_cropped_window_and_resize, {
'size': (IMAGE_SIZE, IMAGE_SIZE)
}),
(augmentations.random_flip, {}),
(augmentations.normalize, {}),
],
},
},
},
'model': {
'ctor': resnet.CifarResNet34,
'kwargs': {},
},
# Optionally load and/or freeze pretrained parameters.
'load_params_fn': None,
'optimization': {
                      # Optimizer; must not have a `learning_rate` argument, as
                      # it is overridden by `learning_rate_schedule`.
# If `learning_rate_schedule` is off, then `learning_rate`
# can be used.
'optimizer': {
'ctor': optimizers.sgdw,
'kwargs': {
# Overridden by the per-task hyper-optimization.
# Learning rate is specified by the learning rate
# schedule.
'momentum': 0.9,
'nesterov': True,
'weight_decay': 1e-4,
'mask': optimizers.default_weight_decay_mask,
},
},
# Learning rate schedule.
'learning_rate_schedule': {
'name': 'warmup_cosine_decay',
'init_learning_rate':
0.1, # Can be overridden by the learner.
'kwargs': {
'warmup_epochs': DEFAULT_WARMUP_EPOCHS,
'final_learning_rate': 1e-8,
},
},
},
'validation_metric': 'error',
# Early-stopping configuration
'max_steps': DEFAULT_MAX_STEPS,
'early_stopping_grace': DEFAULT_MAX_STEPS,
'run_validation_every_n_steps': 1_000,
'image_resolution': IMAGE_SIZE,
'label_smoothing': 0.0,
'prng_seed': 1,
'batch_size': 256,
'get_metrics_writer': metrics_logger_fn,
},
},
}
})
return config
def get_test_config() -> ml_collections.ConfigDict:
"""A config suitable for tests, designed to complete quickly on CPU."""
base_config = get_config()
# Use the test stream.
base_config.experiment.stream.ctor = test_stream.TestStream
base_config.experiment.stream.kwargs = {}
# Use a linear model
base_config.experiment.learner.config.model.ctor = modules.FlattenOnly
base_config.experiment.learner.config.model.kwargs = {}
# Run at most one optimization step with batch size 2
base_config.experiment.learner.config.max_steps = 1
base_config.experiment.learner.config.batch_size = 2
# Use the constant l.r. schedule
base_config.experiment.learner.config.optimization.learning_rate_schedule = ml_collections.ConfigDict(
{
'name': 'constant',
          'init_learning_rate': 0.1,  # Can be overridden by the learner.
'kwargs': {},
})
return base_config
|
dm_nevis-master
|
experiments_jax/configs/finetuning_ind.py
|
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example learner config.
This is for use with `learners/example`, and is intended to show how to
implement a bare-bones learner.
"""
import functools
from dm_nevis.benchmarker.environment import logger_utils
from experiments_jax.learners.example import example_learner
from dm_nevis.streams import nevis_stream
import ml_collections
def get_config() -> ml_collections.ConfigDict:
"""The learner config, satisfying the `experiments.LearnerConfig` interface.
"""
tensorboard_log_root = logger_utils.generate_tensorboard_log_root()
metrics_logger_fn = functools.partial(logger_utils.get_metrics_writer,
tensorboard_log_root)
return ml_collections.ConfigDict({
'experiment': {
'resume_from_checkpoint_path': None,
'stream': {
'ctor': nevis_stream.NevisStream,
'kwargs': {
'stream_variant': nevis_stream.NevisStreamVariant.DEBUG,
}
},
'learner': {
'learner_builder': example_learner.learner_builder,
'config': {
'get_metrics_writer': metrics_logger_fn
}
},
},
})
|
dm_nevis-master
|
experiments_jax/configs/example.py
|
"""Cheaper config, uses standard ResNet18 and smaller hyper-param search space.
"""
import concurrent.futures
import functools
import os
from typing import Any, Optional
from dm_nevis.benchmarker.environment import logger_utils
from experiments_jax.learners.finetuning_dknn import finetuning_dknn_learner as finetuning_learner
from experiments_jax.training import augmentations
from experiments_jax.training import hype
from experiments_jax.training import hyperparameter_searcher
from experiments_jax.training import modules
from experiments_jax.training import optimizers
from experiments_jax.training import resnet
from dm_nevis.streams import example_stream
import ml_collections
IMAGE_SIZE = 64
DEFAULT_MAX_STEPS = 25_000 # Reduced number of gradient steps.
DEFAULT_WARMUP_EPOCHS = 7
DEFAULT_EARLY_STOPPING_GRACE = 10
DEFAULT_CHECKPOINT_DIR = os.environ.get('NEVIS_CHECKPOINT_DIR',
'/tmp/nevis_checkpoint_dir')
def get_config(sweep_name: Optional[str] = None) -> ml_collections.ConfigDict:
"""The learner config, satisfying the `experiments.LearnerConfig` interface.
"""
experiment_name = 'Cheap Finetuning'
if sweep_name:
experiment_name += f' ({sweep_name})'
# Search over four values of learning rate only, fixed label smoothing.
def search_space_creator(seed, num_trials):
del seed, num_trials
return hype.zipit([
hype.sweep('optimization.learning_rate_schedule.init_learning_rate',
[1e-4, 1e-3, 1e-2, 1e-1]),
hype.sweep('label_smoothing', [0.15, 0.15, 0.15, 0.15]),
])
tensorboard_log_root = logger_utils.generate_tensorboard_log_root()
metrics_logger_fn = functools.partial(logger_utils.get_metrics_writer,
tensorboard_log_root)
config = ml_collections.ConfigDict({
'sweep': _get_sweep(sweep_name),
'experiment': {
'resume_from_checkpoint_path': None,
'stream': {
'ctor': example_stream.ExampleStream,
'kwargs': {},
},
'learner': {
'learner_builder': finetuning_learner.learner_builder,
'config': {
'train_states_checkpoint_path':
DEFAULT_CHECKPOINT_DIR,
'finetuning': {
# The strategy for initializing train state for each task.
'strategy':
finetuning_learner.FinetuningStrategy.INDEPENDENT,
'transfer_matrix_file':
None,
'batch_size_embed_for_knn':
128,
},
'augmentations': {
'eval': {
'ctor': augmentations.chain,
'kwargs': {
'augmentation_ctors_with_kwargs': [
(augmentations
.central_crop_via_cropped_window_and_resize,
{
'size': (IMAGE_SIZE, IMAGE_SIZE)
}),
(augmentations.normalize, {}),
],
},
},
'train': {
'ctor': augmentations.chain,
'kwargs': {
'augmentation_ctors_with_kwargs': [
(augmentations
.random_crop_via_cropped_window_and_resize, {
'size': (IMAGE_SIZE, IMAGE_SIZE)
}),
(augmentations.random_flip, {}),
(augmentations.normalize, {}),
],
},
},
},
'model': {
'ctor': resnet.ResNet18, # Smaller network.
'kwargs': {},
},
# Optionally load and/or freeze pretrained parameters.
'load_params_fn':
None,
'optimization': {
                      # Optimizer; must not have a `learning_rate` argument, as
                      # it is overridden by `learning_rate_schedule`.
# If `learning_rate_schedule` is off, then `learning_rate`
# can be used.
'optimizer': {
'ctor': optimizers.sgdw,
'kwargs': {
                              # Overridden by the per-task hyper-optimization.
# Learning rate is specified by the learning rate
# schedule.
'momentum': 0.9,
'nesterov': True,
'weight_decay': 1e-4,
'mask': optimizers.default_weight_decay_mask,
},
},
# Learning rate schedule.
'learning_rate_schedule': {
'name': 'warmup_cosine_decay',
'init_learning_rate':
0.1, # Can be overridden by the learner.
'kwargs': {
'warmup_epochs': DEFAULT_WARMUP_EPOCHS,
'final_learning_rate': 1e-8,
},
},
},
# Learner search space.
'search_space_creator':
search_space_creator,
# The hyperparameter searcher configuration
'distributed_worker_builder':
functools.partial(
hyperparameter_searcher.build_local_executor_workers,
executor=concurrent.futures.ThreadPoolExecutor),
'num_workers': # Set this to the number of available devices.
1,
# The parameter search-space is currently hard-coded.
'trials_per_task':
4,
'validation_metric':
'error',
# Early-stopping configuration
'max_steps':
DEFAULT_MAX_STEPS,
'early_stopping_grace':
DEFAULT_MAX_STEPS,
'run_validation_every_n_steps':
1_000,
'image_resolution':
IMAGE_SIZE,
'label_smoothing':
0.15,
'prng_seed':
1,
'batch': {
'type': finetuning_learner.BatchSizeStrategy.ADAPTIVE,
'kwargs': {
'size_proportion': 0.0025,
'batch_size': 256,
'max_batch_size': 512,
},
},
'get_metrics_writer':
metrics_logger_fn
},
},
}
})
return config
def _get_sweep(sweep_name: Optional[str]) -> Any:
"""Returns a sweep by name."""
if sweep_name is None:
return hype.product([])
sweeps_to_include = set(sweep_name.split(','))
hyperparameter_iterators = []
if 'max_1000_steps' in sweeps_to_include:
hyperparameter_iterators.append(_max_1000_steps_sweep())
if 'max_10_000_steps' in sweeps_to_include:
hyperparameter_iterators.append(_max_10000_steps_sweep())
if 'run_validation_every_n_steps_ablation' in sweeps_to_include:
hyperparameter_iterators.append(
_run_validation_every_n_steps_ablation_sweep())
if 'number_of_steps' in sweeps_to_include or 'warmup_epochs' in sweeps_to_include:
hyperparameter_iterators.append(number_of_steps_sweep())
if 'seeds' in sweeps_to_include:
hyperparameter_iterators.append(_seed_sweep())
if 'models' in sweeps_to_include:
hyperparameter_iterators.append(
hype.sweep('experiment.learner.config.model.ctor',
[modules.VGG, resnet.CifarResNet34]))
strategy_sweep = _finetuning_strategy_sweep(sweeps_to_include)
if strategy_sweep is not None:
hyperparameter_iterators.append(strategy_sweep)
if not hyperparameter_iterators:
raise ValueError('Unrecognized sweep name.')
return hype.product(hyperparameter_iterators)
def number_of_steps_sweep():
return hype.zipit([
hype.sweep(
'experiment.learner.config.max_steps',
[DEFAULT_MAX_STEPS // 2, DEFAULT_MAX_STEPS, DEFAULT_MAX_STEPS * 2]),
hype.sweep(
'experiment.learner.config.early_stopping_grace',
[DEFAULT_MAX_STEPS // 2, DEFAULT_MAX_STEPS, DEFAULT_MAX_STEPS * 2]),
hype.sweep(
'experiment.learner.config.optimization.learning_rate_schedule.kwargs.warmup_epochs',
[
DEFAULT_WARMUP_EPOCHS // 2, DEFAULT_WARMUP_EPOCHS,
DEFAULT_WARMUP_EPOCHS * 2
]),
])
def _seed_sweep():
return hype.sweep('experiment.learner.config.prng_seed', [1, 2, 3, 4, 5])
def _max_1000_steps_sweep():
return hype.sweep('experiment.learner.config.max_steps', [1_000])
def _max_10000_steps_sweep():
return hype.sweep('experiment.learner.config.max_steps', [10_000])
def _run_validation_every_n_steps_ablation_sweep():
return hype.sweep('experiment.learner.config.run_validation_every_n_steps', [
10,
100,
1_000,
2_000,
])
def _finetuning_strategy_sweep(sweeps_to_include):
"""Constructs a sweep over the named finetuning strategies."""
strategies = []
paths = []
if 'independent' in sweeps_to_include:
strategies.append(finetuning_learner.FinetuningStrategy.INDEPENDENT)
paths.append('')
if 'previous' in sweeps_to_include:
strategies.append(finetuning_learner.FinetuningStrategy.PREVIOUS)
paths.append('')
if 'dynamic_knn_transfer_matrix' in sweeps_to_include:
strategies.append(
finetuning_learner.FinetuningStrategy.DYNAMIC_KNN_TRANSFER_MATRIX)
paths.append('')
if not strategies:
return None
return hype.zipit([
hype.sweep(
'experiment.learner.config.finetuning.strategy',
strategies,
),
hype.sweep(
'experiment.learner.config.finetuning.transfer_matrix_file',
paths,
),
])
|
dm_nevis-master
|
experiments_jax/configs/cheap_finetuning_dknn.py
|
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Finetuning pretrained model from a checkpoint."""
import functools
import os
from dm_nevis.benchmarker.datasets import test_stream
from dm_nevis.benchmarker.environment import logger_utils
from experiments_jax.environment import pretrained_model_loader
from experiments_jax.learners.finetuning import finetuning_learner
from experiments_jax.training import augmentations
from experiments_jax.training import modules
from experiments_jax.training import optimizers
from experiments_jax.training import resnet
from dm_nevis.streams import example_stream
import ml_collections
IMAGE_SIZE = 64
DEFAULT_MAX_STEPS = 50_000
DEFAULT_WARMUP_EPOCHS = 7
DEFAULT_EARLY_STOPPING_GRACE = 10
DEFAULT_CHECKPOINT_DIR = os.environ.get('NEVIS_CHECKPOINT_DIR',
'/tmp/nevis_checkpoint_dir')
DEFAULT_PRETRAIN_CHECKPOINT_PATH = os.path.join(DEFAULT_CHECKPOINT_DIR,
'pretraining.pkl')
FREEZE_PRETRAINED_BACKBONE = False
def get_config() -> ml_collections.ConfigDict:
"""The learner config, satisfying the `experiments.LearnerConfig` interface.
"""
tensorboard_log_root = logger_utils.generate_tensorboard_log_root()
metrics_logger_fn = functools.partial(logger_utils.get_metrics_writer,
tensorboard_log_root)
config = ml_collections.ConfigDict({
'experiment': {
'resume_from_checkpoint_path': None,
'stream': {
'ctor': example_stream.ExampleStream,
'kwargs': {}
},
'learner': {
'learner_builder': finetuning_learner.learner_builder,
'config': {
'train_states_checkpoint_path': DEFAULT_CHECKPOINT_DIR,
'finetuning': {
# The strategy for initializing train state for each task.
'strategy':
finetuning_learner.FinetuningStrategy.INDEPENDENT,
},
'augmentations': {
'eval': {
'ctor': augmentations.chain,
'kwargs': {
'augmentation_ctors_with_kwargs': [
(augmentations
.central_crop_via_cropped_window_and_resize,
{
'size': (IMAGE_SIZE, IMAGE_SIZE)
}),
(augmentations.normalize, {}),
],
},
},
'train': {
'ctor': augmentations.chain,
'kwargs': {
'augmentation_ctors_with_kwargs': [
(augmentations
.random_crop_via_cropped_window_and_resize, {
'size': (IMAGE_SIZE, IMAGE_SIZE)
}),
(augmentations.random_flip, {}),
(augmentations.normalize, {}),
],
},
},
},
'model': {
'ctor': resnet.CifarResNet34,
'kwargs': {},
},
# Optionally load and/or freeze pretrained parameters.
'load_params_fn': None,
'load_params_fn_with_kwargs': {
'fun':
pretrained_model_loader.load_model_params_from_ckpt,
'kwargs': {
'freeze_pretrained_backbone':
FREEZE_PRETRAINED_BACKBONE,
'checkpoint_path':
DEFAULT_PRETRAIN_CHECKPOINT_PATH
},
},
'optimization': {
                      # Optimizer; must not have a `learning_rate` argument, as
                      # it is overridden by `learning_rate_schedule`.
# If `learning_rate_schedule` is off, then `learning_rate`
# can be used.
'optimizer': {
'ctor': optimizers.sgdw,
'kwargs': {
# Overridden by the per-task hyper-optimization.
# Learning rate is specified by the learning rate
# schedule.
'momentum': 0.9,
'nesterov': True,
'weight_decay': 1e-4,
'mask': optimizers.default_weight_decay_mask,
},
},
# Learning rate schedule.
'learning_rate_schedule': {
'name': 'warmup_cosine_decay',
'init_learning_rate':
0.1, # Can be overridden by the learner.
'kwargs': {
'warmup_epochs': DEFAULT_WARMUP_EPOCHS,
'final_learning_rate': 1e-8,
},
},
},
'validation_metric': 'error',
# Early-stopping configuration
'max_steps': DEFAULT_MAX_STEPS,
'early_stopping_grace': DEFAULT_MAX_STEPS,
'run_validation_every_n_steps': 1_000,
'image_resolution': IMAGE_SIZE,
'label_smoothing': 0.0,
'prng_seed': 1,
'batch_size': 256,
'get_metrics_writer': metrics_logger_fn,
},
},
}
})
return config
def get_test_config() -> ml_collections.ConfigDict:
"""A config suitable for tests, designed to complete quickly on CPU."""
base_config = get_config()
# Use the test stream.
base_config.experiment.stream.ctor = test_stream.TestStream
base_config.experiment.stream.kwargs = {}
# Use a linear model
base_config.experiment.learner.config.model.ctor = modules.FlattenOnly
base_config.experiment.learner.config.model.kwargs = {}
# Run at most one optimization step with batch size 2
base_config.experiment.learner.config.max_steps = 1
base_config.experiment.learner.config.batch_size = 2
# Use the constant l.r. schedule
base_config.experiment.learner.config.optimization.learning_rate_schedule = ml_collections.ConfigDict(
{
'name': 'constant',
          'init_learning_rate': 0.1,  # Can be overridden by the learner.
'kwargs': {},
})
return base_config
|
dm_nevis-master
|
experiments_jax/configs/finetuning_ind_pretrained.py
|
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Finetuning learner configuration which is the basis for most experiments."""
import functools
import os
from dm_nevis.benchmarker.datasets import test_stream
from dm_nevis.benchmarker.environment import logger_utils
from experiments_jax.learners.finetuning import finetuning_learner
from experiments_jax.training import augmentations
from experiments_jax.training import modules
from experiments_jax.training import optimizers
from experiments_jax.training import resnet
from dm_nevis.streams import example_stream
import ml_collections
IMAGE_SIZE = 64
DEFAULT_MAX_STEPS = 50_000
DEFAULT_WARMUP_EPOCHS = 7
DEFAULT_EARLY_STOPPING_GRACE = 10
DEFAULT_CHECKPOINT_DIR = os.environ.get('NEVIS_CHECKPOINT_DIR',
'/tmp/nevis_checkpoint_dir')
def get_config() -> ml_collections.ConfigDict:
"""The learner config, satisfying the `experiments.LearnerConfig` interface."""
tensorboard_log_root = logger_utils.generate_tensorboard_log_root()
metrics_logger_fn = functools.partial(logger_utils.get_metrics_writer,
tensorboard_log_root)
config = ml_collections.ConfigDict({
'experiment': {
'resume_from_checkpoint_path': None,
'stream': {
'ctor': example_stream.ExampleStream,
'kwargs': {}
},
'learner': {
'learner_builder': finetuning_learner.learner_builder,
'config': {
'train_states_checkpoint_path': DEFAULT_CHECKPOINT_DIR,
'finetuning': {
# The strategy for initializing train state for each task.
'strategy':
finetuning_learner.FinetuningStrategy.PREVIOUS,
},
'augmentations': {
'eval': {
'ctor': augmentations.chain,
'kwargs': {
'augmentation_ctors_with_kwargs': [
(augmentations
.central_crop_via_cropped_window_and_resize,
{
'size': (IMAGE_SIZE, IMAGE_SIZE)
}),
(augmentations.normalize, {}),
],
},
},
'train': {
'ctor': augmentations.chain,
'kwargs': {
'augmentation_ctors_with_kwargs': [
(augmentations
.random_crop_via_cropped_window_and_resize, {
'size': (IMAGE_SIZE, IMAGE_SIZE)
}),
(augmentations.random_flip, {}),
(augmentations.normalize, {}),
],
},
},
},
'model': {
'ctor': resnet.CifarResNet34,
'kwargs': {},
},
# Optionally load and/or freeze pretrained parameters.
'load_params_fn': None,
'optimization': {
                      # Optimizer; must not have a `learning_rate` argument, as
                      # it is overridden by `learning_rate_schedule`.
# If `learning_rate_schedule` is off, then `learning_rate`
# can be used.
'optimizer': {
'ctor': optimizers.sgdw,
'kwargs': {
# Overridden by the per-task hyper-optimization.
# Learning rate is specified by the learning rate
# schedule.
'momentum': 0.9,
'nesterov': True,
'weight_decay': 1e-4,
'mask': optimizers.default_weight_decay_mask,
},
},
# Learning rate schedule.
'learning_rate_schedule': {
'name': 'warmup_cosine_decay',
'init_learning_rate':
0.1, # Can be overridden by the learner.
'kwargs': {
'warmup_epochs': DEFAULT_WARMUP_EPOCHS,
'final_learning_rate': 1e-8,
},
},
},
'validation_metric': 'error',
# Early-stopping configuration
'max_steps': DEFAULT_MAX_STEPS,
'early_stopping_grace': DEFAULT_MAX_STEPS,
'run_validation_every_n_steps': 1_000,
'image_resolution': IMAGE_SIZE,
'label_smoothing': 0.0,
'prng_seed': 1,
'batch_size': 256,
'get_metrics_writer': metrics_logger_fn,
},
},
}
})
return config
def get_test_config() -> ml_collections.ConfigDict:
"""A config suitable for tests, designed to complete quickly on CPU."""
base_config = get_config()
# Use the test stream.
base_config.experiment.stream.ctor = test_stream.TestStream
base_config.experiment.stream.kwargs = {}
# Use a linear model
base_config.experiment.learner.config.model.ctor = modules.FlattenOnly
base_config.experiment.learner.config.model.kwargs = {}
# Run at most one optimization step with batch size 2
base_config.experiment.learner.config.max_steps = 1
base_config.experiment.learner.config.batch_size = 2
# Use the constant l.r. schedule
base_config.experiment.learner.config.optimization.learning_rate_schedule = ml_collections.ConfigDict(
{
'name': 'constant',
          'init_learning_rate': 0.1,  # Can be overridden by the learner.
'kwargs': {},
})
return base_config
|
dm_nevis-master
|
experiments_jax/configs/finetuning_prev.py
|
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A checkpointer that saves with pickle."""
import os
import pickle
from typing import Any, Optional
from absl import logging
class PickleCheckpointer:
"""A checkpointer that saves with pickle.
  This checkpointer will always overwrite the most recent checkpoint at the
  base path.
"""
def __init__(self, base_path: str):
"""Creates a pickle checkpointer.
Args:
base_path: Path to write checkpoints to.
Returns: A checkpointer.
"""
os.makedirs(os.path.dirname(base_path), exist_ok=True)
self.base_path = base_path
def write(self, state: Any) -> None:
"""Writes a checkpoint to the base path.
Args:
state: Arbitrary checkpointable state
"""
logging.info("Saving checkpoint to %s", self.base_path)
partial_path = f"{self.base_path}.part"
with open(partial_path, "wb") as f:
pickle.dump(state, f)
os.rename(partial_path, self.base_path)
def restore(self) -> Optional[Any]:
"""Restores the most recent checkpointed state.
Returns:
The most recent checkpoint that was successfully written using write,
or None if no checkpoint state is available.
"""
if not os.path.exists(self.base_path):
logging.warning("No checkpoint found at %s", self.base_path)
return None
logging.info("Restore checkpoint from %s", self.base_path)
with open(self.base_path, "rb") as f:
state = pickle.load(f)
return state
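# Usage sketch (illustrative; the path below is an assumption for the example):
#
#   checkpointer = PickleCheckpointer("/tmp/nevis_example/state.pkl")
#   checkpointer.write({"step": 10})
#   assert checkpointer.restore() == {"step": 10}
#
# Writes are atomic with respect to interruption: the state is pickled to a
# temporary ".part" file first and only renamed onto `base_path` once it has
# been fully written, so a partially written checkpoint is never restored.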
|
dm_nevis-master
|
experiments_jax/environment/pickle_checkpointer.py
|
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A dummy checkpointer doing nothing."""
from typing import Any, Optional
from absl import logging
class NoOpCheckpointer:
"""A No-Operation checkpointer doing nothing."""
def __init__(self,
*,
namespace: Optional[str] = None,
base_path: Optional[str] = None,
restore_path: Optional[str] = None):
"""Create a no-op checkpointer.
Args:
namespace: Appended to the base_path, so that checkpoints written with
this writer are independent.
base_path: if set, checkpoints will be written here.
restore_path: path to restore state from.
Returns: A checkpointer.
"""
del namespace, base_path, restore_path
def write(self, state: Any) -> None:
"""Writes a checkpoint.
Args:
state: Arbitrary checkpointable state
"""
del state
logging.warning(
"Received checkpoint write request (ignoring it - no checkpoint will be written)."
)
def restore(self, *, age: int = 0) -> Optional[Any]:
"""Restores the most recent checkpointed state.
Args:
age: if present, the age of the checkpoint to restore.
Returns:
The most recent checkpoint that was successfully written using write,
or None if no checkpoint state is available.
"""
del age
logging.warning(
"Received checkpoint restore request (ignoring it - no checkpoint will be restored)."
)
return None
|
dm_nevis-master
|
experiments_jax/environment/noop_checkpointer.py
|
"""Functions for loading pretrained models from a checkpoint."""
from typing import Tuple
from absl import logging
import chex
from experiments_jax.training import trainer
import haiku as hk
def load_model_params_from_ckpt(
params: hk.Params,
state: hk.State,
freeze_pretrained_backbone: bool = False,
checkpoint_path: str = '',
) -> Tuple[hk.Params, hk.Params, hk.State]:
"""Load pretrained model parameter from a checkpoint.
Args:
params: original params including trainable heads.
state: original states.
freeze_pretrained_backbone: whether to freeze pretrained backbone or not.
checkpoint_path: path to the pretrained checkpointer.
Returns:
updated params split into trainable and frozen, updated states.
"""
trainer_state = trainer.restore_train_state(checkpoint_path)
if trainer_state is None or trainer_state.trainable_params is None or trainer_state.frozen_params is None:
return params, {}, state
restored_params = {
**trainer_state.trainable_params,
**trainer_state.frozen_params
}
def filter_fn(module_name, *unused_args):
del unused_args
return module_name.startswith('backbone')
filtered_original_params = hk.data_structures.filter(filter_fn, params)
filtered_params = hk.data_structures.filter(filter_fn, restored_params)
chex.assert_trees_all_equal_shapes(filtered_original_params, filtered_params)
# replace the initialized params by pretrained params
updated_params = hk.data_structures.merge(params, filtered_params)
if freeze_pretrained_backbone:
frozen_params, trainable_params = hk.data_structures.partition(
filter_fn, updated_params)
else:
trainable_params = updated_params
frozen_params = {}
logging.info('Loading pretrained model finished.')
return trainable_params, frozen_params, state
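# Note on the partition above (descriptive): `hk.data_structures.partition`
# returns the parameters matching the predicate first, so with `filter_fn`
# selecting modules whose name starts with 'backbone', the pretrained backbone
# ends up in `frozen_params` and only the task heads stay trainable when
# `freeze_pretrained_backbone` is True.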
|
dm_nevis-master
|
experiments_jax/environment/pretrained_model_loader.py
|
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
|
dm_nevis-master
|
experiments_jax/environment/__init__.py
|
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Full AlphaFold protein structure prediction script."""
import enum
import json
import os
import pathlib
import pickle
import random
import shutil
import sys
import time
from typing import Any, Dict, Union
from absl import app
from absl import flags
from absl import logging
from alphafold.common import confidence
from alphafold.common import protein
from alphafold.common import residue_constants
from alphafold.data import pipeline
from alphafold.data import pipeline_multimer
from alphafold.data import templates
from alphafold.data.tools import hhsearch
from alphafold.data.tools import hmmsearch
from alphafold.model import config
from alphafold.model import data
from alphafold.model import model
from alphafold.relax import relax
import jax.numpy as jnp
import numpy as np
# Internal import (7716).
logging.set_verbosity(logging.INFO)
@enum.unique
class ModelsToRelax(enum.Enum):
ALL = 0
BEST = 1
NONE = 2
flags.DEFINE_list(
'fasta_paths', None, 'Paths to FASTA files, each containing a prediction '
'target that will be folded one after another. If a FASTA file contains '
'multiple sequences, then it will be folded as a multimer. Paths should be '
'separated by commas. All FASTA paths must have a unique basename as the '
'basename is used to name the output directories for each prediction.')
flags.DEFINE_string('data_dir', None, 'Path to directory of supporting data.')
flags.DEFINE_string('output_dir', None, 'Path to a directory that will '
'store the results.')
flags.DEFINE_string('jackhmmer_binary_path', shutil.which('jackhmmer'),
'Path to the JackHMMER executable.')
flags.DEFINE_string('hhblits_binary_path', shutil.which('hhblits'),
'Path to the HHblits executable.')
flags.DEFINE_string('hhsearch_binary_path', shutil.which('hhsearch'),
'Path to the HHsearch executable.')
flags.DEFINE_string('hmmsearch_binary_path', shutil.which('hmmsearch'),
'Path to the hmmsearch executable.')
flags.DEFINE_string('hmmbuild_binary_path', shutil.which('hmmbuild'),
'Path to the hmmbuild executable.')
flags.DEFINE_string('kalign_binary_path', shutil.which('kalign'),
'Path to the Kalign executable.')
flags.DEFINE_string('uniref90_database_path', None, 'Path to the Uniref90 '
'database for use by JackHMMER.')
flags.DEFINE_string('mgnify_database_path', None, 'Path to the MGnify '
'database for use by JackHMMER.')
flags.DEFINE_string('bfd_database_path', None, 'Path to the BFD '
'database for use by HHblits.')
flags.DEFINE_string('small_bfd_database_path', None, 'Path to the small '
'version of BFD used with the "reduced_dbs" preset.')
flags.DEFINE_string('uniref30_database_path', None, 'Path to the UniRef30 '
'database for use by HHblits.')
flags.DEFINE_string('uniprot_database_path', None, 'Path to the Uniprot '
'database for use by JackHMMer.')
flags.DEFINE_string('pdb70_database_path', None, 'Path to the PDB70 '
'database for use by HHsearch.')
flags.DEFINE_string('pdb_seqres_database_path', None, 'Path to the PDB '
'seqres database for use by hmmsearch.')
flags.DEFINE_string('template_mmcif_dir', None, 'Path to a directory with '
'template mmCIF structures, each named <pdb_id>.cif')
flags.DEFINE_string('max_template_date', None, 'Maximum template release date '
'to consider. Important if folding historical test sets.')
flags.DEFINE_string('obsolete_pdbs_path', None, 'Path to file containing a '
'mapping from obsolete PDB IDs to the PDB IDs of their '
'replacements.')
flags.DEFINE_enum('db_preset', 'full_dbs',
['full_dbs', 'reduced_dbs'],
'Choose preset MSA database configuration - '
'smaller genetic database config (reduced_dbs) or '
'full genetic database config (full_dbs)')
flags.DEFINE_enum('model_preset', 'monomer',
['monomer', 'monomer_casp14', 'monomer_ptm', 'multimer'],
'Choose preset model configuration - the monomer model, '
'the monomer model with extra ensembling, monomer model with '
'pTM head, or multimer model')
flags.DEFINE_boolean('benchmark', False, 'Run multiple JAX model evaluations '
'to obtain a timing that excludes the compilation time, '
'which should be more indicative of the time required for '
'inferencing many proteins.')
flags.DEFINE_integer('random_seed', None, 'The random seed for the data '
'pipeline. By default, this is randomly generated. Note '
'that even if this is set, Alphafold may still not be '
'deterministic, because processes like GPU inference are '
'nondeterministic.')
flags.DEFINE_integer('num_multimer_predictions_per_model', 5, 'How many '
'predictions (each with a different random seed) will be '
'generated per model. E.g. if this is 2 and there are 5 '
'models then there will be 10 predictions per input. '
'Note: this FLAG only applies if model_preset=multimer')
flags.DEFINE_boolean('use_precomputed_msas', False, 'Whether to read MSAs that '
'have been written to disk instead of running the MSA '
'tools. The MSA files are looked up in the output '
'directory, so it must stay the same between multiple '
'runs that are to reuse the MSAs. WARNING: This will not '
'check if the sequence, database or configuration have '
'changed.')
flags.DEFINE_enum_class('models_to_relax', ModelsToRelax.BEST, ModelsToRelax,
'The models to run the final relaxation step on. '
'If `all`, all models are relaxed, which may be time '
'consuming. If `best`, only the most confident model '
'is relaxed. If `none`, relaxation is not run. Turning '
'off relaxation might result in predictions with '
'distracting stereochemical violations but might help '
'in case you are having issues with the relaxation '
'stage.')
flags.DEFINE_boolean('use_gpu_relax', None, 'Whether to relax on GPU. '
'Relax on GPU can be much faster than CPU, so it is '
'recommended to enable if possible. GPUs must be available'
' if this setting is enabled.')
FLAGS = flags.FLAGS
MAX_TEMPLATE_HITS = 20
RELAX_MAX_ITERATIONS = 0
RELAX_ENERGY_TOLERANCE = 2.39
RELAX_STIFFNESS = 10.0
RELAX_EXCLUDE_RESIDUES = []
RELAX_MAX_OUTER_ITERATIONS = 3
def _check_flag(flag_name: str,
other_flag_name: str,
should_be_set: bool):
if should_be_set != bool(FLAGS[flag_name].value):
verb = 'be' if should_be_set else 'not be'
raise ValueError(f'{flag_name} must {verb} set when running with '
f'"--{other_flag_name}={FLAGS[other_flag_name].value}".')
def _jnp_to_np(output: Dict[str, Any]) -> Dict[str, Any]:
"""Recursively changes jax arrays to numpy arrays."""
for k, v in output.items():
if isinstance(v, dict):
output[k] = _jnp_to_np(v)
elif isinstance(v, jnp.ndarray):
output[k] = np.array(v)
return output
def _save_confidence_json_file(
plddt: np.ndarray, output_dir: str, model_name: str
) -> None:
confidence_json = confidence.confidence_json(plddt)
# Save the confidence json.
confidence_json_output_path = os.path.join(
output_dir, f'confidence_{model_name}.json'
)
with open(confidence_json_output_path, 'w') as f:
f.write(confidence_json)
def _save_mmcif_file(
prot: protein.Protein,
output_dir: str,
model_name: str,
file_id: str,
model_type: str,
) -> None:
"""Crate mmCIF string and save to a file.
Args:
prot: Protein object.
output_dir: Directory to which files are saved.
model_name: Name of a model.
file_id: The file ID (usually the PDB ID) to be used in the mmCIF.
model_type: Monomer or multimer.
"""
mmcif_string = protein.to_mmcif(prot, file_id, model_type)
# Save the MMCIF.
mmcif_output_path = os.path.join(output_dir, f'{model_name}.cif')
with open(mmcif_output_path, 'w') as f:
f.write(mmcif_string)
def _save_pae_json_file(
pae: np.ndarray, max_pae: float, output_dir: str, model_name: str
) -> None:
"""Check prediction result for PAE data and save to a JSON file if present.
Args:
pae: The n_res x n_res PAE array.
max_pae: The maximum possible PAE value.
output_dir: Directory to which files are saved.
model_name: Name of a model.
"""
pae_json = confidence.pae_json(pae, max_pae)
# Save the PAE json.
pae_json_output_path = os.path.join(output_dir, f'pae_{model_name}.json')
with open(pae_json_output_path, 'w') as f:
f.write(pae_json)
def predict_structure(
fasta_path: str,
fasta_name: str,
output_dir_base: str,
data_pipeline: Union[pipeline.DataPipeline, pipeline_multimer.DataPipeline],
model_runners: Dict[str, model.RunModel],
amber_relaxer: relax.AmberRelaxation,
benchmark: bool,
random_seed: int,
models_to_relax: ModelsToRelax,
model_type: str,
):
"""Predicts structure using AlphaFold for the given sequence."""
logging.info('Predicting %s', fasta_name)
timings = {}
output_dir = os.path.join(output_dir_base, fasta_name)
if not os.path.exists(output_dir):
os.makedirs(output_dir)
msa_output_dir = os.path.join(output_dir, 'msas')
if not os.path.exists(msa_output_dir):
os.makedirs(msa_output_dir)
# Get features.
t_0 = time.time()
feature_dict = data_pipeline.process(
input_fasta_path=fasta_path,
msa_output_dir=msa_output_dir)
timings['features'] = time.time() - t_0
# Write out features as a pickled dictionary.
features_output_path = os.path.join(output_dir, 'features.pkl')
with open(features_output_path, 'wb') as f:
pickle.dump(feature_dict, f, protocol=4)
unrelaxed_pdbs = {}
unrelaxed_proteins = {}
relaxed_pdbs = {}
relax_metrics = {}
ranking_confidences = {}
# Run the models.
num_models = len(model_runners)
for model_index, (model_name, model_runner) in enumerate(
model_runners.items()):
logging.info('Running model %s on %s', model_name, fasta_name)
t_0 = time.time()
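    # Give each model runner its own seed: with base seed s and N runners,
    # runner k uses seed k + s * N, so different base seeds never collide.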
model_random_seed = model_index + random_seed * num_models
processed_feature_dict = model_runner.process_features(
feature_dict, random_seed=model_random_seed)
timings[f'process_features_{model_name}'] = time.time() - t_0
t_0 = time.time()
prediction_result = model_runner.predict(processed_feature_dict,
random_seed=model_random_seed)
t_diff = time.time() - t_0
timings[f'predict_and_compile_{model_name}'] = t_diff
logging.info(
'Total JAX model %s on %s predict time (includes compilation time, see --benchmark): %.1fs',
model_name, fasta_name, t_diff)
if benchmark:
t_0 = time.time()
model_runner.predict(processed_feature_dict,
random_seed=model_random_seed)
t_diff = time.time() - t_0
timings[f'predict_benchmark_{model_name}'] = t_diff
logging.info(
'Total JAX model %s on %s predict time (excludes compilation time): %.1fs',
model_name, fasta_name, t_diff)
plddt = prediction_result['plddt']
_save_confidence_json_file(plddt, output_dir, model_name)
ranking_confidences[model_name] = prediction_result['ranking_confidence']
if (
'predicted_aligned_error' in prediction_result
and 'max_predicted_aligned_error' in prediction_result
):
pae = prediction_result['predicted_aligned_error']
max_pae = prediction_result['max_predicted_aligned_error']
_save_pae_json_file(pae, float(max_pae), output_dir, model_name)
# Remove jax dependency from results.
np_prediction_result = _jnp_to_np(dict(prediction_result))
# Save the model outputs.
result_output_path = os.path.join(output_dir, f'result_{model_name}.pkl')
with open(result_output_path, 'wb') as f:
pickle.dump(np_prediction_result, f, protocol=4)
# Add the predicted LDDT in the b-factor column.
# Note that higher predicted LDDT value means higher model confidence.
plddt_b_factors = np.repeat(
plddt[:, None], residue_constants.atom_type_num, axis=-1)
unrelaxed_protein = protein.from_prediction(
features=processed_feature_dict,
result=prediction_result,
b_factors=plddt_b_factors,
remove_leading_feature_dimension=not model_runner.multimer_mode)
unrelaxed_proteins[model_name] = unrelaxed_protein
unrelaxed_pdbs[model_name] = protein.to_pdb(unrelaxed_protein)
unrelaxed_pdb_path = os.path.join(output_dir, f'unrelaxed_{model_name}.pdb')
with open(unrelaxed_pdb_path, 'w') as f:
f.write(unrelaxed_pdbs[model_name])
_save_mmcif_file(
prot=unrelaxed_protein,
output_dir=output_dir,
model_name=f'unrelaxed_{model_name}',
file_id=str(model_index),
model_type=model_type,
)
# Rank by model confidence.
ranked_order = [
model_name for model_name, confidence in
sorted(ranking_confidences.items(), key=lambda x: x[1], reverse=True)]
# Relax predictions.
if models_to_relax == ModelsToRelax.BEST:
to_relax = [ranked_order[0]]
elif models_to_relax == ModelsToRelax.ALL:
to_relax = ranked_order
elif models_to_relax == ModelsToRelax.NONE:
to_relax = []
for model_name in to_relax:
t_0 = time.time()
relaxed_pdb_str, _, violations = amber_relaxer.process(
prot=unrelaxed_proteins[model_name])
relax_metrics[model_name] = {
'remaining_violations': violations,
'remaining_violations_count': sum(violations)
}
timings[f'relax_{model_name}'] = time.time() - t_0
relaxed_pdbs[model_name] = relaxed_pdb_str
# Save the relaxed PDB.
relaxed_output_path = os.path.join(
output_dir, f'relaxed_{model_name}.pdb')
with open(relaxed_output_path, 'w') as f:
f.write(relaxed_pdb_str)
relaxed_protein = protein.from_pdb_string(relaxed_pdb_str)
_save_mmcif_file(
prot=relaxed_protein,
output_dir=output_dir,
model_name=f'relaxed_{model_name}',
file_id='0',
model_type=model_type,
)
# Write out relaxed PDBs in rank order.
for idx, model_name in enumerate(ranked_order):
ranked_output_path = os.path.join(output_dir, f'ranked_{idx}.pdb')
with open(ranked_output_path, 'w') as f:
if model_name in relaxed_pdbs:
f.write(relaxed_pdbs[model_name])
else:
f.write(unrelaxed_pdbs[model_name])
if model_name in relaxed_pdbs:
protein_instance = protein.from_pdb_string(relaxed_pdbs[model_name])
else:
protein_instance = protein.from_pdb_string(unrelaxed_pdbs[model_name])
_save_mmcif_file(
prot=protein_instance,
output_dir=output_dir,
model_name=f'ranked_{idx}',
file_id=str(idx),
model_type=model_type,
)
ranking_output_path = os.path.join(output_dir, 'ranking_debug.json')
with open(ranking_output_path, 'w') as f:
label = 'iptm+ptm' if 'iptm' in prediction_result else 'plddts'
f.write(json.dumps(
{label: ranking_confidences, 'order': ranked_order}, indent=4))
logging.info('Final timings for %s: %s', fasta_name, timings)
timings_output_path = os.path.join(output_dir, 'timings.json')
with open(timings_output_path, 'w') as f:
f.write(json.dumps(timings, indent=4))
if models_to_relax != ModelsToRelax.NONE:
relax_metrics_path = os.path.join(output_dir, 'relax_metrics.json')
with open(relax_metrics_path, 'w') as f:
f.write(json.dumps(relax_metrics, indent=4))
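# Summary comment (added for exposition): per target, predict_structure writes
# features.pkl, result_<model>.pkl, confidence_<model>.json, pae_<model>.json
# (when PAE is predicted), unrelaxed/relaxed/ranked structures in both PDB and
# mmCIF format, ranking_debug.json, timings.json and, when relaxation runs,
# relax_metrics.json.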
def main(argv):
if len(argv) > 1:
raise app.UsageError('Too many command-line arguments.')
for tool_name in (
'jackhmmer', 'hhblits', 'hhsearch', 'hmmsearch', 'hmmbuild', 'kalign'):
if not FLAGS[f'{tool_name}_binary_path'].value:
raise ValueError(f'Could not find path to the "{tool_name}" binary. Make '
'sure it is installed on your system.')
use_small_bfd = FLAGS.db_preset == 'reduced_dbs'
_check_flag('small_bfd_database_path', 'db_preset',
should_be_set=use_small_bfd)
_check_flag('bfd_database_path', 'db_preset',
should_be_set=not use_small_bfd)
_check_flag('uniref30_database_path', 'db_preset',
should_be_set=not use_small_bfd)
run_multimer_system = 'multimer' in FLAGS.model_preset
model_type = 'Multimer' if run_multimer_system else 'Monomer'
_check_flag('pdb70_database_path', 'model_preset',
should_be_set=not run_multimer_system)
_check_flag('pdb_seqres_database_path', 'model_preset',
should_be_set=run_multimer_system)
_check_flag('uniprot_database_path', 'model_preset',
should_be_set=run_multimer_system)
if FLAGS.model_preset == 'monomer_casp14':
num_ensemble = 8
else:
num_ensemble = 1
# Check for duplicate FASTA file names.
fasta_names = [pathlib.Path(p).stem for p in FLAGS.fasta_paths]
if len(fasta_names) != len(set(fasta_names)):
raise ValueError('All FASTA paths must have a unique basename.')
if run_multimer_system:
template_searcher = hmmsearch.Hmmsearch(
binary_path=FLAGS.hmmsearch_binary_path,
hmmbuild_binary_path=FLAGS.hmmbuild_binary_path,
database_path=FLAGS.pdb_seqres_database_path)
template_featurizer = templates.HmmsearchHitFeaturizer(
mmcif_dir=FLAGS.template_mmcif_dir,
max_template_date=FLAGS.max_template_date,
max_hits=MAX_TEMPLATE_HITS,
kalign_binary_path=FLAGS.kalign_binary_path,
release_dates_path=None,
obsolete_pdbs_path=FLAGS.obsolete_pdbs_path)
else:
template_searcher = hhsearch.HHSearch(
binary_path=FLAGS.hhsearch_binary_path,
databases=[FLAGS.pdb70_database_path])
template_featurizer = templates.HhsearchHitFeaturizer(
mmcif_dir=FLAGS.template_mmcif_dir,
max_template_date=FLAGS.max_template_date,
max_hits=MAX_TEMPLATE_HITS,
kalign_binary_path=FLAGS.kalign_binary_path,
release_dates_path=None,
obsolete_pdbs_path=FLAGS.obsolete_pdbs_path)
monomer_data_pipeline = pipeline.DataPipeline(
jackhmmer_binary_path=FLAGS.jackhmmer_binary_path,
hhblits_binary_path=FLAGS.hhblits_binary_path,
uniref90_database_path=FLAGS.uniref90_database_path,
mgnify_database_path=FLAGS.mgnify_database_path,
bfd_database_path=FLAGS.bfd_database_path,
uniref30_database_path=FLAGS.uniref30_database_path,
small_bfd_database_path=FLAGS.small_bfd_database_path,
template_searcher=template_searcher,
template_featurizer=template_featurizer,
use_small_bfd=use_small_bfd,
use_precomputed_msas=FLAGS.use_precomputed_msas)
if run_multimer_system:
num_predictions_per_model = FLAGS.num_multimer_predictions_per_model
data_pipeline = pipeline_multimer.DataPipeline(
monomer_data_pipeline=monomer_data_pipeline,
jackhmmer_binary_path=FLAGS.jackhmmer_binary_path,
uniprot_database_path=FLAGS.uniprot_database_path,
use_precomputed_msas=FLAGS.use_precomputed_msas)
else:
num_predictions_per_model = 1
data_pipeline = monomer_data_pipeline
model_runners = {}
model_names = config.MODEL_PRESETS[FLAGS.model_preset]
for model_name in model_names:
model_config = config.model_config(model_name)
if run_multimer_system:
model_config.model.num_ensemble_eval = num_ensemble
else:
model_config.data.eval.num_ensemble = num_ensemble
model_params = data.get_model_haiku_params(
model_name=model_name, data_dir=FLAGS.data_dir)
model_runner = model.RunModel(model_config, model_params)
for i in range(num_predictions_per_model):
model_runners[f'{model_name}_pred_{i}'] = model_runner
logging.info('Have %d models: %s', len(model_runners),
list(model_runners.keys()))
amber_relaxer = relax.AmberRelaxation(
max_iterations=RELAX_MAX_ITERATIONS,
tolerance=RELAX_ENERGY_TOLERANCE,
stiffness=RELAX_STIFFNESS,
exclude_residues=RELAX_EXCLUDE_RESIDUES,
max_outer_iterations=RELAX_MAX_OUTER_ITERATIONS,
use_gpu=FLAGS.use_gpu_relax)
random_seed = FLAGS.random_seed
if random_seed is None:
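    # Cap the random seed so that model_index + seed * num_models, as computed
    # in predict_structure, stays below sys.maxsize for every runner.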
random_seed = random.randrange(sys.maxsize // len(model_runners))
logging.info('Using random seed %d for the data pipeline', random_seed)
# Predict structure for each of the sequences.
for i, fasta_path in enumerate(FLAGS.fasta_paths):
fasta_name = fasta_names[i]
predict_structure(
fasta_path=fasta_path,
fasta_name=fasta_name,
output_dir_base=FLAGS.output_dir,
data_pipeline=data_pipeline,
model_runners=model_runners,
amber_relaxer=amber_relaxer,
benchmark=FLAGS.benchmark,
random_seed=random_seed,
models_to_relax=FLAGS.models_to_relax,
model_type=model_type,
)
if __name__ == '__main__':
flags.mark_flags_as_required([
'fasta_paths',
'output_dir',
'data_dir',
'uniref90_database_path',
'mgnify_database_path',
'template_mmcif_dir',
'max_template_date',
'obsolete_pdbs_path',
'use_gpu_relax',
])
app.run(main)
|
alphafold-main
|
run_alphafold.py
|
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Install script for setuptools."""
from alphafold import version
from setuptools import find_packages
from setuptools import setup
setup(
name='alphafold',
version=version.__version__,
description=(
'An implementation of the inference pipeline of AlphaFold v2.0. This is'
' a completely new model that was entered as AlphaFold2 in CASP14 and'
' published in Nature.'
),
author='DeepMind',
author_email='alphafold@deepmind.com',
license='Apache License, Version 2.0',
url='https://github.com/deepmind/alphafold',
packages=find_packages(),
install_requires=[
'absl-py',
'biopython',
'chex',
'dm-haiku',
'dm-tree',
'docker',
'immutabledict',
'jax',
'ml-collections',
'numpy',
'pandas',
'scipy',
'tensorflow-cpu',
],
tests_require=[
'matplotlib', # For notebook_utils_test.
'mock',
],
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: Apache Software License',
'Operating System :: POSIX :: Linux',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Programming Language :: Python :: 3.10',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
],
)
|
alphafold-main
|
setup.py
|
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for run_alphafold."""
import json
import os
from absl.testing import absltest
from absl.testing import parameterized
import run_alphafold
import mock
import numpy as np
# Internal import (7716).
TEST_DATA_DIR = 'alphafold/common/testdata/'
class RunAlphafoldTest(parameterized.TestCase):
@parameterized.named_parameters(
('relax', run_alphafold.ModelsToRelax.ALL),
('no_relax', run_alphafold.ModelsToRelax.NONE),
)
def test_end_to_end(self, models_to_relax):
data_pipeline_mock = mock.Mock()
model_runner_mock = mock.Mock()
amber_relaxer_mock = mock.Mock()
data_pipeline_mock.process.return_value = {}
model_runner_mock.process_features.return_value = {
'aatype': np.zeros((12, 10), dtype=np.int32),
'residue_index': np.tile(np.arange(10, dtype=np.int32)[None], (12, 1)),
}
model_runner_mock.predict.return_value = {
'structure_module': {
'final_atom_positions': np.zeros((10, 37, 3)),
'final_atom_mask': np.ones((10, 37)),
},
'predicted_lddt': {
'logits': np.ones((10, 50)),
},
'plddt': np.ones(10) * 42,
'ranking_confidence': 90,
'ptm': np.array(0.),
'aligned_confidence_probs': np.zeros((10, 10, 50)),
'predicted_aligned_error': np.zeros((10, 10)),
'max_predicted_aligned_error': np.array(0.),
}
model_runner_mock.multimer_mode = False
with open(
os.path.join(
absltest.get_default_test_srcdir(), TEST_DATA_DIR, 'glucagon.pdb'
)
) as f:
pdb_string = f.read()
amber_relaxer_mock.process.return_value = (
pdb_string,
None,
[1.0, 0.0, 0.0],
)
out_dir = self.create_tempdir().full_path
fasta_path = os.path.join(out_dir, 'target.fasta')
with open(fasta_path, 'wt') as f:
f.write('>A\nAAAAAAAAAAAAA')
fasta_name = 'test'
run_alphafold.predict_structure(
fasta_path=fasta_path,
fasta_name=fasta_name,
output_dir_base=out_dir,
data_pipeline=data_pipeline_mock,
model_runners={'model1': model_runner_mock},
amber_relaxer=amber_relaxer_mock,
benchmark=False,
random_seed=0,
models_to_relax=models_to_relax,
model_type='Monomer',
)
base_output_files = os.listdir(out_dir)
self.assertIn('target.fasta', base_output_files)
self.assertIn('test', base_output_files)
target_output_files = os.listdir(os.path.join(out_dir, 'test'))
expected_files = [
'confidence_model1.json',
'features.pkl',
'msas',
'pae_model1.json',
'ranked_0.cif',
'ranked_0.pdb',
'ranking_debug.json',
'result_model1.pkl',
'timings.json',
'unrelaxed_model1.cif',
'unrelaxed_model1.pdb',
]
if models_to_relax == run_alphafold.ModelsToRelax.ALL:
expected_files.extend(
['relaxed_model1.cif', 'relaxed_model1.pdb', 'relax_metrics.json']
)
with open(os.path.join(out_dir, 'test', 'relax_metrics.json')) as f:
relax_metrics = json.loads(f.read())
self.assertDictEqual({'model1': {'remaining_violations': [1.0, 0.0, 0.0],
'remaining_violations_count': 1.0}},
relax_metrics)
self.assertCountEqual(expected_files, target_output_files)
# Check that pLDDT is set in the B-factor column.
with open(os.path.join(out_dir, 'test', 'unrelaxed_model1.pdb')) as f:
for line in f:
if line.startswith('ATOM'):
self.assertEqual(line[61:66], '42.00')
if __name__ == '__main__':
absltest.main()
|
alphafold-main
|
run_alphafold_test.py
|
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Docker launch script for Alphafold docker image."""
import os
import pathlib
import signal
from typing import Tuple
from absl import app
from absl import flags
from absl import logging
import docker
from docker import types
flags.DEFINE_bool(
'use_gpu', True, 'Enable NVIDIA runtime to run with GPUs.')
flags.DEFINE_enum('models_to_relax', 'best', ['best', 'all', 'none'],
'The models to run the final relaxation step on. '
'If `all`, all models are relaxed, which may be time '
'consuming. If `best`, only the most confident model is '
'relaxed. If `none`, relaxation is not run. Turning off '
'relaxation might result in predictions with '
'distracting stereochemical violations but might help '
'in case you are having issues with the relaxation '
'stage.')
flags.DEFINE_bool(
'enable_gpu_relax', True, 'Run relax on GPU if GPU is enabled.')
flags.DEFINE_string(
'gpu_devices', 'all',
'Comma separated list of devices to pass to NVIDIA_VISIBLE_DEVICES.')
flags.DEFINE_list(
'fasta_paths', None, 'Paths to FASTA files, each containing a prediction '
'target that will be folded one after another. If a FASTA file contains '
'multiple sequences, then it will be folded as a multimer. Paths should be '
'separated by commas. All FASTA paths must have a unique basename as the '
'basename is used to name the output directories for each prediction.')
flags.DEFINE_string(
'output_dir', '/tmp/alphafold',
'Path to a directory that will store the results.')
flags.DEFINE_string(
'data_dir', None,
'Path to directory with supporting data: AlphaFold parameters and genetic '
'and template databases. Set to the target of download_all_databases.sh.')
flags.DEFINE_string(
'docker_image_name', 'alphafold', 'Name of the AlphaFold Docker image.')
flags.DEFINE_string(
'max_template_date', None,
'Maximum template release date to consider (ISO-8601 format: YYYY-MM-DD). '
'Important if folding historical test sets.')
flags.DEFINE_enum(
'db_preset', 'full_dbs', ['full_dbs', 'reduced_dbs'],
'Choose preset MSA database configuration - smaller genetic database '
'config (reduced_dbs) or full genetic database config (full_dbs)')
flags.DEFINE_enum(
'model_preset', 'monomer',
['monomer', 'monomer_casp14', 'monomer_ptm', 'multimer'],
'Choose preset model configuration - the monomer model, the monomer model '
'with extra ensembling, monomer model with pTM head, or multimer model')
flags.DEFINE_integer('num_multimer_predictions_per_model', 5, 'How many '
'predictions (each with a different random seed) will be '
'generated per model. E.g. if this is 2 and there are 5 '
'models then there will be 10 predictions per input. '
'Note: this FLAG only applies if model_preset=multimer')
flags.DEFINE_boolean(
'benchmark', False,
'Run multiple JAX model evaluations to obtain a timing that excludes the '
'compilation time, which should be more indicative of the time required '
'for inferencing many proteins.')
flags.DEFINE_boolean(
'use_precomputed_msas', False,
'Whether to read MSAs that have been written to disk instead of running '
'the MSA tools. The MSA files are looked up in the output directory, so it '
'must stay the same between multiple runs that are to reuse the MSAs. '
'WARNING: This will not check if the sequence, database or configuration '
'have changed.')
flags.DEFINE_string(
'docker_user', f'{os.geteuid()}:{os.getegid()}',
'UID:GID with which to run the Docker container. The output directories '
'will be owned by this user:group. By default, this is the current user. '
'Valid options are: uid or uid:gid, non-numeric values are not recognised '
'by Docker unless that user has been created within the container.')
FLAGS = flags.FLAGS
_ROOT_MOUNT_DIRECTORY = '/mnt/'
def _create_mount(mount_name: str, path: str) -> Tuple[types.Mount, str]:
"""Create a mount point for each file and directory used by the model."""
path = pathlib.Path(path).absolute()
target_path = pathlib.Path(_ROOT_MOUNT_DIRECTORY, mount_name)
if path.is_dir():
source_path = path
mounted_path = target_path
else:
source_path = path.parent
mounted_path = pathlib.Path(target_path, path.name)
if not source_path.exists():
raise ValueError(f'Failed to find source directory "{source_path}" to '
'mount in Docker container.')
logging.info('Mounting %s -> %s', source_path, target_path)
mount = types.Mount(target=str(target_path), source=str(source_path),
type='bind', read_only=True)
return mount, str(mounted_path)
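# Illustration (not part of the original script): directories are mounted
# as-is, while for a file the parent directory is mounted and the returned
# target points at the file inside the mount. For a hypothetical input,
#   _create_mount('fasta_path_0', '/home/user/target.fasta')
# binds /home/user to /mnt/fasta_path_0 and returns
# '/mnt/fasta_path_0/target.fasta' as the path to pass into the container.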
def main(argv):
if len(argv) > 1:
raise app.UsageError('Too many command-line arguments.')
# You can individually override the following paths if you have placed the
# data in locations other than the FLAGS.data_dir.
# Path to the Uniref90 database for use by JackHMMER.
uniref90_database_path = os.path.join(
FLAGS.data_dir, 'uniref90', 'uniref90.fasta')
# Path to the Uniprot database for use by JackHMMER.
uniprot_database_path = os.path.join(
FLAGS.data_dir, 'uniprot', 'uniprot.fasta')
# Path to the MGnify database for use by JackHMMER.
mgnify_database_path = os.path.join(
FLAGS.data_dir, 'mgnify', 'mgy_clusters_2022_05.fa')
# Path to the BFD database for use by HHblits.
bfd_database_path = os.path.join(
FLAGS.data_dir, 'bfd',
'bfd_metaclust_clu_complete_id30_c90_final_seq.sorted_opt')
# Path to the Small BFD database for use by JackHMMER.
small_bfd_database_path = os.path.join(
FLAGS.data_dir, 'small_bfd', 'bfd-first_non_consensus_sequences.fasta')
# Path to the Uniref30 database for use by HHblits.
uniref30_database_path = os.path.join(
FLAGS.data_dir, 'uniref30', 'UniRef30_2021_03')
# Path to the PDB70 database for use by HHsearch.
pdb70_database_path = os.path.join(FLAGS.data_dir, 'pdb70', 'pdb70')
# Path to the PDB seqres database for use by hmmsearch.
pdb_seqres_database_path = os.path.join(
FLAGS.data_dir, 'pdb_seqres', 'pdb_seqres.txt')
# Path to a directory with template mmCIF structures, each named <pdb_id>.cif.
template_mmcif_dir = os.path.join(FLAGS.data_dir, 'pdb_mmcif', 'mmcif_files')
# Path to a file mapping obsolete PDB IDs to their replacements.
obsolete_pdbs_path = os.path.join(FLAGS.data_dir, 'pdb_mmcif', 'obsolete.dat')
alphafold_path = pathlib.Path(__file__).parent.parent
data_dir_path = pathlib.Path(FLAGS.data_dir)
if alphafold_path == data_dir_path or alphafold_path in data_dir_path.parents:
raise app.UsageError(
f'The download directory {FLAGS.data_dir} should not be a subdirectory '
f'in the AlphaFold repository directory. If it is, the Docker build is '
f'slow since the large databases are copied during the image creation.')
mounts = []
command_args = []
# Mount each fasta path as a unique target directory.
target_fasta_paths = []
for i, fasta_path in enumerate(FLAGS.fasta_paths):
mount, target_path = _create_mount(f'fasta_path_{i}', fasta_path)
mounts.append(mount)
target_fasta_paths.append(target_path)
command_args.append(f'--fasta_paths={",".join(target_fasta_paths)}')
database_paths = [
('uniref90_database_path', uniref90_database_path),
('mgnify_database_path', mgnify_database_path),
('data_dir', FLAGS.data_dir),
('template_mmcif_dir', template_mmcif_dir),
('obsolete_pdbs_path', obsolete_pdbs_path),
]
if FLAGS.model_preset == 'multimer':
database_paths.append(('uniprot_database_path', uniprot_database_path))
database_paths.append(('pdb_seqres_database_path',
pdb_seqres_database_path))
else:
database_paths.append(('pdb70_database_path', pdb70_database_path))
if FLAGS.db_preset == 'reduced_dbs':
database_paths.append(('small_bfd_database_path', small_bfd_database_path))
else:
database_paths.extend([
('uniref30_database_path', uniref30_database_path),
('bfd_database_path', bfd_database_path),
])
for name, path in database_paths:
if path:
mount, target_path = _create_mount(name, path)
mounts.append(mount)
command_args.append(f'--{name}={target_path}')
output_target_path = os.path.join(_ROOT_MOUNT_DIRECTORY, 'output')
mounts.append(types.Mount(output_target_path, FLAGS.output_dir, type='bind'))
use_gpu_relax = FLAGS.enable_gpu_relax and FLAGS.use_gpu
command_args.extend([
f'--output_dir={output_target_path}',
f'--max_template_date={FLAGS.max_template_date}',
f'--db_preset={FLAGS.db_preset}',
f'--model_preset={FLAGS.model_preset}',
f'--benchmark={FLAGS.benchmark}',
f'--use_precomputed_msas={FLAGS.use_precomputed_msas}',
f'--num_multimer_predictions_per_model={FLAGS.num_multimer_predictions_per_model}',
f'--models_to_relax={FLAGS.models_to_relax}',
f'--use_gpu_relax={use_gpu_relax}',
'--logtostderr',
])
client = docker.from_env()
device_requests = [
docker.types.DeviceRequest(driver='nvidia', capabilities=[['gpu']])
] if FLAGS.use_gpu else None
container = client.containers.run(
image=FLAGS.docker_image_name,
command=command_args,
device_requests=device_requests,
remove=True,
detach=True,
mounts=mounts,
user=FLAGS.docker_user,
environment={
'NVIDIA_VISIBLE_DEVICES': FLAGS.gpu_devices,
# The following flags allow us to make predictions on proteins that
# would typically be too long to fit into GPU memory.
'TF_FORCE_UNIFIED_MEMORY': '1',
'XLA_PYTHON_CLIENT_MEM_FRACTION': '4.0',
})
# Add signal handler to ensure CTRL+C also stops the running container.
signal.signal(signal.SIGINT,
lambda unused_sig, unused_frame: container.kill())
for line in container.logs(stream=True):
logging.info(line.strip().decode('utf-8'))
if __name__ == '__main__':
flags.mark_flags_as_required([
'data_dir',
'fasta_paths',
'max_template_date',
])
app.run(main)
|
alphafold-main
|
docker/run_docker.py
|
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Single source of truth for the AlphaFold version."""
__version__ = '2.3.2'
|
alphafold-main
|
alphafold/version.py
|
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""An implementation of the inference pipeline of AlphaFold v2.0."""
|
alphafold-main
|
alphafold/__init__.py
|
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Functions for processing confidence metrics."""
import json
from typing import Dict, Optional, Tuple
import numpy as np
import scipy.special
def compute_plddt(logits: np.ndarray) -> np.ndarray:
"""Computes per-residue pLDDT from logits.
Args:
logits: [num_res, num_bins] output from the PredictedLDDTHead.
Returns:
plddt: [num_res] per-residue pLDDT.
"""
num_bins = logits.shape[-1]
bin_width = 1.0 / num_bins
bin_centers = np.arange(start=0.5 * bin_width, stop=1.0, step=bin_width)
probs = scipy.special.softmax(logits, axis=-1)
predicted_lddt_ca = np.sum(probs * bin_centers[None, :], axis=-1)
return predicted_lddt_ca * 100
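# Worked example (not part of the original module): with 50 bins the centers
# are 0.01, 0.03, ..., 0.99, so uniform logits give an expectation of 0.5 and
# hence a pLDDT of 50 for every residue.
def _example_compute_plddt():  # Never called; for exposition only.
  np.testing.assert_allclose(compute_plddt(np.zeros((10, 50))), 50.0)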
def _confidence_category(score: float) -> str:
"""Categorizes pLDDT into: disordered (D), low (L), medium (M), high (H)."""
if 0 <= score < 50:
return 'D'
if 50 <= score < 70:
return 'L'
elif 70 <= score < 90:
return 'M'
elif 90 <= score <= 100:
return 'H'
else:
raise ValueError(f'Invalid pLDDT score {score}')
def confidence_json(plddt: np.ndarray) -> str:
"""Returns JSON with confidence score and category for every residue.
Args:
plddt: Per-residue confidence metric data.
Returns:
String with a formatted JSON.
Raises:
ValueError: If `plddt` has a rank different than 1.
"""
if plddt.ndim != 1:
raise ValueError(f'The plddt array must be rank 1, got: {plddt.shape}.')
confidence = {
'residueNumber': list(range(1, len(plddt) + 1)),
'confidenceScore': [round(float(s), 2) for s in plddt],
'confidenceCategory': [_confidence_category(s) for s in plddt],
}
return json.dumps(confidence, indent=None, separators=(',', ':'))
def _calculate_bin_centers(breaks: np.ndarray):
"""Gets the bin centers from the bin edges.
Args:
breaks: [num_bins - 1] the error bin edges.
Returns:
bin_centers: [num_bins] the error bin centers.
"""
step = (breaks[1] - breaks[0])
# Add half-step to get the center
bin_centers = breaks + step / 2
# Add a catch-all bin at the end.
bin_centers = np.concatenate([bin_centers, [bin_centers[-1] + step]],
axis=0)
return bin_centers
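# Worked example (not part of the original module): for breaks [0., 1., 2.]
# the step is 1.0, giving centers [0.5, 1.5, 2.5] plus a final catch-all bin
# centered at 3.5, i.e. one more center than there are edges.
def _example_calculate_bin_centers():  # Never called; for exposition only.
  centers = _calculate_bin_centers(np.array([0., 1., 2.]))
  np.testing.assert_allclose(centers, [0.5, 1.5, 2.5, 3.5])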
def _calculate_expected_aligned_error(
alignment_confidence_breaks: np.ndarray,
aligned_distance_error_probs: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
"""Calculates expected aligned distance errors for every pair of residues.
Args:
alignment_confidence_breaks: [num_bins - 1] the error bin edges.
aligned_distance_error_probs: [num_res, num_res, num_bins] the predicted
probs for each error bin, for each pair of residues.
Returns:
predicted_aligned_error: [num_res, num_res] the expected aligned distance
error for each pair of residues.
max_predicted_aligned_error: The maximum predicted error possible.
"""
bin_centers = _calculate_bin_centers(alignment_confidence_breaks)
# Tuple of expected aligned distance error and max possible error.
return (np.sum(aligned_distance_error_probs * bin_centers, axis=-1),
np.asarray(bin_centers[-1]))
def compute_predicted_aligned_error(
logits: np.ndarray,
breaks: np.ndarray) -> Dict[str, np.ndarray]:
"""Computes aligned confidence metrics from logits.
Args:
logits: [num_res, num_res, num_bins] the logits output from
PredictedAlignedErrorHead.
breaks: [num_bins - 1] the error bin edges.
Returns:
aligned_confidence_probs: [num_res, num_res, num_bins] the predicted
aligned error probabilities over bins for each residue pair.
predicted_aligned_error: [num_res, num_res] the expected aligned distance
error for each pair of residues.
max_predicted_aligned_error: The maximum predicted error possible.
"""
aligned_confidence_probs = scipy.special.softmax(
logits,
axis=-1)
predicted_aligned_error, max_predicted_aligned_error = (
_calculate_expected_aligned_error(
alignment_confidence_breaks=breaks,
aligned_distance_error_probs=aligned_confidence_probs))
return {
'aligned_confidence_probs': aligned_confidence_probs,
'predicted_aligned_error': predicted_aligned_error,
'max_predicted_aligned_error': max_predicted_aligned_error,
}
def pae_json(pae: np.ndarray, max_pae: float) -> str:
"""Returns the PAE in the same format as is used in the AFDB.
Note that the values are presented as floats to 1 decimal place, whereas AFDB
returns integer values.
Args:
pae: The n_res x n_res PAE array.
max_pae: The maximum possible PAE value.
Returns:
PAE output format as a JSON string.
"""
# Check the PAE array is the correct shape.
if pae.ndim != 2 or pae.shape[0] != pae.shape[1]:
raise ValueError(f'PAE must be a square matrix, got {pae.shape}')
# Round the predicted aligned errors to 1 decimal place.
rounded_errors = np.round(pae.astype(np.float64), decimals=1)
formatted_output = [{
'predicted_aligned_error': rounded_errors.tolist(),
'max_predicted_aligned_error': max_pae,
}]
return json.dumps(formatted_output, indent=None, separators=(',', ':'))
def predicted_tm_score(
logits: np.ndarray,
breaks: np.ndarray,
residue_weights: Optional[np.ndarray] = None,
asym_id: Optional[np.ndarray] = None,
interface: bool = False) -> np.ndarray:
"""Computes predicted TM alignment or predicted interface TM alignment score.
Args:
logits: [num_res, num_res, num_bins] the logits output from
PredictedAlignedErrorHead.
breaks: [num_bins] the error bins.
residue_weights: [num_res] the per residue weights to use for the
expectation.
asym_id: [num_res] the asymmetric unit ID - the chain ID. Only needed for
ipTM calculation, i.e. when interface=True.
interface: If True, interface predicted TM score is computed.
Returns:
ptm_score: The predicted TM alignment or the predicted iTM score.
"""
# residue_weights has to be in [0, 1], but can be floating-point, i.e. the
# exp. resolved head's probability.
if residue_weights is None:
residue_weights = np.ones(logits.shape[0])
bin_centers = _calculate_bin_centers(breaks)
num_res = int(np.sum(residue_weights))
# Clip num_res to avoid negative/undefined d0.
clipped_num_res = max(num_res, 19)
# Compute d_0(num_res) as defined by TM-score, eqn. (5) in Yang & Skolnick
# "Scoring function for automated assessment of protein structure template
# quality", 2004: http://zhanglab.ccmb.med.umich.edu/papers/2004_3.pdf
d0 = 1.24 * (clipped_num_res - 15) ** (1./3) - 1.8
# Convert logits to probs.
probs = scipy.special.softmax(logits, axis=-1)
# TM-Score term for every bin.
tm_per_bin = 1. / (1 + np.square(bin_centers) / np.square(d0))
# E_distances tm(distance).
predicted_tm_term = np.sum(probs * tm_per_bin, axis=-1)
pair_mask = np.ones(shape=(num_res, num_res), dtype=bool)
if interface:
pair_mask *= asym_id[:, None] != asym_id[None, :]
predicted_tm_term *= pair_mask
pair_residue_weights = pair_mask * (
residue_weights[None, :] * residue_weights[:, None])
normed_residue_mask = pair_residue_weights / (1e-8 + np.sum(
pair_residue_weights, axis=-1, keepdims=True))
per_alignment = np.sum(predicted_tm_term * normed_residue_mask, axis=-1)
return np.asarray(per_alignment[(per_alignment * residue_weights).argmax()])
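# Worked example (not part of the original module): d0 grows with chain length,
# e.g. about 0.17 at the clipped minimum of 19 residues and about 3.65 for a
# 100-residue chain. With uniform logits every bin is equally likely, so the
# pTM reduces to the mean of 1 / (1 + (bin_center / d0)^2) over the bin centers.
def _example_predicted_tm_score():  # Never called; for exposition only.
  breaks = np.linspace(0., 31., 63)  # 63 evenly spaced edges -> 64 bins.
  ptm = predicted_tm_score(logits=np.zeros((100, 100, 64)), breaks=breaks)
  d0 = 1.24 * (100 - 15) ** (1. / 3) - 1.8
  expected = np.mean(1. / (1. + np.square(_calculate_bin_centers(breaks) / d0)))
  np.testing.assert_allclose(ptm, expected)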
|
alphafold-main
|
alphafold/common/confidence.py
|
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Common data types and constants used within Alphafold."""
|
alphafold-main
|
alphafold/common/__init__.py
|
# Copyright 2023 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test confidence metrics."""
from absl.testing import absltest
from alphafold.common import confidence
import numpy as np
class ConfidenceTest(absltest.TestCase):
def test_pae_json(self):
pae = np.array([[0.01, 13.12345], [20.0987, 0.0]])
pae_json = confidence.pae_json(pae=pae, max_pae=31.75)
self.assertEqual(
pae_json, '[{"predicted_aligned_error":[[0.0,13.1],[20.1,0.0]],'
'"max_predicted_aligned_error":31.75}]')
def test_confidence_json(self):
plddt = np.array([42, 42.42])
confidence_json = confidence.confidence_json(plddt=plddt)
print(confidence_json)
self.assertEqual(
confidence_json,
('{"residueNumber":[1,2],'
'"confidenceScore":[42.0,42.42],'
'"confidenceCategory":["D","D"]}'),
)
if __name__ == '__main__':
absltest.main()
|
alphafold-main
|
alphafold/common/confidence_test.py
|
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test that residue_constants generates correct values."""
from absl.testing import absltest
from absl.testing import parameterized
from alphafold.common import residue_constants
import numpy as np
class ResidueConstantsTest(parameterized.TestCase):
@parameterized.parameters(
('ALA', 0),
('CYS', 1),
('HIS', 2),
('MET', 3),
('LYS', 4),
('ARG', 4),
)
def testChiAnglesAtoms(self, residue_name, chi_num):
chi_angles_atoms = residue_constants.chi_angles_atoms[residue_name]
self.assertLen(chi_angles_atoms, chi_num)
for chi_angle_atoms in chi_angles_atoms:
self.assertLen(chi_angle_atoms, 4)
def testChiGroupsForAtom(self):
for k, chi_groups in residue_constants.chi_groups_for_atom.items():
res_name, atom_name = k
for chi_group_i, atom_i in chi_groups:
self.assertEqual(
atom_name,
residue_constants.chi_angles_atoms[res_name][chi_group_i][atom_i])
@parameterized.parameters(
('ALA', 5), ('ARG', 11), ('ASN', 8), ('ASP', 8), ('CYS', 6), ('GLN', 9),
('GLU', 9), ('GLY', 4), ('HIS', 10), ('ILE', 8), ('LEU', 8), ('LYS', 9),
('MET', 8), ('PHE', 11), ('PRO', 7), ('SER', 6), ('THR', 7), ('TRP', 14),
('TYR', 12), ('VAL', 7)
)
def testResidueAtoms(self, atom_name, num_residue_atoms):
residue_atoms = residue_constants.residue_atoms[atom_name]
self.assertLen(residue_atoms, num_residue_atoms)
def testStandardAtomMask(self):
with self.subTest('Check shape'):
self.assertEqual(residue_constants.STANDARD_ATOM_MASK.shape, (21, 37,))
with self.subTest('Check values'):
str_to_row = lambda s: [c == '1' for c in s] # More clear/concise.
np.testing.assert_array_equal(
residue_constants.STANDARD_ATOM_MASK,
np.array([
# NB This was defined by c+p but looks sane.
str_to_row('11111 '), # ALA
str_to_row('111111 1 1 11 1 '), # ARG
str_to_row('111111 11 '), # ASP
str_to_row('111111 11 '), # ASN
str_to_row('11111 1 '), # CYS
str_to_row('111111 1 11 '), # GLU
str_to_row('111111 1 11 '), # GLN
str_to_row('111 1 '), # GLY
str_to_row('111111 11 1 1 '), # HIS
str_to_row('11111 11 1 '), # ILE
str_to_row('111111 11 '), # LEU
str_to_row('111111 1 1 1 '), # LYS
str_to_row('111111 11 '), # MET
str_to_row('111111 11 11 1 '), # PHE
str_to_row('111111 1 '), # PRO
str_to_row('11111 1 '), # SER
str_to_row('11111 1 1 '), # THR
str_to_row('111111 11 11 1 1 11 '), # TRP
str_to_row('111111 11 11 11 '), # TYR
str_to_row('11111 11 '), # VAL
str_to_row(' '), # UNK
]))
with self.subTest('Check row totals'):
# Check each row has the right number of atoms.
for row, restype in enumerate(residue_constants.restypes): # A, R, ...
long_restype = residue_constants.restype_1to3[restype] # ALA, ARG, ...
atoms_names = residue_constants.residue_atoms[
long_restype] # ['C', 'CA', 'CB', 'N', 'O'], ...
self.assertLen(atoms_names,
residue_constants.STANDARD_ATOM_MASK[row, :].sum(),
long_restype)
def testAtomTypes(self):
self.assertEqual(residue_constants.atom_type_num, 37)
self.assertEqual(residue_constants.atom_types[0], 'N')
self.assertEqual(residue_constants.atom_types[1], 'CA')
self.assertEqual(residue_constants.atom_types[2], 'C')
self.assertEqual(residue_constants.atom_types[3], 'CB')
self.assertEqual(residue_constants.atom_types[4], 'O')
self.assertEqual(residue_constants.atom_order['N'], 0)
self.assertEqual(residue_constants.atom_order['CA'], 1)
self.assertEqual(residue_constants.atom_order['C'], 2)
self.assertEqual(residue_constants.atom_order['CB'], 3)
self.assertEqual(residue_constants.atom_order['O'], 4)
self.assertEqual(residue_constants.atom_type_num, 37)
def testRestypes(self):
three_letter_restypes = [
residue_constants.restype_1to3[r] for r in residue_constants.restypes]
for restype, exp_restype in zip(
three_letter_restypes, sorted(residue_constants.restype_1to3.values())):
self.assertEqual(restype, exp_restype)
self.assertEqual(residue_constants.restype_num, 20)
def testSequenceToOneHotHHBlits(self):
one_hot = residue_constants.sequence_to_onehot(
'ABCDEFGHIJKLMNOPQRSTUVWXYZ-', residue_constants.HHBLITS_AA_TO_ID)
exp_one_hot = np.array(
[[1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0],
[0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1]])
np.testing.assert_array_equal(one_hot, exp_one_hot)
def testSequenceToOneHotStandard(self):
one_hot = residue_constants.sequence_to_onehot(
'ARNDCQEGHILKMFPSTWYV', residue_constants.restype_order)
np.testing.assert_array_equal(one_hot, np.eye(20))
def testSequenceToOneHotUnknownMapping(self):
seq = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
expected_out = np.zeros([26, 21])
for row, position in enumerate(
[0, 20, 4, 3, 6, 13, 7, 8, 9, 20, 11, 10, 12, 2, 20, 14, 5, 1, 15, 16,
20, 19, 17, 20, 18, 20]):
expected_out[row, position] = 1
aa_types = residue_constants.sequence_to_onehot(
sequence=seq,
mapping=residue_constants.restype_order_with_x,
map_unknown_to_x=True)
self.assertTrue((aa_types == expected_out).all())
@parameterized.named_parameters(
('lowercase', 'aaa'), # Insertions in A3M.
('gaps', '---'), # Gaps in A3M.
('dots', '...'), # Gaps in A3M.
('metadata', '>TEST'), # FASTA metadata line.
)
def testSequenceToOneHotUnknownMappingError(self, seq):
with self.assertRaises(ValueError):
residue_constants.sequence_to_onehot(
sequence=seq,
mapping=residue_constants.restype_order_with_x,
map_unknown_to_x=True)
if __name__ == '__main__':
absltest.main()
|
alphafold-main
|
alphafold/common/residue_constants_test.py
|
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for protein."""
import os
from absl.testing import absltest
from absl.testing import parameterized
from alphafold.common import protein
from alphafold.common import residue_constants
import numpy as np
# Internal import (7716).
TEST_DATA_DIR = 'alphafold/common/testdata/'
class ProteinTest(parameterized.TestCase):
def _check_shapes(self, prot, num_res):
"""Check that the processed shapes are correct."""
num_atoms = residue_constants.atom_type_num
self.assertEqual((num_res, num_atoms, 3), prot.atom_positions.shape)
self.assertEqual((num_res,), prot.aatype.shape)
self.assertEqual((num_res, num_atoms), prot.atom_mask.shape)
self.assertEqual((num_res,), prot.residue_index.shape)
self.assertEqual((num_res,), prot.chain_index.shape)
self.assertEqual((num_res, num_atoms), prot.b_factors.shape)
@parameterized.named_parameters(
dict(testcase_name='chain_A',
pdb_file='2rbg.pdb', chain_id='A', num_res=282, num_chains=1),
dict(testcase_name='chain_B',
pdb_file='2rbg.pdb', chain_id='B', num_res=282, num_chains=1),
dict(testcase_name='multichain',
pdb_file='2rbg.pdb', chain_id=None, num_res=564, num_chains=2))
def test_from_pdb_str(self, pdb_file, chain_id, num_res, num_chains):
pdb_file = os.path.join(absltest.get_default_test_srcdir(), TEST_DATA_DIR,
pdb_file)
with open(pdb_file) as f:
pdb_string = f.read()
prot = protein.from_pdb_string(pdb_string, chain_id)
self._check_shapes(prot, num_res)
self.assertGreaterEqual(prot.aatype.min(), 0)
# Allow equal since unknown restypes have index equal to restype_num.
self.assertLessEqual(prot.aatype.max(), residue_constants.restype_num)
self.assertLen(np.unique(prot.chain_index), num_chains)
def test_to_pdb(self):
with open(
os.path.join(absltest.get_default_test_srcdir(), TEST_DATA_DIR,
'2rbg.pdb')) as f:
pdb_string = f.read()
prot = protein.from_pdb_string(pdb_string)
pdb_string_reconstr = protein.to_pdb(prot)
for line in pdb_string_reconstr.splitlines():
self.assertLen(line, 80)
prot_reconstr = protein.from_pdb_string(pdb_string_reconstr)
np.testing.assert_array_equal(prot_reconstr.aatype, prot.aatype)
np.testing.assert_array_almost_equal(
prot_reconstr.atom_positions, prot.atom_positions)
np.testing.assert_array_almost_equal(
prot_reconstr.atom_mask, prot.atom_mask)
np.testing.assert_array_equal(
prot_reconstr.residue_index, prot.residue_index)
np.testing.assert_array_equal(
prot_reconstr.chain_index, prot.chain_index)
np.testing.assert_array_almost_equal(
prot_reconstr.b_factors, prot.b_factors)
@parameterized.named_parameters(
dict(
testcase_name='glucagon',
pdb_file='glucagon.pdb',
model_type='Monomer',
),
dict(testcase_name='7bui', pdb_file='5nmu.pdb', model_type='Multimer'),
)
def test_to_mmcif(self, pdb_file, model_type):
with open(
os.path.join(
absltest.get_default_test_srcdir(), TEST_DATA_DIR, pdb_file
)
) as f:
pdb_string = f.read()
prot = protein.from_pdb_string(pdb_string)
file_id = 'test'
mmcif_string = protein.to_mmcif(prot, file_id, model_type)
prot_reconstr = protein.from_mmcif_string(mmcif_string)
np.testing.assert_array_equal(prot_reconstr.aatype, prot.aatype)
np.testing.assert_array_almost_equal(
prot_reconstr.atom_positions, prot.atom_positions
)
np.testing.assert_array_almost_equal(
prot_reconstr.atom_mask, prot.atom_mask
)
np.testing.assert_array_equal(
prot_reconstr.residue_index, prot.residue_index
)
np.testing.assert_array_equal(prot_reconstr.chain_index, prot.chain_index)
np.testing.assert_array_almost_equal(
prot_reconstr.b_factors, prot.b_factors
)
def test_ideal_atom_mask(self):
with open(
os.path.join(
absltest.get_default_test_srcdir(), TEST_DATA_DIR, '2rbg.pdb'
)
) as f:
pdb_string = f.read()
prot = protein.from_pdb_string(pdb_string)
ideal_mask = protein.ideal_atom_mask(prot)
non_ideal_residues = set([102] + list(range(127, 286)))
for i, (res, atom_mask) in enumerate(
zip(prot.residue_index, prot.atom_mask)
):
if res in non_ideal_residues:
self.assertFalse(np.all(atom_mask == ideal_mask[i]), msg=f'{res}')
else:
self.assertTrue(np.all(atom_mask == ideal_mask[i]), msg=f'{res}')
def test_too_many_chains(self):
num_res = protein.PDB_MAX_CHAINS + 1
num_atom_type = residue_constants.atom_type_num
with self.assertRaises(ValueError):
_ = protein.Protein(
atom_positions=np.random.random([num_res, num_atom_type, 3]),
aatype=np.random.randint(0, 21, [num_res]),
atom_mask=np.random.randint(0, 2, [num_res]).astype(np.float32),
residue_index=np.arange(1, num_res+1),
chain_index=np.arange(num_res),
b_factors=np.random.uniform(1, 100, [num_res]))
if __name__ == '__main__':
absltest.main()
|
alphafold-main
|
alphafold/common/protein_test.py
|
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Protein data type."""
import collections
import dataclasses
import functools
import io
from typing import Any, Dict, List, Mapping, Optional, Tuple
from alphafold.common import mmcif_metadata
from alphafold.common import residue_constants
from Bio.PDB import MMCIFParser
from Bio.PDB import PDBParser
from Bio.PDB.mmcifio import MMCIFIO
from Bio.PDB.Structure import Structure
import numpy as np
FeatureDict = Mapping[str, np.ndarray]
ModelOutput = Mapping[str, Any] # Is a nested dict.
# Complete sequence of chain IDs supported by the PDB format.
PDB_CHAIN_IDS = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789'
PDB_MAX_CHAINS = len(PDB_CHAIN_IDS) # := 62.
# Data to fill the _chem_comp table when writing mmCIFs.
_CHEM_COMP: Mapping[str, Tuple[Tuple[str, str], ...]] = {
'L-peptide linking': (
('ALA', 'ALANINE'),
('ARG', 'ARGININE'),
('ASN', 'ASPARAGINE'),
('ASP', 'ASPARTIC ACID'),
('CYS', 'CYSTEINE'),
('GLN', 'GLUTAMINE'),
('GLU', 'GLUTAMIC ACID'),
('HIS', 'HISTIDINE'),
('ILE', 'ISOLEUCINE'),
('LEU', 'LEUCINE'),
('LYS', 'LYSINE'),
('MET', 'METHIONINE'),
('PHE', 'PHENYLALANINE'),
('PRO', 'PROLINE'),
('SER', 'SERINE'),
('THR', 'THREONINE'),
('TRP', 'TRYPTOPHAN'),
('TYR', 'TYROSINE'),
('VAL', 'VALINE'),
),
'peptide linking': (('GLY', 'GLYCINE'),),
}
@dataclasses.dataclass(frozen=True)
class Protein:
"""Protein structure representation."""
# Cartesian coordinates of atoms in angstroms. The atom types correspond to
  # residue_constants.atom_types, i.e. the first three are N, CA, C.
atom_positions: np.ndarray # [num_res, num_atom_type, 3]
# Amino-acid type for each residue represented as an integer between 0 and
# 20, where 20 is 'X'.
aatype: np.ndarray # [num_res]
# Binary float mask to indicate presence of a particular atom. 1.0 if an atom
# is present and 0.0 if not. This should be used for loss masking.
atom_mask: np.ndarray # [num_res, num_atom_type]
# Residue index as used in PDB. It is not necessarily continuous or 0-indexed.
residue_index: np.ndarray # [num_res]
# 0-indexed number corresponding to the chain in the protein that this residue
# belongs to.
chain_index: np.ndarray # [num_res]
# B-factors, or temperature factors, of each residue (in sq. angstroms units),
# representing the displacement of the residue from its ground truth mean
# value.
b_factors: np.ndarray # [num_res, num_atom_type]
def __post_init__(self):
if len(np.unique(self.chain_index)) > PDB_MAX_CHAINS:
raise ValueError(
f'Cannot build an instance with more than {PDB_MAX_CHAINS} chains '
'because these cannot be written to PDB format.')
def _from_bio_structure(
structure: Structure, chain_id: Optional[str] = None
) -> Protein:
"""Takes a Biopython structure and creates a `Protein` instance.
WARNING: All non-standard residue types will be converted into UNK. All
non-standard atoms will be ignored.
Args:
structure: Structure from the Biopython library.
chain_id: If chain_id is specified (e.g. A), then only that chain is parsed.
Otherwise all chains are parsed.
Returns:
A new `Protein` created from the structure contents.
Raises:
ValueError: If the number of models included in the structure is not 1.
ValueError: If insertion code is detected at a residue.
"""
models = list(structure.get_models())
if len(models) != 1:
raise ValueError(
'Only single model PDBs/mmCIFs are supported. Found'
f' {len(models)} models.'
)
model = models[0]
atom_positions = []
aatype = []
atom_mask = []
residue_index = []
chain_ids = []
b_factors = []
for chain in model:
if chain_id is not None and chain.id != chain_id:
continue
for res in chain:
if res.id[2] != ' ':
raise ValueError(
f'PDB/mmCIF contains an insertion code at chain {chain.id} and'
f' residue index {res.id[1]}. These are not supported.'
)
res_shortname = residue_constants.restype_3to1.get(res.resname, 'X')
restype_idx = residue_constants.restype_order.get(
res_shortname, residue_constants.restype_num)
pos = np.zeros((residue_constants.atom_type_num, 3))
mask = np.zeros((residue_constants.atom_type_num,))
res_b_factors = np.zeros((residue_constants.atom_type_num,))
for atom in res:
if atom.name not in residue_constants.atom_types:
continue
pos[residue_constants.atom_order[atom.name]] = atom.coord
mask[residue_constants.atom_order[atom.name]] = 1.
res_b_factors[residue_constants.atom_order[atom.name]] = atom.bfactor
if np.sum(mask) < 0.5:
# If no known atom positions are reported for the residue then skip it.
continue
aatype.append(restype_idx)
atom_positions.append(pos)
atom_mask.append(mask)
residue_index.append(res.id[1])
chain_ids.append(chain.id)
b_factors.append(res_b_factors)
# Chain IDs are usually characters so map these to ints.
unique_chain_ids = np.unique(chain_ids)
chain_id_mapping = {cid: n for n, cid in enumerate(unique_chain_ids)}
chain_index = np.array([chain_id_mapping[cid] for cid in chain_ids])
return Protein(
atom_positions=np.array(atom_positions),
atom_mask=np.array(atom_mask),
aatype=np.array(aatype),
residue_index=np.array(residue_index),
chain_index=chain_index,
b_factors=np.array(b_factors))
def from_pdb_string(pdb_str: str, chain_id: Optional[str] = None) -> Protein:
"""Takes a PDB string and constructs a `Protein` object.
WARNING: All non-standard residue types will be converted into UNK. All
non-standard atoms will be ignored.
Args:
pdb_str: The contents of the pdb file
chain_id: If chain_id is specified (e.g. A), then only that chain is parsed.
Otherwise all chains are parsed.
Returns:
A new `Protein` parsed from the pdb contents.
"""
with io.StringIO(pdb_str) as pdb_fh:
parser = PDBParser(QUIET=True)
structure = parser.get_structure(id='none', file=pdb_fh)
return _from_bio_structure(structure, chain_id)
def from_mmcif_string(
mmcif_str: str, chain_id: Optional[str] = None
) -> Protein:
"""Takes a mmCIF string and constructs a `Protein` object.
WARNING: All non-standard residue types will be converted into UNK. All
non-standard atoms will be ignored.
Args:
mmcif_str: The contents of the mmCIF file
chain_id: If chain_id is specified (e.g. A), then only that chain is parsed.
Otherwise all chains are parsed.
Returns:
A new `Protein` parsed from the mmCIF contents.
"""
with io.StringIO(mmcif_str) as mmcif_fh:
parser = MMCIFParser(QUIET=True)
structure = parser.get_structure(structure_id='none', filename=mmcif_fh)
return _from_bio_structure(structure, chain_id)
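# Illustrative usage sketch (added for this write-up, not part of the original
# AlphaFold source): both parsers above take raw text, so callers typically
# read the file themselves. The path/extension handling here is an assumption
# made only for this example.
def _example_load_protein(path: str, chain_id: Optional[str] = None) -> Protein:
  """Dispatches to the PDB or mmCIF parser based on the file extension."""
  with open(path) as f:
    contents = f.read()
  if path.endswith('.cif'):
    return from_mmcif_string(contents, chain_id=chain_id)
  return from_pdb_string(contents, chain_id=chain_id)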
def _chain_end(atom_index, end_resname, chain_name, residue_index) -> str:
chain_end = 'TER'
return (f'{chain_end:<6}{atom_index:>5} {end_resname:>3} '
f'{chain_name:>1}{residue_index:>4}')
def to_pdb(prot: Protein) -> str:
"""Converts a `Protein` instance to a PDB string.
Args:
prot: The protein to convert to PDB.
Returns:
PDB string.
"""
restypes = residue_constants.restypes + ['X']
res_1to3 = lambda r: residue_constants.restype_1to3.get(restypes[r], 'UNK')
atom_types = residue_constants.atom_types
pdb_lines = []
atom_mask = prot.atom_mask
aatype = prot.aatype
atom_positions = prot.atom_positions
residue_index = prot.residue_index.astype(np.int32)
chain_index = prot.chain_index.astype(np.int32)
b_factors = prot.b_factors
if np.any(aatype > residue_constants.restype_num):
raise ValueError('Invalid aatypes.')
# Construct a mapping from chain integer indices to chain ID strings.
chain_ids = {}
for i in np.unique(chain_index): # np.unique gives sorted output.
if i >= PDB_MAX_CHAINS:
raise ValueError(
f'The PDB format supports at most {PDB_MAX_CHAINS} chains.')
chain_ids[i] = PDB_CHAIN_IDS[i]
pdb_lines.append('MODEL 1')
atom_index = 1
last_chain_index = chain_index[0]
# Add all atom sites.
for i in range(aatype.shape[0]):
# Close the previous chain if in a multichain PDB.
if last_chain_index != chain_index[i]:
pdb_lines.append(_chain_end(
atom_index, res_1to3(aatype[i - 1]), chain_ids[chain_index[i - 1]],
residue_index[i - 1]))
last_chain_index = chain_index[i]
atom_index += 1 # Atom index increases at the TER symbol.
res_name_3 = res_1to3(aatype[i])
for atom_name, pos, mask, b_factor in zip(
atom_types, atom_positions[i], atom_mask[i], b_factors[i]):
if mask < 0.5:
continue
record_type = 'ATOM'
name = atom_name if len(atom_name) == 4 else f' {atom_name}'
alt_loc = ''
insertion_code = ''
occupancy = 1.00
      element = atom_name[0]  # Only C, N, O, S occur, so the first char works.
charge = ''
# PDB is a columnar format, every space matters here!
atom_line = (f'{record_type:<6}{atom_index:>5} {name:<4}{alt_loc:>1}'
f'{res_name_3:>3} {chain_ids[chain_index[i]]:>1}'
f'{residue_index[i]:>4}{insertion_code:>1} '
f'{pos[0]:>8.3f}{pos[1]:>8.3f}{pos[2]:>8.3f}'
f'{occupancy:>6.2f}{b_factor:>6.2f} '
f'{element:>2}{charge:>2}')
pdb_lines.append(atom_line)
atom_index += 1
# Close the final chain.
pdb_lines.append(_chain_end(atom_index, res_1to3(aatype[-1]),
chain_ids[chain_index[-1]], residue_index[-1]))
pdb_lines.append('ENDMDL')
pdb_lines.append('END')
# Pad all lines to 80 characters.
pdb_lines = [line.ljust(80) for line in pdb_lines]
return '\n'.join(pdb_lines) + '\n' # Add terminating newline.
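# Illustrative round-trip sketch (added for this write-up, not part of the
# original AlphaFold source). As the parser warnings above note, non-standard
# residues become UNK and non-standard atoms are dropped at parse time, so the
# output is not guaranteed to be byte-identical to the input. The file path is
# hypothetical.
def _example_pdb_round_trip(pdb_path: str, chain_id: str = 'A') -> str:
  """Parses one chain of a PDB file and re-serialises it to PDB text."""
  with open(pdb_path) as f:
    prot = from_pdb_string(f.read(), chain_id=chain_id)
  return to_pdb(prot)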
def ideal_atom_mask(prot: Protein) -> np.ndarray:
"""Computes an ideal atom mask.
  `Protein.atom_mask` is typically defined according to the atoms that are
  reported in the PDB. This function instead computes a mask over the heavy
  atoms that should be present for the given amino acid sequence.
Args:
prot: `Protein` whose fields are `numpy.ndarray` objects.
Returns:
An ideal atom mask.
"""
return residue_constants.STANDARD_ATOM_MASK[prot.aatype]
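# Illustrative sketch (added for this write-up, not part of the original
# AlphaFold source): comparing the observed atom mask with the ideal mask gives
# the fraction of expected heavy atoms that were not resolved experimentally.
def _example_missing_heavy_atom_fraction(prot: Protein) -> float:
  """Fraction of ideally present heavy atoms that are absent from the PDB."""
  ideal = ideal_atom_mask(prot)
  missing = ideal * (1.0 - prot.atom_mask)
  return float(missing.sum() / max(float(ideal.sum()), 1.0))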
def from_prediction(
features: FeatureDict,
result: ModelOutput,
b_factors: Optional[np.ndarray] = None,
remove_leading_feature_dimension: bool = True) -> Protein:
"""Assembles a protein from a prediction.
Args:
features: Dictionary holding model inputs.
result: Dictionary holding model outputs.
b_factors: (Optional) B-factors to use for the protein.
remove_leading_feature_dimension: Whether to remove the leading dimension
of the `features` values.
Returns:
A protein instance.
"""
fold_output = result['structure_module']
def _maybe_remove_leading_dim(arr: np.ndarray) -> np.ndarray:
return arr[0] if remove_leading_feature_dimension else arr
if 'asym_id' in features:
chain_index = _maybe_remove_leading_dim(features['asym_id'])
else:
chain_index = np.zeros_like(_maybe_remove_leading_dim(features['aatype']))
if b_factors is None:
b_factors = np.zeros_like(fold_output['final_atom_mask'])
return Protein(
aatype=_maybe_remove_leading_dim(features['aatype']),
atom_positions=fold_output['final_atom_positions'],
atom_mask=fold_output['final_atom_mask'],
residue_index=_maybe_remove_leading_dim(features['residue_index']) + 1,
chain_index=chain_index,
b_factors=b_factors)
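# Illustrative usage sketch (added for this write-up, not part of the original
# AlphaFold source): a typical post-processing step builds a Protein from the
# model outputs and writes it out as PDB, using per-residue pLDDT (broadcast
# over atoms) as the B-factor column. The 'plddt' key in `result` is an
# assumption made for this example.
def _example_prediction_to_pdb(
    features: FeatureDict, result: ModelOutput) -> str:
  """Converts model outputs into a PDB string with pLDDT in the B-factors."""
  plddt = np.asarray(result['plddt'])  # Assumed shape: (num_res,).
  b_factors = np.repeat(
      plddt[:, None], residue_constants.atom_type_num, axis=-1)
  prot = from_prediction(features, result, b_factors=b_factors)
  return to_pdb(prot)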
def to_mmcif(
prot: Protein,
file_id: str,
model_type: str,
) -> str:
"""Converts a `Protein` instance to an mmCIF string.
WARNING 1: The _entity_poly_seq is filled with unknown (UNK) residues for any
missing residue indices in the range from min(1, min(residue_index)) to
max(residue_index). E.g. for a protein object with positions for residues
2 (MET), 3 (LYS), 6 (GLY), this method would set the _entity_poly_seq to:
1 UNK
2 MET
3 LYS
4 UNK
5 UNK
6 GLY
This is done to preserve the residue numbering.
  WARNING 2: Converting a ground truth mmCIF file to Protein and then back to
  mmCIF using this method will convert all non-standard residue types to UNK.
  If you need to preserve non-standard residue types, you need to store more
  mmCIF metadata in the Protein object (e.g. all fields except for the
  _atom_site loop).
WARNING 3: Converting ground truth mmCIF file to Protein and then back to
mmCIF using this method will not retain the original chain indices.
WARNING 4: In case of multiple identical chains, they are assigned different
`_atom_site.label_entity_id` values.
Args:
prot: A protein to convert to mmCIF string.
file_id: The file ID (usually the PDB ID) to be used in the mmCIF.
model_type: 'Multimer' or 'Monomer'.
Returns:
A valid mmCIF string.
Raises:
ValueError: If aminoacid types array contains entries with too many protein
types.
"""
atom_mask = prot.atom_mask
aatype = prot.aatype
atom_positions = prot.atom_positions
residue_index = prot.residue_index.astype(np.int32)
chain_index = prot.chain_index.astype(np.int32)
b_factors = prot.b_factors
# Construct a mapping from chain integer indices to chain ID strings.
chain_ids = {}
# We count unknown residues as protein residues.
for entity_id in np.unique(chain_index): # np.unique gives sorted output.
chain_ids[entity_id] = _int_id_to_str_id(entity_id + 1)
mmcif_dict = collections.defaultdict(list)
mmcif_dict['data_'] = file_id.upper()
mmcif_dict['_entry.id'] = file_id.upper()
label_asym_id_to_entity_id = {}
# Entity and chain information.
for entity_id, chain_id in chain_ids.items():
# Add all chain information to the _struct_asym table.
label_asym_id_to_entity_id[str(chain_id)] = str(entity_id)
mmcif_dict['_struct_asym.id'].append(chain_id)
mmcif_dict['_struct_asym.entity_id'].append(str(entity_id))
# Add information about the entity to the _entity_poly table.
mmcif_dict['_entity_poly.entity_id'].append(str(entity_id))
mmcif_dict['_entity_poly.type'].append(residue_constants.PROTEIN_CHAIN)
mmcif_dict['_entity_poly.pdbx_strand_id'].append(chain_id)
# Generate the _entity table.
mmcif_dict['_entity.id'].append(str(entity_id))
mmcif_dict['_entity.type'].append(residue_constants.POLYMER_CHAIN)
# Add the residues to the _entity_poly_seq table.
for entity_id, (res_ids, aas) in _get_entity_poly_seq(
aatype, residue_index, chain_index
).items():
for res_id, aa in zip(res_ids, aas):
mmcif_dict['_entity_poly_seq.entity_id'].append(str(entity_id))
mmcif_dict['_entity_poly_seq.num'].append(str(res_id))
mmcif_dict['_entity_poly_seq.mon_id'].append(
residue_constants.resnames[aa]
)
# Populate the chem comp table.
for chem_type, chem_comp in _CHEM_COMP.items():
for chem_id, chem_name in chem_comp:
mmcif_dict['_chem_comp.id'].append(chem_id)
mmcif_dict['_chem_comp.type'].append(chem_type)
mmcif_dict['_chem_comp.name'].append(chem_name)
# Add all atom sites.
atom_index = 1
for i in range(aatype.shape[0]):
res_name_3 = residue_constants.resnames[aatype[i]]
if aatype[i] <= len(residue_constants.restypes):
atom_names = residue_constants.atom_types
else:
raise ValueError(
'Amino acid types array contains entries with too many protein types.'
)
for atom_name, pos, mask, b_factor in zip(
atom_names, atom_positions[i], atom_mask[i], b_factors[i]
):
if mask < 0.5:
continue
type_symbol = residue_constants.atom_id_to_type(atom_name)
mmcif_dict['_atom_site.group_PDB'].append('ATOM')
mmcif_dict['_atom_site.id'].append(str(atom_index))
mmcif_dict['_atom_site.type_symbol'].append(type_symbol)
mmcif_dict['_atom_site.label_atom_id'].append(atom_name)
mmcif_dict['_atom_site.label_alt_id'].append('.')
mmcif_dict['_atom_site.label_comp_id'].append(res_name_3)
mmcif_dict['_atom_site.label_asym_id'].append(chain_ids[chain_index[i]])
mmcif_dict['_atom_site.label_entity_id'].append(
label_asym_id_to_entity_id[chain_ids[chain_index[i]]]
)
mmcif_dict['_atom_site.label_seq_id'].append(str(residue_index[i]))
mmcif_dict['_atom_site.pdbx_PDB_ins_code'].append('.')
mmcif_dict['_atom_site.Cartn_x'].append(f'{pos[0]:.3f}')
mmcif_dict['_atom_site.Cartn_y'].append(f'{pos[1]:.3f}')
mmcif_dict['_atom_site.Cartn_z'].append(f'{pos[2]:.3f}')
mmcif_dict['_atom_site.occupancy'].append('1.00')
mmcif_dict['_atom_site.B_iso_or_equiv'].append(f'{b_factor:.2f}')
mmcif_dict['_atom_site.auth_seq_id'].append(str(residue_index[i]))
mmcif_dict['_atom_site.auth_asym_id'].append(chain_ids[chain_index[i]])
mmcif_dict['_atom_site.pdbx_PDB_model_num'].append('1')
atom_index += 1
metadata_dict = mmcif_metadata.add_metadata_to_mmcif(mmcif_dict, model_type)
mmcif_dict.update(metadata_dict)
return _create_mmcif_string(mmcif_dict)
@functools.lru_cache(maxsize=256)
def _int_id_to_str_id(num: int) -> str:
"""Encodes a number as a string, using reverse spreadsheet style naming.
Args:
num: A positive integer.
Returns:
    A string that encodes the positive integer using reverse spreadsheet-style
    naming, e.g. 1 = A, 2 = B, ..., 27 = AA, 28 = BA, 29 = CA, ... This is the
    usual way to encode chain IDs in mmCIF files.
"""
if num <= 0:
raise ValueError(f'Only positive integers allowed, got {num}.')
  num = num - 1  # Convert from 1-based to 0-based indexing.
output = []
while num >= 0:
output.append(chr(num % 26 + ord('A')))
num = num // 26 - 1
return ''.join(output)
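# Illustrative check (added for this write-up, not part of the original
# AlphaFold source): the reverse spreadsheet-style encoding wraps after 26
# chains, matching the docstring above.
def _example_int_id_to_str_id():
  ids = [_int_id_to_str_id(i) for i in (1, 2, 26, 27, 28, 52, 53)]
  # ids == ['A', 'B', 'Z', 'AA', 'BA', 'ZA', 'AB']
  return ids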
def _get_entity_poly_seq(
aatypes: np.ndarray, residue_indices: np.ndarray, chain_indices: np.ndarray
) -> Dict[int, Tuple[List[int], List[int]]]:
"""Constructs gapless residue index and aatype lists for each chain.
Args:
aatypes: A numpy array with aatypes.
residue_indices: A numpy array with residue indices.
chain_indices: A numpy array with chain indices.
Returns:
A dictionary mapping chain indices to a tuple with list of residue indices
and a list of aatypes. Missing residues are filled with UNK residue type.
"""
if (
aatypes.shape[0] != residue_indices.shape[0]
or aatypes.shape[0] != chain_indices.shape[0]
):
raise ValueError(
'aatypes, residue_indices, chain_indices must have the same length.'
)
# Group the present residues by chain index.
present = collections.defaultdict(list)
for chain_id, res_id, aa in zip(chain_indices, residue_indices, aatypes):
present[chain_id].append((res_id, aa))
# Add any missing residues (from 1 to the first residue and for any gaps).
entity_poly_seq = {}
for chain_id, present_residues in present.items():
present_residue_indices = set([x[0] for x in present_residues])
min_res_id = min(present_residue_indices) # Could be negative.
max_res_id = max(present_residue_indices)
new_residue_indices = []
new_aatypes = []
present_index = 0
for i in range(min(1, min_res_id), max_res_id + 1):
new_residue_indices.append(i)
if i in present_residue_indices:
new_aatypes.append(present_residues[present_index][1])
present_index += 1
else:
new_aatypes.append(20) # Unknown amino acid type.
entity_poly_seq[chain_id] = (new_residue_indices, new_aatypes)
return entity_poly_seq
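# Illustrative sketch (added for this write-up, not part of the original
# AlphaFold source): reproduces the gap-filling example from the `to_mmcif`
# docstring, i.e. residues 2 (MET), 3 (LYS) and 6 (GLY) on a single chain.
def _example_entity_poly_seq_gap_fill():
  aatypes = np.array([
      residue_constants.restype_order['M'],  # MET at residue index 2.
      residue_constants.restype_order['K'],  # LYS at residue index 3.
      residue_constants.restype_order['G'],  # GLY at residue index 6.
  ])
  residue_indices = np.array([2, 3, 6])
  chain_indices = np.array([0, 0, 0])
  # Chain 0 is padded to residues 1..6, with UNK (index 20) at positions 1, 4
  # and 5.
  return _get_entity_poly_seq(aatypes, residue_indices, chain_indices)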
def _create_mmcif_string(mmcif_dict: Dict[str, Any]) -> str:
"""Converts mmCIF dictionary into mmCIF string."""
mmcifio = MMCIFIO()
mmcifio.set_dict(mmcif_dict)
with io.StringIO() as file_handle:
mmcifio.save(file_handle)
return file_handle.getvalue()
|
alphafold-main
|
alphafold/common/protein.py
|
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Constants used in AlphaFold."""
import collections
import functools
import os
from typing import Final, List, Mapping, Tuple
import numpy as np
import tree
# Internal import (35fd).
# Distance from one CA to next CA [trans configuration: omega = 180].
ca_ca = 3.80209737096
# Format: The list for each AA type contains chi1, chi2, chi3, chi4 in
# this order (or a relevant subset from chi1 onwards). ALA and GLY don't have
# chi angles so their chi angle lists are empty.
chi_angles_atoms = {
'ALA': [],
# Chi5 in arginine is always 0 +- 5 degrees, so ignore it.
'ARG': [['N', 'CA', 'CB', 'CG'], ['CA', 'CB', 'CG', 'CD'],
['CB', 'CG', 'CD', 'NE'], ['CG', 'CD', 'NE', 'CZ']],
'ASN': [['N', 'CA', 'CB', 'CG'], ['CA', 'CB', 'CG', 'OD1']],
'ASP': [['N', 'CA', 'CB', 'CG'], ['CA', 'CB', 'CG', 'OD1']],
'CYS': [['N', 'CA', 'CB', 'SG']],
'GLN': [['N', 'CA', 'CB', 'CG'], ['CA', 'CB', 'CG', 'CD'],
['CB', 'CG', 'CD', 'OE1']],
'GLU': [['N', 'CA', 'CB', 'CG'], ['CA', 'CB', 'CG', 'CD'],
['CB', 'CG', 'CD', 'OE1']],
'GLY': [],
'HIS': [['N', 'CA', 'CB', 'CG'], ['CA', 'CB', 'CG', 'ND1']],
'ILE': [['N', 'CA', 'CB', 'CG1'], ['CA', 'CB', 'CG1', 'CD1']],
'LEU': [['N', 'CA', 'CB', 'CG'], ['CA', 'CB', 'CG', 'CD1']],
'LYS': [['N', 'CA', 'CB', 'CG'], ['CA', 'CB', 'CG', 'CD'],
['CB', 'CG', 'CD', 'CE'], ['CG', 'CD', 'CE', 'NZ']],
'MET': [['N', 'CA', 'CB', 'CG'], ['CA', 'CB', 'CG', 'SD'],
['CB', 'CG', 'SD', 'CE']],
'PHE': [['N', 'CA', 'CB', 'CG'], ['CA', 'CB', 'CG', 'CD1']],
'PRO': [['N', 'CA', 'CB', 'CG'], ['CA', 'CB', 'CG', 'CD']],
'SER': [['N', 'CA', 'CB', 'OG']],
'THR': [['N', 'CA', 'CB', 'OG1']],
'TRP': [['N', 'CA', 'CB', 'CG'], ['CA', 'CB', 'CG', 'CD1']],
'TYR': [['N', 'CA', 'CB', 'CG'], ['CA', 'CB', 'CG', 'CD1']],
'VAL': [['N', 'CA', 'CB', 'CG1']],
}
# If chi angles given in fixed-length array, this matrix determines how to mask
# them for each AA type. The order is as per restype_order (see below).
chi_angles_mask = [
[0.0, 0.0, 0.0, 0.0], # ALA
[1.0, 1.0, 1.0, 1.0], # ARG
[1.0, 1.0, 0.0, 0.0], # ASN
[1.0, 1.0, 0.0, 0.0], # ASP
[1.0, 0.0, 0.0, 0.0], # CYS
[1.0, 1.0, 1.0, 0.0], # GLN
[1.0, 1.0, 1.0, 0.0], # GLU
[0.0, 0.0, 0.0, 0.0], # GLY
[1.0, 1.0, 0.0, 0.0], # HIS
[1.0, 1.0, 0.0, 0.0], # ILE
[1.0, 1.0, 0.0, 0.0], # LEU
[1.0, 1.0, 1.0, 1.0], # LYS
[1.0, 1.0, 1.0, 0.0], # MET
[1.0, 1.0, 0.0, 0.0], # PHE
[1.0, 1.0, 0.0, 0.0], # PRO
[1.0, 0.0, 0.0, 0.0], # SER
[1.0, 0.0, 0.0, 0.0], # THR
[1.0, 1.0, 0.0, 0.0], # TRP
[1.0, 1.0, 0.0, 0.0], # TYR
[1.0, 0.0, 0.0, 0.0], # VAL
]
# The following chi angles are pi periodic: they can be rotated by a multiple
# of pi without affecting the structure.
chi_pi_periodic = [
[0.0, 0.0, 0.0, 0.0], # ALA
[0.0, 0.0, 0.0, 0.0], # ARG
[0.0, 0.0, 0.0, 0.0], # ASN
[0.0, 1.0, 0.0, 0.0], # ASP
[0.0, 0.0, 0.0, 0.0], # CYS
[0.0, 0.0, 0.0, 0.0], # GLN
[0.0, 0.0, 1.0, 0.0], # GLU
[0.0, 0.0, 0.0, 0.0], # GLY
[0.0, 0.0, 0.0, 0.0], # HIS
[0.0, 0.0, 0.0, 0.0], # ILE
[0.0, 0.0, 0.0, 0.0], # LEU
[0.0, 0.0, 0.0, 0.0], # LYS
[0.0, 0.0, 0.0, 0.0], # MET
[0.0, 1.0, 0.0, 0.0], # PHE
[0.0, 0.0, 0.0, 0.0], # PRO
[0.0, 0.0, 0.0, 0.0], # SER
[0.0, 0.0, 0.0, 0.0], # THR
[0.0, 0.0, 0.0, 0.0], # TRP
[0.0, 1.0, 0.0, 0.0], # TYR
[0.0, 0.0, 0.0, 0.0], # VAL
[0.0, 0.0, 0.0, 0.0], # UNK
]
# Atoms positions relative to the 8 rigid groups, defined by the pre-omega, phi,
# psi and chi angles:
# 0: 'backbone group',
# 1: 'pre-omega-group', (empty)
# 2: 'phi-group', (currently empty, because it defines only hydrogens)
# 3: 'psi-group',
# 4,5,6,7: 'chi1,2,3,4-group'
# The atom positions are relative to the axis-end-atom of the corresponding
# rotation axis. The x-axis is in direction of the rotation axis, and the y-axis
# is defined such that the dihedral-angle-defining atom (the last entry in
# chi_angles_atoms above) is in the xy-plane (with a positive y-coordinate).
# format: [atomname, group_idx, rel_position]
rigid_group_atom_positions = {
'ALA': [
['N', 0, (-0.525, 1.363, 0.000)],
['CA', 0, (0.000, 0.000, 0.000)],
['C', 0, (1.526, -0.000, -0.000)],
['CB', 0, (-0.529, -0.774, -1.205)],
['O', 3, (0.627, 1.062, 0.000)],
],
'ARG': [
['N', 0, (-0.524, 1.362, -0.000)],
['CA', 0, (0.000, 0.000, 0.000)],
['C', 0, (1.525, -0.000, -0.000)],
['CB', 0, (-0.524, -0.778, -1.209)],
['O', 3, (0.626, 1.062, 0.000)],
['CG', 4, (0.616, 1.390, -0.000)],
['CD', 5, (0.564, 1.414, 0.000)],
['NE', 6, (0.539, 1.357, -0.000)],
['NH1', 7, (0.206, 2.301, 0.000)],
['NH2', 7, (2.078, 0.978, -0.000)],
['CZ', 7, (0.758, 1.093, -0.000)],
],
'ASN': [
['N', 0, (-0.536, 1.357, 0.000)],
['CA', 0, (0.000, 0.000, 0.000)],
['C', 0, (1.526, -0.000, -0.000)],
['CB', 0, (-0.531, -0.787, -1.200)],
['O', 3, (0.625, 1.062, 0.000)],
['CG', 4, (0.584, 1.399, 0.000)],
['ND2', 5, (0.593, -1.188, 0.001)],
['OD1', 5, (0.633, 1.059, 0.000)],
],
'ASP': [
['N', 0, (-0.525, 1.362, -0.000)],
['CA', 0, (0.000, 0.000, 0.000)],
['C', 0, (1.527, 0.000, -0.000)],
['CB', 0, (-0.526, -0.778, -1.208)],
['O', 3, (0.626, 1.062, -0.000)],
['CG', 4, (0.593, 1.398, -0.000)],
['OD1', 5, (0.610, 1.091, 0.000)],
['OD2', 5, (0.592, -1.101, -0.003)],
],
'CYS': [
['N', 0, (-0.522, 1.362, -0.000)],
['CA', 0, (0.000, 0.000, 0.000)],
['C', 0, (1.524, 0.000, 0.000)],
['CB', 0, (-0.519, -0.773, -1.212)],
['O', 3, (0.625, 1.062, -0.000)],
['SG', 4, (0.728, 1.653, 0.000)],
],
'GLN': [
['N', 0, (-0.526, 1.361, -0.000)],
['CA', 0, (0.000, 0.000, 0.000)],
['C', 0, (1.526, 0.000, 0.000)],
['CB', 0, (-0.525, -0.779, -1.207)],
['O', 3, (0.626, 1.062, -0.000)],
['CG', 4, (0.615, 1.393, 0.000)],
['CD', 5, (0.587, 1.399, -0.000)],
['NE2', 6, (0.593, -1.189, -0.001)],
['OE1', 6, (0.634, 1.060, 0.000)],
],
'GLU': [
['N', 0, (-0.528, 1.361, 0.000)],
['CA', 0, (0.000, 0.000, 0.000)],
['C', 0, (1.526, -0.000, -0.000)],
['CB', 0, (-0.526, -0.781, -1.207)],
['O', 3, (0.626, 1.062, 0.000)],
['CG', 4, (0.615, 1.392, 0.000)],
['CD', 5, (0.600, 1.397, 0.000)],
['OE1', 6, (0.607, 1.095, -0.000)],
['OE2', 6, (0.589, -1.104, -0.001)],
],
'GLY': [
['N', 0, (-0.572, 1.337, 0.000)],
['CA', 0, (0.000, 0.000, 0.000)],
['C', 0, (1.517, -0.000, -0.000)],
['O', 3, (0.626, 1.062, -0.000)],
],
'HIS': [
['N', 0, (-0.527, 1.360, 0.000)],
['CA', 0, (0.000, 0.000, 0.000)],
['C', 0, (1.525, 0.000, 0.000)],
['CB', 0, (-0.525, -0.778, -1.208)],
['O', 3, (0.625, 1.063, 0.000)],
['CG', 4, (0.600, 1.370, -0.000)],
['CD2', 5, (0.889, -1.021, 0.003)],
['ND1', 5, (0.744, 1.160, -0.000)],
['CE1', 5, (2.030, 0.851, 0.002)],
['NE2', 5, (2.145, -0.466, 0.004)],
],
'ILE': [
['N', 0, (-0.493, 1.373, -0.000)],
['CA', 0, (0.000, 0.000, 0.000)],
['C', 0, (1.527, -0.000, -0.000)],
['CB', 0, (-0.536, -0.793, -1.213)],
['O', 3, (0.627, 1.062, -0.000)],
['CG1', 4, (0.534, 1.437, -0.000)],
['CG2', 4, (0.540, -0.785, -1.199)],
['CD1', 5, (0.619, 1.391, 0.000)],
],
'LEU': [
['N', 0, (-0.520, 1.363, 0.000)],
['CA', 0, (0.000, 0.000, 0.000)],
['C', 0, (1.525, -0.000, -0.000)],
['CB', 0, (-0.522, -0.773, -1.214)],
['O', 3, (0.625, 1.063, -0.000)],
['CG', 4, (0.678, 1.371, 0.000)],
['CD1', 5, (0.530, 1.430, -0.000)],
['CD2', 5, (0.535, -0.774, 1.200)],
],
'LYS': [
['N', 0, (-0.526, 1.362, -0.000)],
['CA', 0, (0.000, 0.000, 0.000)],
['C', 0, (1.526, 0.000, 0.000)],
['CB', 0, (-0.524, -0.778, -1.208)],
['O', 3, (0.626, 1.062, -0.000)],
['CG', 4, (0.619, 1.390, 0.000)],
['CD', 5, (0.559, 1.417, 0.000)],
['CE', 6, (0.560, 1.416, 0.000)],
['NZ', 7, (0.554, 1.387, 0.000)],
],
'MET': [
['N', 0, (-0.521, 1.364, -0.000)],
['CA', 0, (0.000, 0.000, 0.000)],
['C', 0, (1.525, 0.000, 0.000)],
['CB', 0, (-0.523, -0.776, -1.210)],
['O', 3, (0.625, 1.062, -0.000)],
['CG', 4, (0.613, 1.391, -0.000)],
['SD', 5, (0.703, 1.695, 0.000)],
['CE', 6, (0.320, 1.786, -0.000)],
],
'PHE': [
['N', 0, (-0.518, 1.363, 0.000)],
['CA', 0, (0.000, 0.000, 0.000)],
['C', 0, (1.524, 0.000, -0.000)],
['CB', 0, (-0.525, -0.776, -1.212)],
['O', 3, (0.626, 1.062, -0.000)],
['CG', 4, (0.607, 1.377, 0.000)],
['CD1', 5, (0.709, 1.195, -0.000)],
['CD2', 5, (0.706, -1.196, 0.000)],
['CE1', 5, (2.102, 1.198, -0.000)],
['CE2', 5, (2.098, -1.201, -0.000)],
['CZ', 5, (2.794, -0.003, -0.001)],
],
'PRO': [
['N', 0, (-0.566, 1.351, -0.000)],
['CA', 0, (0.000, 0.000, 0.000)],
['C', 0, (1.527, -0.000, 0.000)],
['CB', 0, (-0.546, -0.611, -1.293)],
['O', 3, (0.621, 1.066, 0.000)],
['CG', 4, (0.382, 1.445, 0.0)],
# ['CD', 5, (0.427, 1.440, 0.0)],
['CD', 5, (0.477, 1.424, 0.0)], # manually made angle 2 degrees larger
],
'SER': [
['N', 0, (-0.529, 1.360, -0.000)],
['CA', 0, (0.000, 0.000, 0.000)],
['C', 0, (1.525, -0.000, -0.000)],
['CB', 0, (-0.518, -0.777, -1.211)],
['O', 3, (0.626, 1.062, -0.000)],
['OG', 4, (0.503, 1.325, 0.000)],
],
'THR': [
['N', 0, (-0.517, 1.364, 0.000)],
['CA', 0, (0.000, 0.000, 0.000)],
['C', 0, (1.526, 0.000, -0.000)],
['CB', 0, (-0.516, -0.793, -1.215)],
['O', 3, (0.626, 1.062, 0.000)],
['CG2', 4, (0.550, -0.718, -1.228)],
['OG1', 4, (0.472, 1.353, 0.000)],
],
'TRP': [
['N', 0, (-0.521, 1.363, 0.000)],
['CA', 0, (0.000, 0.000, 0.000)],
['C', 0, (1.525, -0.000, 0.000)],
['CB', 0, (-0.523, -0.776, -1.212)],
['O', 3, (0.627, 1.062, 0.000)],
['CG', 4, (0.609, 1.370, -0.000)],
['CD1', 5, (0.824, 1.091, 0.000)],
['CD2', 5, (0.854, -1.148, -0.005)],
['CE2', 5, (2.186, -0.678, -0.007)],
['CE3', 5, (0.622, -2.530, -0.007)],
['NE1', 5, (2.140, 0.690, -0.004)],
['CH2', 5, (3.028, -2.890, -0.013)],
['CZ2', 5, (3.283, -1.543, -0.011)],
['CZ3', 5, (1.715, -3.389, -0.011)],
],
'TYR': [
['N', 0, (-0.522, 1.362, 0.000)],
['CA', 0, (0.000, 0.000, 0.000)],
['C', 0, (1.524, -0.000, -0.000)],
['CB', 0, (-0.522, -0.776, -1.213)],
['O', 3, (0.627, 1.062, -0.000)],
['CG', 4, (0.607, 1.382, -0.000)],
['CD1', 5, (0.716, 1.195, -0.000)],
['CD2', 5, (0.713, -1.194, -0.001)],
['CE1', 5, (2.107, 1.200, -0.002)],
['CE2', 5, (2.104, -1.201, -0.003)],
['OH', 5, (4.168, -0.002, -0.005)],
['CZ', 5, (2.791, -0.001, -0.003)],
],
'VAL': [
['N', 0, (-0.494, 1.373, -0.000)],
['CA', 0, (0.000, 0.000, 0.000)],
['C', 0, (1.527, -0.000, -0.000)],
['CB', 0, (-0.533, -0.795, -1.213)],
['O', 3, (0.627, 1.062, -0.000)],
['CG1', 4, (0.540, 1.429, -0.000)],
['CG2', 4, (0.533, -0.776, 1.203)],
],
}
# A list of atoms (excluding hydrogen) for each AA type. PDB naming convention.
residue_atoms = {
'ALA': ['C', 'CA', 'CB', 'N', 'O'],
'ARG': ['C', 'CA', 'CB', 'CG', 'CD', 'CZ', 'N', 'NE', 'O', 'NH1', 'NH2'],
'ASP': ['C', 'CA', 'CB', 'CG', 'N', 'O', 'OD1', 'OD2'],
'ASN': ['C', 'CA', 'CB', 'CG', 'N', 'ND2', 'O', 'OD1'],
'CYS': ['C', 'CA', 'CB', 'N', 'O', 'SG'],
'GLU': ['C', 'CA', 'CB', 'CG', 'CD', 'N', 'O', 'OE1', 'OE2'],
'GLN': ['C', 'CA', 'CB', 'CG', 'CD', 'N', 'NE2', 'O', 'OE1'],
'GLY': ['C', 'CA', 'N', 'O'],
'HIS': ['C', 'CA', 'CB', 'CG', 'CD2', 'CE1', 'N', 'ND1', 'NE2', 'O'],
'ILE': ['C', 'CA', 'CB', 'CG1', 'CG2', 'CD1', 'N', 'O'],
'LEU': ['C', 'CA', 'CB', 'CG', 'CD1', 'CD2', 'N', 'O'],
'LYS': ['C', 'CA', 'CB', 'CG', 'CD', 'CE', 'N', 'NZ', 'O'],
'MET': ['C', 'CA', 'CB', 'CG', 'CE', 'N', 'O', 'SD'],
'PHE': ['C', 'CA', 'CB', 'CG', 'CD1', 'CD2', 'CE1', 'CE2', 'CZ', 'N', 'O'],
'PRO': ['C', 'CA', 'CB', 'CG', 'CD', 'N', 'O'],
'SER': ['C', 'CA', 'CB', 'N', 'O', 'OG'],
'THR': ['C', 'CA', 'CB', 'CG2', 'N', 'O', 'OG1'],
'TRP': ['C', 'CA', 'CB', 'CG', 'CD1', 'CD2', 'CE2', 'CE3', 'CZ2', 'CZ3',
'CH2', 'N', 'NE1', 'O'],
'TYR': ['C', 'CA', 'CB', 'CG', 'CD1', 'CD2', 'CE1', 'CE2', 'CZ', 'N', 'O',
'OH'],
'VAL': ['C', 'CA', 'CB', 'CG1', 'CG2', 'N', 'O']
}
# Naming swaps for ambiguous atom names.
# Due to symmetries in the amino acids the naming of atoms is ambiguous in
# 4 of the 20 amino acids.
# (The LDDT paper lists 7 amino acids as ambiguous, but the naming ambiguities
# in LEU, VAL and ARG can be resolved by using the 3d constellations of
# the 'ambiguous' atoms and their neighbours)
residue_atom_renaming_swaps = {
'ASP': {'OD1': 'OD2'},
'GLU': {'OE1': 'OE2'},
'PHE': {'CD1': 'CD2', 'CE1': 'CE2'},
'TYR': {'CD1': 'CD2', 'CE1': 'CE2'},
}
# Van der Waals radii [Angstroem] of the atoms (from Wikipedia)
van_der_waals_radius = {
'C': 1.7,
'N': 1.55,
'O': 1.52,
'S': 1.8,
}
Bond = collections.namedtuple(
'Bond', ['atom1_name', 'atom2_name', 'length', 'stddev'])
BondAngle = collections.namedtuple(
'BondAngle',
['atom1_name', 'atom2_name', 'atom3name', 'angle_rad', 'stddev'])
@functools.lru_cache(maxsize=None)
def load_stereo_chemical_props() -> Tuple[Mapping[str, List[Bond]],
Mapping[str, List[Bond]],
Mapping[str, List[BondAngle]]]:
"""Load stereo_chemical_props.txt into a nice structure.
Load literature values for bond lengths and bond angles and translate
bond angles into the length of the opposite edge of the triangle
("residue_virtual_bonds").
Returns:
residue_bonds: Dict that maps resname -> list of Bond tuples.
residue_virtual_bonds: Dict that maps resname -> list of Bond tuples.
residue_bond_angles: Dict that maps resname -> list of BondAngle tuples.
"""
stereo_chemical_props_path = os.path.join(
os.path.dirname(os.path.abspath(__file__)), 'stereo_chemical_props.txt'
)
with open(stereo_chemical_props_path, 'rt') as f:
stereo_chemical_props = f.read()
lines_iter = iter(stereo_chemical_props.splitlines())
# Load bond lengths.
residue_bonds = {}
next(lines_iter) # Skip header line.
for line in lines_iter:
if line.strip() == '-':
break
bond, resname, length, stddev = line.split()
atom1, atom2 = bond.split('-')
if resname not in residue_bonds:
residue_bonds[resname] = []
residue_bonds[resname].append(
Bond(atom1, atom2, float(length), float(stddev)))
residue_bonds['UNK'] = []
# Load bond angles.
residue_bond_angles = {}
next(lines_iter) # Skip empty line.
next(lines_iter) # Skip header line.
for line in lines_iter:
if line.strip() == '-':
break
bond, resname, angle_degree, stddev_degree = line.split()
atom1, atom2, atom3 = bond.split('-')
if resname not in residue_bond_angles:
residue_bond_angles[resname] = []
residue_bond_angles[resname].append(
BondAngle(atom1, atom2, atom3,
float(angle_degree) / 180. * np.pi,
float(stddev_degree) / 180. * np.pi))
residue_bond_angles['UNK'] = []
def make_bond_key(atom1_name, atom2_name):
"""Unique key to lookup bonds."""
return '-'.join(sorted([atom1_name, atom2_name]))
# Translate bond angles into distances ("virtual bonds").
residue_virtual_bonds = {}
for resname, bond_angles in residue_bond_angles.items():
# Create a fast lookup dict for bond lengths.
bond_cache = {}
for b in residue_bonds[resname]:
bond_cache[make_bond_key(b.atom1_name, b.atom2_name)] = b
residue_virtual_bonds[resname] = []
for ba in bond_angles:
bond1 = bond_cache[make_bond_key(ba.atom1_name, ba.atom2_name)]
bond2 = bond_cache[make_bond_key(ba.atom2_name, ba.atom3name)]
# Compute distance between atom1 and atom3 using the law of cosines
# c^2 = a^2 + b^2 - 2ab*cos(gamma).
gamma = ba.angle_rad
length = np.sqrt(bond1.length**2 + bond2.length**2
- 2 * bond1.length * bond2.length * np.cos(gamma))
# Propagation of uncertainty assuming uncorrelated errors.
dl_outer = 0.5 / length
dl_dgamma = (2 * bond1.length * bond2.length * np.sin(gamma)) * dl_outer
dl_db1 = (2 * bond1.length - 2 * bond2.length * np.cos(gamma)) * dl_outer
dl_db2 = (2 * bond2.length - 2 * bond1.length * np.cos(gamma)) * dl_outer
stddev = np.sqrt((dl_dgamma * ba.stddev)**2 +
(dl_db1 * bond1.stddev)**2 +
(dl_db2 * bond2.stddev)**2)
residue_virtual_bonds[resname].append(
Bond(ba.atom1_name, ba.atom3name, length, stddev))
return (residue_bonds,
residue_virtual_bonds,
residue_bond_angles)
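# Illustrative sketch (added for this write-up, not part of the original
# AlphaFold source): the "virtual bond" computed above is just the law of
# cosines applied to two bond lengths and the angle between them. The default
# values below are approximate backbone N-CA and CA-C lengths and the N-CA-C
# angle, used here purely as example inputs.
def _example_virtual_bond_length(
    len_ab: float = 1.46, len_bc: float = 1.52,
    angle_rad: float = 111. / 180. * np.pi) -> float:
  """Distance between atoms A and C given bonds A-B, B-C and angle A-B-C."""
  return float(np.sqrt(len_ab**2 + len_bc**2
                       - 2 * len_ab * len_bc * np.cos(angle_rad)))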
# Between-residue bond lengths for general bonds (first element) and for Proline
# (second element).
between_res_bond_length_c_n = [1.329, 1.341]
between_res_bond_length_stddev_c_n = [0.014, 0.016]
# Between-residue cos_angles.
between_res_cos_angles_c_n_ca = [-0.5203, 0.0353] # degrees: 121.352 +- 2.315
between_res_cos_angles_ca_c_n = [-0.4473, 0.0311] # degrees: 116.568 +- 1.995
# This mapping is used when we need to store atom data in a format that requires
# fixed atom data size for every residue (e.g. a numpy array).
atom_types = [
'N', 'CA', 'C', 'CB', 'O', 'CG', 'CG1', 'CG2', 'OG', 'OG1', 'SG', 'CD',
'CD1', 'CD2', 'ND1', 'ND2', 'OD1', 'OD2', 'SD', 'CE', 'CE1', 'CE2', 'CE3',
'NE', 'NE1', 'NE2', 'OE1', 'OE2', 'CH2', 'NH1', 'NH2', 'OH', 'CZ', 'CZ2',
'CZ3', 'NZ', 'OXT'
]
atom_order = {atom_type: i for i, atom_type in enumerate(atom_types)}
atom_type_num = len(atom_types) # := 37.
# A compact atom encoding with 14 columns
# pylint: disable=line-too-long
# pylint: disable=bad-whitespace
restype_name_to_atom14_names = {
'ALA': ['N', 'CA', 'C', 'O', 'CB', '', '', '', '', '', '', '', '', ''],
'ARG': ['N', 'CA', 'C', 'O', 'CB', 'CG', 'CD', 'NE', 'CZ', 'NH1', 'NH2', '', '', ''],
'ASN': ['N', 'CA', 'C', 'O', 'CB', 'CG', 'OD1', 'ND2', '', '', '', '', '', ''],
'ASP': ['N', 'CA', 'C', 'O', 'CB', 'CG', 'OD1', 'OD2', '', '', '', '', '', ''],
'CYS': ['N', 'CA', 'C', 'O', 'CB', 'SG', '', '', '', '', '', '', '', ''],
'GLN': ['N', 'CA', 'C', 'O', 'CB', 'CG', 'CD', 'OE1', 'NE2', '', '', '', '', ''],
'GLU': ['N', 'CA', 'C', 'O', 'CB', 'CG', 'CD', 'OE1', 'OE2', '', '', '', '', ''],
'GLY': ['N', 'CA', 'C', 'O', '', '', '', '', '', '', '', '', '', ''],
'HIS': ['N', 'CA', 'C', 'O', 'CB', 'CG', 'ND1', 'CD2', 'CE1', 'NE2', '', '', '', ''],
'ILE': ['N', 'CA', 'C', 'O', 'CB', 'CG1', 'CG2', 'CD1', '', '', '', '', '', ''],
'LEU': ['N', 'CA', 'C', 'O', 'CB', 'CG', 'CD1', 'CD2', '', '', '', '', '', ''],
'LYS': ['N', 'CA', 'C', 'O', 'CB', 'CG', 'CD', 'CE', 'NZ', '', '', '', '', ''],
'MET': ['N', 'CA', 'C', 'O', 'CB', 'CG', 'SD', 'CE', '', '', '', '', '', ''],
'PHE': ['N', 'CA', 'C', 'O', 'CB', 'CG', 'CD1', 'CD2', 'CE1', 'CE2', 'CZ', '', '', ''],
'PRO': ['N', 'CA', 'C', 'O', 'CB', 'CG', 'CD', '', '', '', '', '', '', ''],
'SER': ['N', 'CA', 'C', 'O', 'CB', 'OG', '', '', '', '', '', '', '', ''],
'THR': ['N', 'CA', 'C', 'O', 'CB', 'OG1', 'CG2', '', '', '', '', '', '', ''],
'TRP': ['N', 'CA', 'C', 'O', 'CB', 'CG', 'CD1', 'CD2', 'NE1', 'CE2', 'CE3', 'CZ2', 'CZ3', 'CH2'],
'TYR': ['N', 'CA', 'C', 'O', 'CB', 'CG', 'CD1', 'CD2', 'CE1', 'CE2', 'CZ', 'OH', '', ''],
'VAL': ['N', 'CA', 'C', 'O', 'CB', 'CG1', 'CG2', '', '', '', '', '', '', ''],
'UNK': ['', '', '', '', '', '', '', '', '', '', '', '', '', ''],
}
# pylint: enable=line-too-long
# pylint: enable=bad-whitespace
# This is the standard residue order when coding AA type as a number.
# Reproduce it by taking 3-letter AA codes and sorting them alphabetically.
restypes = [
'A', 'R', 'N', 'D', 'C', 'Q', 'E', 'G', 'H', 'I', 'L', 'K', 'M', 'F', 'P',
'S', 'T', 'W', 'Y', 'V'
]
restype_order = {restype: i for i, restype in enumerate(restypes)}
restype_num = len(restypes) # := 20.
unk_restype_index = restype_num # Catch-all index for unknown restypes.
restypes_with_x = restypes + ['X']
restype_order_with_x = {restype: i for i, restype in enumerate(restypes_with_x)}
def sequence_to_onehot(
sequence: str,
mapping: Mapping[str, int],
map_unknown_to_x: bool = False) -> np.ndarray:
"""Maps the given sequence into a one-hot encoded matrix.
Args:
sequence: An amino acid sequence.
mapping: A dictionary mapping amino acids to integers.
map_unknown_to_x: If True, any amino acid that is not in the mapping will be
mapped to the unknown amino acid 'X'. If the mapping doesn't contain
amino acid 'X', an error will be thrown. If False, any amino acid not in
the mapping will throw an error.
Returns:
A numpy array of shape (seq_len, num_unique_aas) with one-hot encoding of
the sequence.
Raises:
ValueError: If the mapping doesn't contain values from 0 to
num_unique_aas - 1 without any gaps.
"""
num_entries = max(mapping.values()) + 1
if sorted(set(mapping.values())) != list(range(num_entries)):
raise ValueError('The mapping must have values from 0 to num_unique_aas-1 '
'without any gaps. Got: %s' % sorted(mapping.values()))
one_hot_arr = np.zeros((len(sequence), num_entries), dtype=np.int32)
for aa_index, aa_type in enumerate(sequence):
if map_unknown_to_x:
if aa_type.isalpha() and aa_type.isupper():
aa_id = mapping.get(aa_type, mapping['X'])
else:
raise ValueError(f'Invalid character in the sequence: {aa_type}')
else:
aa_id = mapping[aa_type]
one_hot_arr[aa_index, aa_id] = 1
return one_hot_arr
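# Illustrative usage sketch (added for this write-up, not part of the original
# AlphaFold source): one-hot encoding a short sequence with the 21-letter
# mapping (20 standard residues plus 'X') defined above.
def _example_sequence_to_onehot():
  one_hot = sequence_to_onehot(
      'MKG', restype_order_with_x, map_unknown_to_x=True)
  # one_hot has shape (3, 21) and exactly one non-zero entry per row.
  return one_hot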
restype_1to3 = {
'A': 'ALA',
'R': 'ARG',
'N': 'ASN',
'D': 'ASP',
'C': 'CYS',
'Q': 'GLN',
'E': 'GLU',
'G': 'GLY',
'H': 'HIS',
'I': 'ILE',
'L': 'LEU',
'K': 'LYS',
'M': 'MET',
'F': 'PHE',
'P': 'PRO',
'S': 'SER',
'T': 'THR',
'W': 'TRP',
'Y': 'TYR',
'V': 'VAL',
}
PROTEIN_CHAIN: Final[str] = 'polypeptide(L)'
POLYMER_CHAIN: Final[str] = 'polymer'
def atom_id_to_type(atom_id: str) -> str:
"""Convert atom ID to atom type, works only for standard protein residues.
Args:
atom_id: Atom ID to be converted.
Returns:
String corresponding to atom type.
Raises:
ValueError: If atom ID not recognized.
"""
if atom_id.startswith('C'):
return 'C'
elif atom_id.startswith('N'):
return 'N'
elif atom_id.startswith('O'):
return 'O'
elif atom_id.startswith('H'):
return 'H'
elif atom_id.startswith('S'):
return 'S'
raise ValueError('Atom ID not recognized.')
# NB: restype_3to1 differs from Bio.PDB.protein_letters_3to1 by being a simple
# 1-to-1 mapping of 3 letter names to one letter names. The latter contains
# many more, and less common, three letter names as keys and maps many of these
# to the same one letter name (including 'X' and 'U' which we don't use here).
restype_3to1 = {v: k for k, v in restype_1to3.items()}
# Define a restype name for all unknown residues.
unk_restype = 'UNK'
resnames = [restype_1to3[r] for r in restypes] + [unk_restype]
resname_to_idx = {resname: i for i, resname in enumerate(resnames)}
# The mapping here uses hhblits convention, so that B is mapped to D, J and O
# are mapped to X, U is mapped to C, and Z is mapped to E. Other than that the
# remaining 20 amino acids are kept in alphabetical order.
# There are 2 non-amino acid codes, X (representing any amino acid) and
# "-" representing a missing amino acid in an alignment. The id for these
# codes is put at the end (20 and 21) so that they can easily be ignored if
# desired.
HHBLITS_AA_TO_ID = {
'A': 0,
'B': 2,
'C': 1,
'D': 2,
'E': 3,
'F': 4,
'G': 5,
'H': 6,
'I': 7,
'J': 20,
'K': 8,
'L': 9,
'M': 10,
'N': 11,
'O': 20,
'P': 12,
'Q': 13,
'R': 14,
'S': 15,
'T': 16,
'U': 1,
'V': 17,
'W': 18,
'X': 20,
'Y': 19,
'Z': 3,
'-': 21,
}
# Partial inversion of HHBLITS_AA_TO_ID.
ID_TO_HHBLITS_AA = {
0: 'A',
1: 'C', # Also U.
2: 'D', # Also B.
3: 'E', # Also Z.
4: 'F',
5: 'G',
6: 'H',
7: 'I',
8: 'K',
9: 'L',
10: 'M',
11: 'N',
12: 'P',
13: 'Q',
14: 'R',
15: 'S',
16: 'T',
17: 'V',
18: 'W',
19: 'Y',
20: 'X', # Includes J and O.
21: '-',
}
restypes_with_x_and_gap = restypes + ['X', '-']
MAP_HHBLITS_AATYPE_TO_OUR_AATYPE = tuple(
restypes_with_x_and_gap.index(ID_TO_HHBLITS_AA[i])
for i in range(len(restypes_with_x_and_gap)))
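# Illustrative sketch (added for this write-up, not part of the original
# AlphaFold source): the tuple above is typically used as a lookup table to
# remap an array of HHblits-encoded residue IDs into this module's ordering.
def _example_remap_hhblits_aatype(hhblits_aatype: np.ndarray) -> np.ndarray:
  """Maps HHblits residue IDs (0..21) to the restypes_with_x_and_gap order."""
  lookup = np.array(MAP_HHBLITS_AATYPE_TO_OUR_AATYPE, dtype=np.int32)
  return lookup[hhblits_aatype]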
def _make_standard_atom_mask() -> np.ndarray:
"""Returns [num_res_types, num_atom_types] mask array."""
# +1 to account for unknown (all 0s).
mask = np.zeros([restype_num + 1, atom_type_num], dtype=np.int32)
for restype, restype_letter in enumerate(restypes):
restype_name = restype_1to3[restype_letter]
atom_names = residue_atoms[restype_name]
for atom_name in atom_names:
atom_type = atom_order[atom_name]
mask[restype, atom_type] = 1
return mask
STANDARD_ATOM_MASK = _make_standard_atom_mask()
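# Illustrative check (added for this write-up, not part of the original
# AlphaFold source): summing the mask per row recovers the expected heavy-atom
# counts, e.g. 4 for GLY (N, CA, C, O) and 5 for ALA; the final (unknown) row
# is all zeros.
def _example_heavy_atom_counts():
  counts = STANDARD_ATOM_MASK.sum(axis=1)
  return {resnames[i]: int(counts[i]) for i in range(restype_num + 1)}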
# A one hot representation for the first and second atoms defining the axis
# of rotation for each chi-angle in each residue.
def chi_angle_atom(atom_index: int) -> np.ndarray:
"""Define chi-angle rigid groups via one-hot representations."""
chi_angles_index = {}
one_hots = []
for k, v in chi_angles_atoms.items():
indices = [atom_types.index(s[atom_index]) for s in v]
indices.extend([-1]*(4-len(indices)))
chi_angles_index[k] = indices
for r in restypes:
res3 = restype_1to3[r]
one_hot = np.eye(atom_type_num)[chi_angles_index[res3]]
one_hots.append(one_hot)
one_hots.append(np.zeros([4, atom_type_num])) # Add zeros for residue `X`.
one_hot = np.stack(one_hots, axis=0)
one_hot = np.transpose(one_hot, [0, 2, 1])
return one_hot
chi_atom_1_one_hot = chi_angle_atom(1)
chi_atom_2_one_hot = chi_angle_atom(2)
# An array like chi_angles_atoms but using indices rather than names.
chi_angles_atom_indices = [chi_angles_atoms[restype_1to3[r]] for r in restypes]
chi_angles_atom_indices = tree.map_structure(
lambda atom_name: atom_order[atom_name], chi_angles_atom_indices)
chi_angles_atom_indices = np.array([
chi_atoms + ([[0, 0, 0, 0]] * (4 - len(chi_atoms)))
for chi_atoms in chi_angles_atom_indices])
# Mapping from (res_name, atom_name) pairs to the atom's chi group index
# and atom index within that group.
chi_groups_for_atom = collections.defaultdict(list)
for res_name, chi_angle_atoms_for_res in chi_angles_atoms.items():
for chi_group_i, chi_group in enumerate(chi_angle_atoms_for_res):
for atom_i, atom in enumerate(chi_group):
chi_groups_for_atom[(res_name, atom)].append((chi_group_i, atom_i))
chi_groups_for_atom = dict(chi_groups_for_atom)
def _make_rigid_transformation_4x4(ex, ey, translation):
"""Create a rigid 4x4 transformation matrix from two axes and transl."""
# Normalize ex.
ex_normalized = ex / np.linalg.norm(ex)
  # Make ey perpendicular to ex (Gram-Schmidt orthogonalisation).
ey_normalized = ey - np.dot(ey, ex_normalized) * ex_normalized
ey_normalized /= np.linalg.norm(ey_normalized)
  # Compute ez as the cross product of ex and ey.
eznorm = np.cross(ex_normalized, ey_normalized)
m = np.stack([ex_normalized, ey_normalized, eznorm, translation]).transpose()
m = np.concatenate([m, [[0., 0., 0., 1.]]], axis=0)
return m
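# Illustrative check (added for this write-up, not part of the original
# AlphaFold source): for any non-degenerate ex/ey pair the rotation block of
# the returned matrix is orthonormal and the translation sits in the last
# column.
def _example_check_rigid_transform():
  m = _make_rigid_transformation_4x4(
      ex=np.array([1., 1., 0.]),
      ey=np.array([0., 1., 0.]),
      translation=np.array([1., 2., 3.]))
  rot, trans = m[:3, :3], m[:3, 3]
  return np.allclose(rot @ rot.T, np.eye(3)), trans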
# Create an array mapping (restype, atomtype) --> rigid_group_idx, an array
# with (restype, atomtype, coord) for the atom positions, and compute affine
# (4, 4) transformation matrices from each rigid group to the previous group.
restype_atom37_to_rigid_group = np.zeros([21, 37], dtype=int)
restype_atom37_mask = np.zeros([21, 37], dtype=np.float32)
restype_atom37_rigid_group_positions = np.zeros([21, 37, 3], dtype=np.float32)
restype_atom14_to_rigid_group = np.zeros([21, 14], dtype=int)
restype_atom14_mask = np.zeros([21, 14], dtype=np.float32)
restype_atom14_rigid_group_positions = np.zeros([21, 14, 3], dtype=np.float32)
restype_rigid_group_default_frame = np.zeros([21, 8, 4, 4], dtype=np.float32)
def _make_rigid_group_constants():
"""Fill the arrays above."""
for restype, restype_letter in enumerate(restypes):
resname = restype_1to3[restype_letter]
for atomname, group_idx, atom_position in rigid_group_atom_positions[
resname]:
atomtype = atom_order[atomname]
restype_atom37_to_rigid_group[restype, atomtype] = group_idx
restype_atom37_mask[restype, atomtype] = 1
restype_atom37_rigid_group_positions[restype, atomtype, :] = atom_position
atom14idx = restype_name_to_atom14_names[resname].index(atomname)
restype_atom14_to_rigid_group[restype, atom14idx] = group_idx
restype_atom14_mask[restype, atom14idx] = 1
restype_atom14_rigid_group_positions[restype,
atom14idx, :] = atom_position
for restype, restype_letter in enumerate(restypes):
resname = restype_1to3[restype_letter]
atom_positions = {name: np.array(pos) for name, _, pos
in rigid_group_atom_positions[resname]}
# backbone to backbone is the identity transform
restype_rigid_group_default_frame[restype, 0, :, :] = np.eye(4)
# pre-omega-frame to backbone (currently dummy identity matrix)
restype_rigid_group_default_frame[restype, 1, :, :] = np.eye(4)
# phi-frame to backbone
mat = _make_rigid_transformation_4x4(
ex=atom_positions['N'] - atom_positions['CA'],
ey=np.array([1., 0., 0.]),
translation=atom_positions['N'])
restype_rigid_group_default_frame[restype, 2, :, :] = mat
# psi-frame to backbone
mat = _make_rigid_transformation_4x4(
ex=atom_positions['C'] - atom_positions['CA'],
ey=atom_positions['CA'] - atom_positions['N'],
translation=atom_positions['C'])
restype_rigid_group_default_frame[restype, 3, :, :] = mat
# chi1-frame to backbone
if chi_angles_mask[restype][0]:
base_atom_names = chi_angles_atoms[resname][0]
base_atom_positions = [atom_positions[name] for name in base_atom_names]
mat = _make_rigid_transformation_4x4(
ex=base_atom_positions[2] - base_atom_positions[1],
ey=base_atom_positions[0] - base_atom_positions[1],
translation=base_atom_positions[2])
restype_rigid_group_default_frame[restype, 4, :, :] = mat
# chi2-frame to chi1-frame
# chi3-frame to chi2-frame
# chi4-frame to chi3-frame
# luckily all rotation axes for the next frame start at (0,0,0) of the
# previous frame
for chi_idx in range(1, 4):
if chi_angles_mask[restype][chi_idx]:
axis_end_atom_name = chi_angles_atoms[resname][chi_idx][2]
axis_end_atom_position = atom_positions[axis_end_atom_name]
mat = _make_rigid_transformation_4x4(
ex=axis_end_atom_position,
ey=np.array([-1., 0., 0.]),
translation=axis_end_atom_position)
restype_rigid_group_default_frame[restype, 4 + chi_idx, :, :] = mat
_make_rigid_group_constants()
def make_atom14_dists_bounds(overlap_tolerance=1.5,
bond_length_tolerance_factor=15):
"""compute upper and lower bounds for bonds to assess violations."""
restype_atom14_bond_lower_bound = np.zeros([21, 14, 14], np.float32)
restype_atom14_bond_upper_bound = np.zeros([21, 14, 14], np.float32)
restype_atom14_bond_stddev = np.zeros([21, 14, 14], np.float32)
residue_bonds, residue_virtual_bonds, _ = load_stereo_chemical_props()
for restype, restype_letter in enumerate(restypes):
resname = restype_1to3[restype_letter]
atom_list = restype_name_to_atom14_names[resname]
# create lower and upper bounds for clashes
for atom1_idx, atom1_name in enumerate(atom_list):
if not atom1_name:
continue
atom1_radius = van_der_waals_radius[atom1_name[0]]
for atom2_idx, atom2_name in enumerate(atom_list):
if (not atom2_name) or atom1_idx == atom2_idx:
continue
atom2_radius = van_der_waals_radius[atom2_name[0]]
lower = atom1_radius + atom2_radius - overlap_tolerance
upper = 1e10
restype_atom14_bond_lower_bound[restype, atom1_idx, atom2_idx] = lower
restype_atom14_bond_lower_bound[restype, atom2_idx, atom1_idx] = lower
restype_atom14_bond_upper_bound[restype, atom1_idx, atom2_idx] = upper
restype_atom14_bond_upper_bound[restype, atom2_idx, atom1_idx] = upper
# overwrite lower and upper bounds for bonds and angles
for b in residue_bonds[resname] + residue_virtual_bonds[resname]:
atom1_idx = atom_list.index(b.atom1_name)
atom2_idx = atom_list.index(b.atom2_name)
lower = b.length - bond_length_tolerance_factor * b.stddev
upper = b.length + bond_length_tolerance_factor * b.stddev
restype_atom14_bond_lower_bound[restype, atom1_idx, atom2_idx] = lower
restype_atom14_bond_lower_bound[restype, atom2_idx, atom1_idx] = lower
restype_atom14_bond_upper_bound[restype, atom1_idx, atom2_idx] = upper
restype_atom14_bond_upper_bound[restype, atom2_idx, atom1_idx] = upper
restype_atom14_bond_stddev[restype, atom1_idx, atom2_idx] = b.stddev
restype_atom14_bond_stddev[restype, atom2_idx, atom1_idx] = b.stddev
return {'lower_bound': restype_atom14_bond_lower_bound, # shape (21,14,14)
'upper_bound': restype_atom14_bond_upper_bound, # shape (21,14,14)
'stddev': restype_atom14_bond_stddev, # shape (21,14,14)
}
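# Illustrative usage sketch (added for this write-up, not part of the original
# AlphaFold source): the bounds are typically computed once and then indexed by
# (restype, atom14_i, atom14_j). This assumes stereo_chemical_props.txt is
# available next to this module, since make_atom14_dists_bounds loads it.
def _example_ca_c_bounds_for_ala():
  bounds = make_atom14_dists_bounds()
  restype = restype_order['A']
  atom14 = restype_name_to_atom14_names['ALA']
  i, j = atom14.index('CA'), atom14.index('C')
  return (bounds['lower_bound'][restype, i, j],
          bounds['upper_bound'][restype, i, j])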
|
alphafold-main
|
alphafold/common/residue_constants.py
|
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""mmCIF metadata."""
from typing import Mapping, Sequence
from alphafold import version
import numpy as np
_DISCLAIMER = """ALPHAFOLD DATA, COPYRIGHT (2021) DEEPMIND TECHNOLOGIES LIMITED.
THE INFORMATION PROVIDED IS THEORETICAL MODELLING ONLY AND CAUTION SHOULD BE
EXERCISED IN ITS USE. IT IS PROVIDED "AS-IS" WITHOUT ANY WARRANTY OF ANY KIND,
WHETHER EXPRESSED OR IMPLIED. NO WARRANTY IS GIVEN THAT USE OF THE INFORMATION
SHALL NOT INFRINGE THE RIGHTS OF ANY THIRD PARTY. DISCLAIMER: THE INFORMATION IS
NOT INTENDED TO BE A SUBSTITUTE FOR PROFESSIONAL MEDICAL ADVICE, DIAGNOSIS, OR
TREATMENT, AND DOES NOT CONSTITUTE MEDICAL OR OTHER PROFESSIONAL ADVICE. IT IS
AVAILABLE FOR ACADEMIC AND COMMERCIAL PURPOSES, UNDER CC-BY 4.0 LICENCE."""
# Authors of the Nature methods paper we reference in the mmCIF.
_MMCIF_PAPER_AUTHORS = (
'Jumper, John',
'Evans, Richard',
'Pritzel, Alexander',
'Green, Tim',
'Figurnov, Michael',
'Ronneberger, Olaf',
'Tunyasuvunakool, Kathryn',
'Bates, Russ',
'Zidek, Augustin',
'Potapenko, Anna',
'Bridgland, Alex',
'Meyer, Clemens',
'Kohl, Simon A. A.',
'Ballard, Andrew J.',
'Cowie, Andrew',
'Romera-Paredes, Bernardino',
'Nikolov, Stanislav',
'Jain, Rishub',
'Adler, Jonas',
'Back, Trevor',
'Petersen, Stig',
'Reiman, David',
'Clancy, Ellen',
'Zielinski, Michal',
'Steinegger, Martin',
'Pacholska, Michalina',
'Berghammer, Tamas',
'Silver, David',
'Vinyals, Oriol',
'Senior, Andrew W.',
'Kavukcuoglu, Koray',
'Kohli, Pushmeet',
'Hassabis, Demis',
)
# Authors of the mmCIF - we set them to be equal to the authors of the paper.
_MMCIF_AUTHORS = _MMCIF_PAPER_AUTHORS
def add_metadata_to_mmcif(
old_cif: Mapping[str, Sequence[str]], model_type: str
) -> Mapping[str, Sequence[str]]:
"""Adds AlphaFold metadata in the given mmCIF."""
cif = {}
# ModelCIF conformation dictionary.
cif['_audit_conform.dict_name'] = ['mmcif_ma.dic']
cif['_audit_conform.dict_version'] = ['1.3.9']
cif['_audit_conform.dict_location'] = [
'https://raw.githubusercontent.com/ihmwg/ModelCIF/master/dist/'
'mmcif_ma.dic'
]
# License and disclaimer.
cif['_pdbx_data_usage.id'] = ['1', '2']
cif['_pdbx_data_usage.type'] = ['license', 'disclaimer']
cif['_pdbx_data_usage.details'] = [
'Data in this file is available under a CC-BY-4.0 license.',
_DISCLAIMER,
]
cif['_pdbx_data_usage.url'] = [
'https://creativecommons.org/licenses/by/4.0/',
'?',
]
cif['_pdbx_data_usage.name'] = ['CC-BY-4.0', '?']
# Structure author details.
cif['_audit_author.name'] = []
cif['_audit_author.pdbx_ordinal'] = []
for author_index, author_name in enumerate(_MMCIF_AUTHORS, start=1):
cif['_audit_author.name'].append(author_name)
cif['_audit_author.pdbx_ordinal'].append(str(author_index))
# Paper author details.
cif['_citation_author.citation_id'] = []
cif['_citation_author.name'] = []
cif['_citation_author.ordinal'] = []
for author_index, author_name in enumerate(_MMCIF_PAPER_AUTHORS, start=1):
cif['_citation_author.citation_id'].append('primary')
cif['_citation_author.name'].append(author_name)
cif['_citation_author.ordinal'].append(str(author_index))
# Paper citation details.
cif['_citation.id'] = ['primary']
cif['_citation.title'] = [
'Highly accurate protein structure prediction with AlphaFold'
]
cif['_citation.journal_full'] = ['Nature']
cif['_citation.journal_volume'] = ['596']
cif['_citation.page_first'] = ['583']
cif['_citation.page_last'] = ['589']
cif['_citation.year'] = ['2021']
cif['_citation.journal_id_ASTM'] = ['NATUAS']
cif['_citation.country'] = ['UK']
cif['_citation.journal_id_ISSN'] = ['0028-0836']
cif['_citation.journal_id_CSD'] = ['0006']
cif['_citation.book_publisher'] = ['?']
cif['_citation.pdbx_database_id_PubMed'] = ['34265844']
cif['_citation.pdbx_database_id_DOI'] = ['10.1038/s41586-021-03819-2']
# Type of data in the dataset including data used in the model generation.
cif['_ma_data.id'] = ['1']
cif['_ma_data.name'] = ['Model']
cif['_ma_data.content_type'] = ['model coordinates']
# Description of number of instances for each entity.
cif['_ma_target_entity_instance.asym_id'] = old_cif['_struct_asym.id']
cif['_ma_target_entity_instance.entity_id'] = old_cif[
'_struct_asym.entity_id'
]
cif['_ma_target_entity_instance.details'] = ['.'] * len(
cif['_ma_target_entity_instance.entity_id']
)
# Details about the target entities.
cif['_ma_target_entity.entity_id'] = cif[
'_ma_target_entity_instance.entity_id'
]
cif['_ma_target_entity.data_id'] = ['1'] * len(
cif['_ma_target_entity.entity_id']
)
cif['_ma_target_entity.origin'] = ['.'] * len(
cif['_ma_target_entity.entity_id']
)
# Details of the models being deposited.
cif['_ma_model_list.ordinal_id'] = ['1']
cif['_ma_model_list.model_id'] = ['1']
cif['_ma_model_list.model_group_id'] = ['1']
cif['_ma_model_list.model_name'] = ['Top ranked model']
cif['_ma_model_list.model_group_name'] = [
f'AlphaFold {model_type} v{version.__version__} model'
]
cif['_ma_model_list.data_id'] = ['1']
cif['_ma_model_list.model_type'] = ['Ab initio model']
# Software used.
cif['_software.pdbx_ordinal'] = ['1']
cif['_software.name'] = ['AlphaFold']
cif['_software.version'] = [f'v{version.__version__}']
cif['_software.type'] = ['package']
cif['_software.description'] = ['Structure prediction']
cif['_software.classification'] = ['other']
cif['_software.date'] = ['?']
# Collection of software into groups.
cif['_ma_software_group.ordinal_id'] = ['1']
cif['_ma_software_group.group_id'] = ['1']
cif['_ma_software_group.software_id'] = ['1']
# Method description to conform with ModelCIF.
cif['_ma_protocol_step.ordinal_id'] = ['1', '2', '3']
cif['_ma_protocol_step.protocol_id'] = ['1', '1', '1']
cif['_ma_protocol_step.step_id'] = ['1', '2', '3']
cif['_ma_protocol_step.method_type'] = [
'coevolution MSA',
'template search',
'modeling',
]
# Details of the metrics use to assess model confidence.
cif['_ma_qa_metric.id'] = ['1', '2']
cif['_ma_qa_metric.name'] = ['pLDDT', 'pLDDT']
# Accepted values are distance, energy, normalised score, other, zscore.
cif['_ma_qa_metric.type'] = ['pLDDT', 'pLDDT']
cif['_ma_qa_metric.mode'] = ['global', 'local']
cif['_ma_qa_metric.software_group_id'] = ['1', '1']
# Global model confidence metric value.
cif['_ma_qa_metric_global.ordinal_id'] = ['1']
cif['_ma_qa_metric_global.model_id'] = ['1']
cif['_ma_qa_metric_global.metric_id'] = ['1']
global_plddt = np.mean(
[float(v) for v in old_cif['_atom_site.B_iso_or_equiv']]
)
cif['_ma_qa_metric_global.metric_value'] = [f'{global_plddt:.2f}']
cif['_atom_type.symbol'] = sorted(set(old_cif['_atom_site.type_symbol']))
return cif
|
alphafold-main
|
alphafold/common/mmcif_metadata.py
|
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""lDDT protein distance score."""
import jax.numpy as jnp
def lddt(predicted_points,
true_points,
true_points_mask,
cutoff=15.,
per_residue=False):
"""Measure (approximate) lDDT for a batch of coordinates.
lDDT reference:
Mariani, V., Biasini, M., Barbato, A. & Schwede, T. lDDT: A local
superposition-free score for comparing protein structures and models using
distance difference tests. Bioinformatics 29, 2722–2728 (2013).
lDDT is a measure of the difference between the true distance matrix and the
distance matrix of the predicted points. The difference is computed only on
points closer than cutoff *in the true structure*.
This function does not compute the exact lDDT value that the original paper
describes because it does not include terms for physical feasibility
(e.g. bond length violations). Therefore this is only an approximate
lDDT score.
Args:
predicted_points: (batch, length, 3) array of predicted 3D points
true_points: (batch, length, 3) array of true 3D points
true_points_mask: (batch, length, 1) binary-valued float array. This mask
should be 1 for points that exist in the true points.
cutoff: Maximum distance for a pair of points to be included
per_residue: If true, return score for each residue. Note that the overall
      lDDT is not exactly the mean of the per-residue lDDTs because some
residues have more contacts than others.
Returns:
An (approximate, see above) lDDT score in the range 0-1.
"""
assert len(predicted_points.shape) == 3
assert predicted_points.shape[-1] == 3
assert true_points_mask.shape[-1] == 1
assert len(true_points_mask.shape) == 3
# Compute true and predicted distance matrices.
dmat_true = jnp.sqrt(1e-10 + jnp.sum(
(true_points[:, :, None] - true_points[:, None, :])**2, axis=-1))
dmat_predicted = jnp.sqrt(1e-10 + jnp.sum(
(predicted_points[:, :, None] -
predicted_points[:, None, :])**2, axis=-1))
dists_to_score = (
(dmat_true < cutoff).astype(jnp.float32) * true_points_mask *
jnp.transpose(true_points_mask, [0, 2, 1]) *
(1. - jnp.eye(dmat_true.shape[1])) # Exclude self-interaction.
)
# Shift unscored distances to be far away.
dist_l1 = jnp.abs(dmat_true - dmat_predicted)
# True lDDT uses a number of fixed bins.
# We ignore the physical plausibility correction to lDDT, though.
score = 0.25 * ((dist_l1 < 0.5).astype(jnp.float32) +
(dist_l1 < 1.0).astype(jnp.float32) +
(dist_l1 < 2.0).astype(jnp.float32) +
(dist_l1 < 4.0).astype(jnp.float32))
# Normalize over the appropriate axes.
reduce_axes = (-1,) if per_residue else (-2, -1)
norm = 1. / (1e-10 + jnp.sum(dists_to_score, axis=reduce_axes))
score = norm * (1e-10 + jnp.sum(dists_to_score * score, axis=reduce_axes))
return score
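# Illustrative usage sketch (added for this write-up, not part of the original
# AlphaFold source): a structure scored against itself gets an lDDT of 1.0,
# which is a cheap sanity check on the masking and normalisation above.
def _example_lddt_self_score(num_res: int = 16):
  points = jnp.arange(num_res * 3, dtype=jnp.float32).reshape(1, num_res, 3)
  mask = jnp.ones((1, num_res, 1), dtype=jnp.float32)
  return lddt(points, points, mask)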
|
alphafold-main
|
alphafold/model/lddt.py
|
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A collection of utilities surrounding PRNG usage in protein folding."""
import haiku as hk
import jax
def safe_dropout(*, tensor, safe_key, rate, is_deterministic, is_training):
if is_training and rate != 0.0 and not is_deterministic:
return hk.dropout(safe_key.get(), rate, tensor)
else:
return tensor
class SafeKey:
"""Safety wrapper for PRNG keys."""
def __init__(self, key):
self._key = key
self._used = False
def _assert_not_used(self):
if self._used:
raise RuntimeError('Random key has been used previously.')
def get(self):
self._assert_not_used()
self._used = True
return self._key
def split(self, num_keys=2):
self._assert_not_used()
self._used = True
new_keys = jax.random.split(self._key, num_keys)
return jax.tree_map(SafeKey, tuple(new_keys))
def duplicate(self, num_keys=2):
self._assert_not_used()
self._used = True
return tuple(SafeKey(self._key) for _ in range(num_keys))
def _safe_key_flatten(safe_key):
  # Flatten transfers "ownership" of the key to the tree.
return (safe_key._key,), safe_key._used # pylint: disable=protected-access
def _safe_key_unflatten(aux_data, children):
ret = SafeKey(children[0])
ret._used = aux_data # pylint: disable=protected-access
return ret
jax.tree_util.register_pytree_node(
SafeKey, _safe_key_flatten, _safe_key_unflatten)
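# Illustrative usage sketch (added for this write-up, not part of the original
# AlphaFold source): a SafeKey may be consumed exactly once; split() hands back
# fresh wrappers, so each submodule gets its own single-use key.
def _example_safe_key_usage():
  key = SafeKey(jax.random.PRNGKey(0))
  key, subkey = key.split()
  dropout_key = subkey.get()  # First (and only) permitted use of `subkey`.
  # Calling subkey.get() again would raise RuntimeError.
  return dropout_key, key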
|
alphafold-main
|
alphafold/model/prng.py
|
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Model config."""
import copy
from alphafold.model.tf import shape_placeholders
import ml_collections
NUM_RES = shape_placeholders.NUM_RES
NUM_MSA_SEQ = shape_placeholders.NUM_MSA_SEQ
NUM_EXTRA_SEQ = shape_placeholders.NUM_EXTRA_SEQ
NUM_TEMPLATES = shape_placeholders.NUM_TEMPLATES
def model_config(name: str) -> ml_collections.ConfigDict:
"""Get the ConfigDict of a CASP14 model."""
if name not in CONFIG_DIFFS:
raise ValueError(f'Invalid model name {name}.')
if 'multimer' in name:
cfg = copy.deepcopy(CONFIG_MULTIMER)
else:
cfg = copy.deepcopy(CONFIG)
cfg.update_from_flattened_dict(CONFIG_DIFFS[name])
return cfg
MODEL_PRESETS = {
'monomer': (
'model_1',
'model_2',
'model_3',
'model_4',
'model_5',
),
'monomer_ptm': (
'model_1_ptm',
'model_2_ptm',
'model_3_ptm',
'model_4_ptm',
'model_5_ptm',
),
'multimer': (
'model_1_multimer_v3',
'model_2_multimer_v3',
'model_3_multimer_v3',
'model_4_multimer_v3',
'model_5_multimer_v3',
),
}
MODEL_PRESETS['monomer_casp14'] = MODEL_PRESETS['monomer']
CONFIG_DIFFS = {
'model_1': {
# Jumper et al. (2021) Suppl. Table 5, Model 1.1.1
'data.common.max_extra_msa': 5120,
'data.common.reduce_msa_clusters_by_max_templates': True,
'data.common.use_templates': True,
'model.embeddings_and_evoformer.template.embed_torsion_angles': True,
'model.embeddings_and_evoformer.template.enabled': True
},
'model_2': {
# Jumper et al. (2021) Suppl. Table 5, Model 1.1.2
'data.common.reduce_msa_clusters_by_max_templates': True,
'data.common.use_templates': True,
'model.embeddings_and_evoformer.template.embed_torsion_angles': True,
'model.embeddings_and_evoformer.template.enabled': True
},
'model_3': {
# Jumper et al. (2021) Suppl. Table 5, Model 1.2.1
'data.common.max_extra_msa': 5120,
},
'model_4': {
# Jumper et al. (2021) Suppl. Table 5, Model 1.2.2
'data.common.max_extra_msa': 5120,
},
'model_5': {
# Jumper et al. (2021) Suppl. Table 5, Model 1.2.3
},
# The following models are fine-tuned from the corresponding models above
# with an additional predicted_aligned_error head that can produce
# predicted TM-score (pTM) and predicted aligned errors.
'model_1_ptm': {
'data.common.max_extra_msa': 5120,
'data.common.reduce_msa_clusters_by_max_templates': True,
'data.common.use_templates': True,
'model.embeddings_and_evoformer.template.embed_torsion_angles': True,
'model.embeddings_and_evoformer.template.enabled': True,
'model.heads.predicted_aligned_error.weight': 0.1
},
'model_2_ptm': {
'data.common.reduce_msa_clusters_by_max_templates': True,
'data.common.use_templates': True,
'model.embeddings_and_evoformer.template.embed_torsion_angles': True,
'model.embeddings_and_evoformer.template.enabled': True,
'model.heads.predicted_aligned_error.weight': 0.1
},
'model_3_ptm': {
'data.common.max_extra_msa': 5120,
'model.heads.predicted_aligned_error.weight': 0.1
},
'model_4_ptm': {
'data.common.max_extra_msa': 5120,
'model.heads.predicted_aligned_error.weight': 0.1
},
'model_5_ptm': {
'model.heads.predicted_aligned_error.weight': 0.1
},
'model_1_multimer_v3': {},
'model_2_multimer_v3': {},
'model_3_multimer_v3': {},
'model_4_multimer_v3': {
'model.embeddings_and_evoformer.num_extra_msa': 1152
},
'model_5_multimer_v3': {
'model.embeddings_and_evoformer.num_extra_msa': 1152
},
}
# Key differences between multimer v1/v2 and v3, mostly due to numerical
# optimisations in the TriangleMultiplication module.
common_updates = {
'model.embeddings_and_evoformer.num_msa': 252,
'model.embeddings_and_evoformer.num_extra_msa': 1152,
'model.embeddings_and_evoformer.evoformer.triangle_multiplication_incoming.fuse_projection_weights': False,
'model.embeddings_and_evoformer.evoformer.triangle_multiplication_outgoing.fuse_projection_weights': False,
'model.embeddings_and_evoformer.template.template_pair_stack.triangle_multiplication_incoming.fuse_projection_weights': False,
'model.embeddings_and_evoformer.template.template_pair_stack.triangle_multiplication_outgoing.fuse_projection_weights': False,
}
CONFIG_DIFFS.update(
{f'model_{i}_multimer': common_updates for i in range(1, 6)})
CONFIG_DIFFS.update(
{f'model_{i}_multimer_v2': common_updates for i in range(1, 6)})
CONFIG = ml_collections.ConfigDict({
'data': {
'common': {
'masked_msa': {
'profile_prob': 0.1,
'same_prob': 0.1,
'uniform_prob': 0.1
},
'max_extra_msa': 1024,
'msa_cluster_features': True,
'num_recycle': 3,
'reduce_msa_clusters_by_max_templates': False,
'resample_msa_in_recycling': True,
'template_features': [
'template_all_atom_positions', 'template_sum_probs',
'template_aatype', 'template_all_atom_masks',
'template_domain_names'
],
'unsupervised_features': [
'aatype', 'residue_index', 'sequence', 'msa', 'domain_name',
'num_alignments', 'seq_length', 'between_segment_residues',
'deletion_matrix'
],
'use_templates': False,
},
'eval': {
'feat': {
'aatype': [NUM_RES],
'all_atom_mask': [NUM_RES, None],
'all_atom_positions': [NUM_RES, None, None],
'alt_chi_angles': [NUM_RES, None],
'atom14_alt_gt_exists': [NUM_RES, None],
'atom14_alt_gt_positions': [NUM_RES, None, None],
'atom14_atom_exists': [NUM_RES, None],
'atom14_atom_is_ambiguous': [NUM_RES, None],
'atom14_gt_exists': [NUM_RES, None],
'atom14_gt_positions': [NUM_RES, None, None],
'atom37_atom_exists': [NUM_RES, None],
'backbone_affine_mask': [NUM_RES],
'backbone_affine_tensor': [NUM_RES, None],
'bert_mask': [NUM_MSA_SEQ, NUM_RES],
'chi_angles': [NUM_RES, None],
'chi_mask': [NUM_RES, None],
'extra_deletion_value': [NUM_EXTRA_SEQ, NUM_RES],
'extra_has_deletion': [NUM_EXTRA_SEQ, NUM_RES],
'extra_msa': [NUM_EXTRA_SEQ, NUM_RES],
'extra_msa_mask': [NUM_EXTRA_SEQ, NUM_RES],
'extra_msa_row_mask': [NUM_EXTRA_SEQ],
'is_distillation': [],
'msa_feat': [NUM_MSA_SEQ, NUM_RES, None],
'msa_mask': [NUM_MSA_SEQ, NUM_RES],
'msa_row_mask': [NUM_MSA_SEQ],
'pseudo_beta': [NUM_RES, None],
'pseudo_beta_mask': [NUM_RES],
'random_crop_to_size_seed': [None],
'residue_index': [NUM_RES],
'residx_atom14_to_atom37': [NUM_RES, None],
'residx_atom37_to_atom14': [NUM_RES, None],
'resolution': [],
'rigidgroups_alt_gt_frames': [NUM_RES, None, None],
'rigidgroups_group_exists': [NUM_RES, None],
'rigidgroups_group_is_ambiguous': [NUM_RES, None],
'rigidgroups_gt_exists': [NUM_RES, None],
'rigidgroups_gt_frames': [NUM_RES, None, None],
'seq_length': [],
'seq_mask': [NUM_RES],
'target_feat': [NUM_RES, None],
'template_aatype': [NUM_TEMPLATES, NUM_RES],
'template_all_atom_masks': [NUM_TEMPLATES, NUM_RES, None],
'template_all_atom_positions': [
NUM_TEMPLATES, NUM_RES, None, None],
'template_backbone_affine_mask': [NUM_TEMPLATES, NUM_RES],
'template_backbone_affine_tensor': [
NUM_TEMPLATES, NUM_RES, None],
'template_mask': [NUM_TEMPLATES],
'template_pseudo_beta': [NUM_TEMPLATES, NUM_RES, None],
'template_pseudo_beta_mask': [NUM_TEMPLATES, NUM_RES],
'template_sum_probs': [NUM_TEMPLATES, None],
'true_msa': [NUM_MSA_SEQ, NUM_RES]
},
'fixed_size': True,
'subsample_templates': False, # We want top templates.
'masked_msa_replace_fraction': 0.15,
'max_msa_clusters': 512,
'max_templates': 4,
'num_ensemble': 1,
},
},
'model': {
'embeddings_and_evoformer': {
'evoformer_num_block': 48,
'evoformer': {
'msa_row_attention_with_pair_bias': {
'dropout_rate': 0.15,
'gating': True,
'num_head': 8,
'orientation': 'per_row',
'shared_dropout': True
},
'msa_column_attention': {
'dropout_rate': 0.0,
'gating': True,
'num_head': 8,
'orientation': 'per_column',
'shared_dropout': True
},
'msa_transition': {
'dropout_rate': 0.0,
'num_intermediate_factor': 4,
'orientation': 'per_row',
'shared_dropout': True
},
'outer_product_mean': {
'first': False,
'chunk_size': 128,
'dropout_rate': 0.0,
'num_outer_channel': 32,
'orientation': 'per_row',
'shared_dropout': True
},
'triangle_attention_starting_node': {
'dropout_rate': 0.25,
'gating': True,
'num_head': 4,
'orientation': 'per_row',
'shared_dropout': True
},
'triangle_attention_ending_node': {
'dropout_rate': 0.25,
'gating': True,
'num_head': 4,
'orientation': 'per_column',
'shared_dropout': True
},
'triangle_multiplication_outgoing': {
'dropout_rate': 0.25,
'equation': 'ikc,jkc->ijc',
'num_intermediate_channel': 128,
'orientation': 'per_row',
'shared_dropout': True,
'fuse_projection_weights': False,
},
'triangle_multiplication_incoming': {
'dropout_rate': 0.25,
'equation': 'kjc,kic->ijc',
'num_intermediate_channel': 128,
'orientation': 'per_row',
'shared_dropout': True,
'fuse_projection_weights': False,
},
'pair_transition': {
'dropout_rate': 0.0,
'num_intermediate_factor': 4,
'orientation': 'per_row',
'shared_dropout': True
}
},
'extra_msa_channel': 64,
'extra_msa_stack_num_block': 4,
'max_relative_feature': 32,
'msa_channel': 256,
'pair_channel': 128,
'prev_pos': {
'min_bin': 3.25,
'max_bin': 20.75,
'num_bins': 15
},
'recycle_features': True,
'recycle_pos': True,
'seq_channel': 384,
'template': {
'attention': {
'gating': False,
'key_dim': 64,
'num_head': 4,
'value_dim': 64
},
'dgram_features': {
'min_bin': 3.25,
'max_bin': 50.75,
'num_bins': 39
},
'embed_torsion_angles': False,
'enabled': False,
'template_pair_stack': {
'num_block': 2,
'triangle_attention_starting_node': {
'dropout_rate': 0.25,
'gating': True,
'key_dim': 64,
'num_head': 4,
'orientation': 'per_row',
'shared_dropout': True,
'value_dim': 64
},
'triangle_attention_ending_node': {
'dropout_rate': 0.25,
'gating': True,
'key_dim': 64,
'num_head': 4,
'orientation': 'per_column',
'shared_dropout': True,
'value_dim': 64
},
'triangle_multiplication_outgoing': {
'dropout_rate': 0.25,
'equation': 'ikc,jkc->ijc',
'num_intermediate_channel': 64,
'orientation': 'per_row',
'shared_dropout': True,
'fuse_projection_weights': False,
},
'triangle_multiplication_incoming': {
'dropout_rate': 0.25,
'equation': 'kjc,kic->ijc',
'num_intermediate_channel': 64,
'orientation': 'per_row',
'shared_dropout': True,
'fuse_projection_weights': False,
},
'pair_transition': {
'dropout_rate': 0.0,
'num_intermediate_factor': 2,
'orientation': 'per_row',
'shared_dropout': True
}
},
'max_templates': 4,
'subbatch_size': 128,
'use_template_unit_vector': False,
}
},
'global_config': {
'deterministic': False,
'multimer_mode': False,
'subbatch_size': 4,
'use_remat': False,
'zero_init': True,
'eval_dropout': False,
},
'heads': {
'distogram': {
'first_break': 2.3125,
'last_break': 21.6875,
'num_bins': 64,
'weight': 0.3
},
'predicted_aligned_error': {
# `num_bins - 1` bins uniformly space the
# [0, max_error_bin A] range.
# The final bin covers [max_error_bin A, +infty]
# 31A gives bins with 0.5A width.
'max_error_bin': 31.,
'num_bins': 64,
'num_channels': 128,
'filter_by_resolution': True,
'min_resolution': 0.1,
'max_resolution': 3.0,
'weight': 0.0,
},
'experimentally_resolved': {
'filter_by_resolution': True,
'max_resolution': 3.0,
'min_resolution': 0.1,
'weight': 0.01
},
'structure_module': {
'num_layer': 8,
'fape': {
'clamp_distance': 10.0,
'clamp_type': 'relu',
'loss_unit_distance': 10.0
},
'angle_norm_weight': 0.01,
'chi_weight': 0.5,
'clash_overlap_tolerance': 1.5,
'compute_in_graph_metrics': True,
'dropout': 0.1,
'num_channel': 384,
'num_head': 12,
'num_layer_in_transition': 3,
'num_point_qk': 4,
'num_point_v': 8,
'num_scalar_qk': 16,
'num_scalar_v': 16,
'position_scale': 10.0,
'sidechain': {
'atom_clamp_distance': 10.0,
'num_channel': 128,
'num_residual_block': 2,
'weight_frac': 0.5,
'length_scale': 10.,
},
'structural_violation_loss_weight': 1.0,
'violation_tolerance_factor': 12.0,
'weight': 1.0
},
'predicted_lddt': {
'filter_by_resolution': True,
'max_resolution': 3.0,
'min_resolution': 0.1,
'num_bins': 50,
'num_channels': 128,
'weight': 0.01
},
'masked_msa': {
'num_output': 23,
'weight': 2.0
},
},
'num_recycle': 3,
'resample_msa_in_recycling': True
},
})
CONFIG_MULTIMER = ml_collections.ConfigDict({
'model': {
'embeddings_and_evoformer': {
'evoformer_num_block': 48,
'evoformer': {
'msa_column_attention': {
'dropout_rate': 0.0,
'gating': True,
'num_head': 8,
'orientation': 'per_column',
'shared_dropout': True
},
'msa_row_attention_with_pair_bias': {
'dropout_rate': 0.15,
'gating': True,
'num_head': 8,
'orientation': 'per_row',
'shared_dropout': True
},
'msa_transition': {
'dropout_rate': 0.0,
'num_intermediate_factor': 4,
'orientation': 'per_row',
'shared_dropout': True
},
'outer_product_mean': {
'chunk_size': 128,
'dropout_rate': 0.0,
'first': True,
'num_outer_channel': 32,
'orientation': 'per_row',
'shared_dropout': True
},
'pair_transition': {
'dropout_rate': 0.0,
'num_intermediate_factor': 4,
'orientation': 'per_row',
'shared_dropout': True
},
'triangle_attention_ending_node': {
'dropout_rate': 0.25,
'gating': True,
'num_head': 4,
'orientation': 'per_column',
'shared_dropout': True
},
'triangle_attention_starting_node': {
'dropout_rate': 0.25,
'gating': True,
'num_head': 4,
'orientation': 'per_row',
'shared_dropout': True,
},
'triangle_multiplication_incoming': {
'dropout_rate': 0.25,
'equation': 'kjc,kic->ijc',
'num_intermediate_channel': 128,
'orientation': 'per_row',
'shared_dropout': True,
'fuse_projection_weights': True,
},
'triangle_multiplication_outgoing': {
'dropout_rate': 0.25,
'equation': 'ikc,jkc->ijc',
'num_intermediate_channel': 128,
'orientation': 'per_row',
'shared_dropout': True,
'fuse_projection_weights': True,
}
},
'extra_msa_channel': 64,
'extra_msa_stack_num_block': 4,
'num_msa': 508,
'num_extra_msa': 2048,
'masked_msa': {
'profile_prob': 0.1,
'replace_fraction': 0.15,
'same_prob': 0.1,
'uniform_prob': 0.1
},
'use_chain_relative': True,
'max_relative_chain': 2,
'max_relative_idx': 32,
'seq_channel': 384,
'msa_channel': 256,
'pair_channel': 128,
'prev_pos': {
'max_bin': 20.75,
'min_bin': 3.25,
'num_bins': 15
},
'recycle_features': True,
'recycle_pos': True,
'template': {
'attention': {
'gating': False,
'num_head': 4
},
'dgram_features': {
'max_bin': 50.75,
'min_bin': 3.25,
'num_bins': 39
},
'enabled': True,
'max_templates': 4,
'num_channels': 64,
'subbatch_size': 128,
'template_pair_stack': {
'num_block': 2,
'pair_transition': {
'dropout_rate': 0.0,
'num_intermediate_factor': 2,
'orientation': 'per_row',
'shared_dropout': True
},
'triangle_attention_ending_node': {
'dropout_rate': 0.25,
'gating': True,
'num_head': 4,
'orientation': 'per_column',
'shared_dropout': True
},
'triangle_attention_starting_node': {
'dropout_rate': 0.25,
'gating': True,
'num_head': 4,
'orientation': 'per_row',
'shared_dropout': True
},
'triangle_multiplication_incoming': {
'dropout_rate': 0.25,
'equation': 'kjc,kic->ijc',
'num_intermediate_channel': 64,
'orientation': 'per_row',
'shared_dropout': True,
'fuse_projection_weights': True,
},
'triangle_multiplication_outgoing': {
'dropout_rate': 0.25,
'equation': 'ikc,jkc->ijc',
'num_intermediate_channel': 64,
'orientation': 'per_row',
'shared_dropout': True,
'fuse_projection_weights': True,
}
}
},
},
'global_config': {
'bfloat16': True,
'bfloat16_output': False,
'deterministic': False,
'multimer_mode': True,
'subbatch_size': 4,
'use_remat': False,
'zero_init': True,
'eval_dropout': False,
},
'heads': {
'distogram': {
'first_break': 2.3125,
'last_break': 21.6875,
'num_bins': 64,
'weight': 0.3
},
'experimentally_resolved': {
'filter_by_resolution': True,
'max_resolution': 3.0,
'min_resolution': 0.1,
'weight': 0.01
},
'masked_msa': {
'weight': 2.0
},
'predicted_aligned_error': {
'filter_by_resolution': True,
'max_error_bin': 31.0,
'max_resolution': 3.0,
'min_resolution': 0.1,
'num_bins': 64,
'num_channels': 128,
'weight': 0.1
},
'predicted_lddt': {
'filter_by_resolution': True,
'max_resolution': 3.0,
'min_resolution': 0.1,
'num_bins': 50,
'num_channels': 128,
'weight': 0.01
},
'structure_module': {
'angle_norm_weight': 0.01,
'chi_weight': 0.5,
'clash_overlap_tolerance': 1.5,
'dropout': 0.1,
'interface_fape': {
'atom_clamp_distance': 1000.0,
'loss_unit_distance': 20.0
},
'intra_chain_fape': {
'atom_clamp_distance': 10.0,
'loss_unit_distance': 10.0
},
'num_channel': 384,
'num_head': 12,
'num_layer': 8,
'num_layer_in_transition': 3,
'num_point_qk': 4,
'num_point_v': 8,
'num_scalar_qk': 16,
'num_scalar_v': 16,
'position_scale': 20.0,
'sidechain': {
'atom_clamp_distance': 10.0,
'loss_unit_distance': 10.0,
'num_channel': 128,
'num_residual_block': 2,
'weight_frac': 0.5
},
'structural_violation_loss_weight': 1.0,
'violation_tolerance_factor': 12.0,
'weight': 1.0
}
},
'num_ensemble_eval': 1,
'num_recycle': 20,
# A negative value indicates that no early stopping will occur, i.e.
# the model will always run `num_recycle` number of recycling
# iterations. A positive value will enable early stopping if the
# difference in pairwise distances is less than the tolerance between
# recycling steps.
'recycle_early_stop_tolerance': 0.5,
'resample_msa_in_recycling': True
}
})
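# A brief usage sketch of the config machinery above (the asserted values are
# read off CONFIG_DIFFS and CONFIG_MULTIMER as defined in this file; the helper
# name is illustrative). model_config() deep-copies the base CONFIG (or
# CONFIG_MULTIMER) and applies the per-model diff through
# ConfigDict.update_from_flattened_dict.
def _example_model_configs():
  """Resolves a few presets and spot-checks the applied diffs."""
  monomer_cfg = model_config('model_1')            # Template-enabled monomer.
  ptm_cfg = model_config('model_3_ptm')            # Adds the pTM head weight.
  multimer_cfg = model_config('model_1_multimer_v3')
  assert monomer_cfg.model.embeddings_and_evoformer.template.enabled
  assert ptm_cfg.model.heads.predicted_aligned_error.weight == 0.1
  assert ptm_cfg.data.common.max_extra_msa == 5120
  assert multimer_cfg.model.num_recycle == 20
  return monomer_cfg, ptm_cfg, multimer_cfg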
|
alphafold-main
|
alphafold/model/config.py
|
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for layer_stack."""
import functools
from absl.testing import absltest
from absl.testing import parameterized
from alphafold.model import layer_stack
import haiku as hk
import jax
import jax.numpy as jnp
import numpy as np
import scipy.stats
# Suffixes applied by Haiku for repeated module names.
suffixes = [''] + [f'_{i}' for i in range(1, 100)]
def _slice_layers_params(layers_params):
sliced_layers_params = {}
for k, v in layers_params.items():
for inner_k in v:
for var_slice, suffix in zip(v[inner_k], suffixes):
k_new = k.split('/')[-1] + suffix
if k_new not in sliced_layers_params:
sliced_layers_params[k_new] = {}
sliced_layers_params[k_new][inner_k] = var_slice
return sliced_layers_params
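# A small illustrative example of what _slice_layers_params does (toy key name
# and shapes are arbitrary): stacked parameters produced by layer_stack, with a
# leading num_layers axis, are re-keyed to the per-layer module names Haiku
# would create in an unrolled loop ('linear1', 'linear1_1', ...), one slice of
# the leading axis per suffix.
def _slice_layers_params_example():
  stacked = {'outer_fn/linear1': {'w': np.zeros([3, 4, 4])}}
  sliced = _slice_layers_params(stacked)
  assert sorted(sliced) == ['linear1', 'linear1_1', 'linear1_2']
  assert sliced['linear1_2']['w'].shape == (4, 4)
  return sliced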
class LayerStackTest(parameterized.TestCase):
@parameterized.parameters([1, 2, 4])
def test_layer_stack(self, unroll):
"""Compare layer_stack to the equivalent unrolled stack.
Tests that the layer_stack application of a Haiku layer function is
equivalent to repeatedly applying the layer function in an unrolled loop.
Args:
unroll: Number of unrolled layers.
"""
num_layers = 20
def inner_fn(x):
x += hk.Linear(100, name='linear1')(x)
x += hk.Linear(100, name='linear2')(x)
return x
def outer_fn_unrolled(x):
for _ in range(num_layers):
x = inner_fn(x)
return x
def outer_fn_layer_stack(x):
stack = layer_stack.layer_stack(num_layers, unroll=unroll)(inner_fn)
return stack(x)
unrolled_fn = hk.transform(outer_fn_unrolled)
layer_stack_fn = hk.transform(outer_fn_layer_stack)
x = jax.random.uniform(jax.random.PRNGKey(0), [10, 256, 100])
rng_init = jax.random.PRNGKey(42)
params = layer_stack_fn.init(rng_init, x)
sliced_params = _slice_layers_params(params)
unrolled_pred = unrolled_fn.apply(sliced_params, None, x)
layer_stack_pred = layer_stack_fn.apply(params, None, x)
np.testing.assert_allclose(unrolled_pred, layer_stack_pred)
def test_layer_stack_multi_args(self):
"""Compare layer_stack to the equivalent unrolled stack.
Similar to `test_layer_stack`, but use a function that takes more than one
argument.
"""
num_layers = 20
def inner_fn(x, y):
x_out = x + hk.Linear(100, name='linear1')(y)
y_out = y + hk.Linear(100, name='linear2')(x)
return x_out, y_out
def outer_fn_unrolled(x, y):
for _ in range(num_layers):
x, y = inner_fn(x, y)
return x, y
def outer_fn_layer_stack(x, y):
stack = layer_stack.layer_stack(num_layers)(inner_fn)
return stack(x, y)
unrolled_fn = hk.transform(outer_fn_unrolled)
layer_stack_fn = hk.transform(outer_fn_layer_stack)
x = jax.random.uniform(jax.random.PRNGKey(0), [10, 256, 100])
y = jax.random.uniform(jax.random.PRNGKey(1), [10, 256, 100])
rng_init = jax.random.PRNGKey(42)
params = layer_stack_fn.init(rng_init, x, y)
sliced_params = _slice_layers_params(params)
unrolled_x, unrolled_y = unrolled_fn.apply(sliced_params, None, x, y)
layer_stack_x, layer_stack_y = layer_stack_fn.apply(params, None, x, y)
np.testing.assert_allclose(unrolled_x, layer_stack_x)
np.testing.assert_allclose(unrolled_y, layer_stack_y)
def test_layer_stack_no_varargs(self):
"""Test an error is raised when using a function with varargs."""
class VarArgsModule(hk.Module):
"""When used, this module should cause layer_stack to raise an Error."""
def __call__(self, *args):
return args
class NoVarArgsModule(hk.Module):
"""This module should be fine to use with layer_stack."""
def __call__(self, x):
return x
def build_and_init_stack(module_class):
def stack_fn(x):
module = module_class()
return layer_stack.layer_stack(1)(module)(x)
stack = hk.without_apply_rng(hk.transform(stack_fn))
stack.init(jax.random.PRNGKey(1729), jnp.ones([5]))
build_and_init_stack(NoVarArgsModule)
with self.assertRaisesRegex(
ValueError, 'The function `f` should not have any `varargs`'):
build_and_init_stack(VarArgsModule)
@parameterized.parameters([1, 2, 4])
def test_layer_stack_grads(self, unroll):
"""Compare layer_stack gradients to the equivalent unrolled stack.
Tests that the layer_stack application of a Haiku layer function is
equivalent to repeatedly applying the layer function in an unrolled loop.
Args:
unroll: Number of unrolled layers.
"""
num_layers = 20
def inner_fn(x):
x += hk.Linear(100, name='linear1')(x)
x += hk.Linear(100, name='linear2')(x)
return x
def outer_fn_unrolled(x):
for _ in range(num_layers):
x = inner_fn(x)
return x
def outer_fn_layer_stack(x):
stack = layer_stack.layer_stack(num_layers, unroll=unroll)(inner_fn)
return stack(x)
unrolled_fn = hk.transform(outer_fn_unrolled)
layer_stack_fn = hk.transform(outer_fn_layer_stack)
x = jax.random.uniform(jax.random.PRNGKey(0), [10, 256, 100])
rng_init = jax.random.PRNGKey(42)
params = layer_stack_fn.init(rng_init, x)
sliced_params = _slice_layers_params(params)
unrolled_grad = jax.grad(
lambda p, x: jnp.mean(unrolled_fn.apply(p, None, x)))(sliced_params, x)
layer_stack_grad = jax.grad(
lambda p, x: jnp.mean(layer_stack_fn.apply(p, None, x)))(params, x)
assert_fn = functools.partial(
np.testing.assert_allclose, atol=1e-4, rtol=1e-4)
jax.tree_map(assert_fn, unrolled_grad,
_slice_layers_params(layer_stack_grad))
def test_random(self):
"""Random numbers should be handled correctly."""
n = 100
@hk.transform
@layer_stack.layer_stack(n)
def add_random(x):
x = x + jax.random.normal(hk.next_rng_key())
return x
# Evaluate a bunch of times
key, *keys = jax.random.split(jax.random.PRNGKey(7), 1024 + 1)
params = add_random.init(key, 0.)
apply_fn = jax.jit(add_random.apply)
values = [apply_fn(params, key, 0.) for key in keys]
# Should be roughly N(0, sqrt(n))
cdf = scipy.stats.norm(scale=np.sqrt(n)).cdf
_, p = scipy.stats.kstest(values, cdf)
self.assertLess(0.3, p)
def test_threading(self):
"""Test @layer_stack when the function gets per-layer state."""
n = 5
@layer_stack.layer_stack(n, with_state=True)
def f(x, y):
x = x + y * jax.nn.one_hot(y, len(x)) / 10
return x, 2 * y
@hk.without_apply_rng
@hk.transform
def g(x, ys):
x, zs = f(x, ys)
# Check here to catch issues at init time
self.assertEqual(zs.shape, (n,))
return x, zs
rng = jax.random.PRNGKey(7)
x = np.zeros(n)
ys = np.arange(n).astype(np.float32)
params = g.init(rng, x, ys)
x, zs = g.apply(params, x, ys)
self.assertTrue(np.allclose(x, [0, .1, .2, .3, .4]))
self.assertTrue(np.all(zs == 2 * ys))
def test_nested_stacks(self):
def stack_fn(x):
def layer_fn(x):
return hk.Linear(100)(x)
outer_fn = layer_stack.layer_stack(10)(layer_fn)
layer_outer = layer_stack.layer_stack(20)(outer_fn)
return layer_outer(x)
hk_mod = hk.transform(stack_fn)
apply_rng, init_rng = jax.random.split(jax.random.PRNGKey(0))
params = hk_mod.init(init_rng, jnp.zeros([10, 100]))
hk_mod.apply(params, apply_rng, jnp.zeros([10, 100]))
p, = params.values()
assert p['w'].shape == (10, 20, 100, 100)
assert p['b'].shape == (10, 20, 100)
def test_with_state_multi_args(self):
"""Test layer_stack with state with multiple arguments."""
width = 4
batch_size = 5
stack_height = 3
def f_with_multi_args(x, a, b):
return hk.Linear(
width, w_init=hk.initializers.Constant(
jnp.eye(width)))(x) * a + b, None
@hk.without_apply_rng
@hk.transform
def hk_fn(x):
return layer_stack.layer_stack(
stack_height,
with_state=True)(f_with_multi_args)(x, jnp.full([stack_height], 2.),
jnp.ones([stack_height]))
x = jnp.zeros([batch_size, width])
key_seq = hk.PRNGSequence(19)
params = hk_fn.init(next(key_seq), x)
output, z = hk_fn.apply(params, x)
self.assertIsNone(z)
self.assertEqual(output.shape, (batch_size, width))
np.testing.assert_equal(output, np.full([batch_size, width], 7.))
def test_with_container_state(self):
width = 2
batch_size = 2
stack_height = 3
def f_with_container_state(x):
hk_layer = hk.Linear(
width, w_init=hk.initializers.Constant(jnp.eye(width)))
layer_output = hk_layer(x)
layer_state = {
'raw_output': layer_output,
'output_projection': jnp.sum(layer_output)
}
return layer_output + jnp.ones_like(layer_output), layer_state
@hk.without_apply_rng
@hk.transform
def hk_fn(x):
return layer_stack.layer_stack(
stack_height,
with_state=True)(f_with_container_state)(x)
x = jnp.zeros([batch_size, width])
key_seq = hk.PRNGSequence(19)
params = hk_fn.init(next(key_seq), x)
output, z = hk_fn.apply(params, x)
self.assertEqual(z['raw_output'].shape, (stack_height, batch_size, width))
self.assertEqual(output.shape, (batch_size, width))
self.assertEqual(z['output_projection'].shape, (stack_height,))
np.testing.assert_equal(np.sum(z['output_projection']), np.array(12.))
np.testing.assert_equal(
np.all(z['raw_output'] == np.array([0., 1., 2.])[..., None, None]),
np.array(True))
if __name__ == '__main__':
absltest.main()
|
alphafold-main
|
alphafold/model/layer_stack_test.py
|
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Core modules, which have been refactored in AlphaFold-Multimer.
The main difference is that the MSA sampling pipeline is moved inside the JAX
model for easier implementation of recycling and ensembling.
Lower-level modules up to EvoformerIteration are reused from modules.py.
"""
import functools
from typing import Sequence
from alphafold.common import residue_constants
from alphafold.model import all_atom_multimer
from alphafold.model import common_modules
from alphafold.model import folding_multimer
from alphafold.model import geometry
from alphafold.model import layer_stack
from alphafold.model import modules
from alphafold.model import prng
from alphafold.model import utils
import haiku as hk
import jax
import jax.numpy as jnp
import numpy as np
def reduce_fn(x, mode):
if mode == 'none' or mode is None:
return jnp.asarray(x)
elif mode == 'sum':
return jnp.asarray(x).sum()
elif mode == 'mean':
return jnp.mean(jnp.asarray(x))
else:
raise ValueError('Unsupported reduction option.')
def gumbel_noise(key: jnp.ndarray, shape: Sequence[int]) -> jnp.ndarray:
"""Generate Gumbel Noise of given Shape.
This generates samples from Gumbel(0, 1).
Args:
key: Jax random number key.
shape: Shape of noise to return.
Returns:
Gumbel noise of given shape.
"""
epsilon = 1e-6
uniform = utils.padding_consistent_rng(jax.random.uniform)
uniform_noise = uniform(
key, shape=shape, dtype=jnp.float32, minval=0., maxval=1.)
gumbel = -jnp.log(-jnp.log(uniform_noise + epsilon) + epsilon)
return gumbel
def gumbel_max_sample(key: jnp.ndarray, logits: jnp.ndarray) -> jnp.ndarray:
"""Samples from a probability distribution given by 'logits'.
  This uses the Gumbel-max trick to implement the sampling in an efficient manner.
Args:
key: prng key.
logits: Logarithm of probabilities to sample from, probabilities can be
unnormalized.
Returns:
Sample from logprobs in one-hot form.
"""
z = gumbel_noise(key, logits.shape)
return jax.nn.one_hot(
jnp.argmax(logits + z, axis=-1),
logits.shape[-1],
dtype=logits.dtype)
def gumbel_argsort_sample_idx(key: jnp.ndarray,
logits: jnp.ndarray) -> jnp.ndarray:
"""Samples with replacement from a distribution given by 'logits'.
This uses Gumbel trick to implement the sampling an efficient manner. For a
distribution over k items this samples k times without replacement, so this
is effectively sampling a random permutation with probabilities over the
permutations derived from the logprobs.
Args:
key: prng key.
logits: Logarithm of probabilities to sample from, probabilities can be
unnormalized.
Returns:
    A permutation of indices, ordered as successive draws without replacement.
"""
z = gumbel_noise(key, logits.shape)
  # This construction is equivalent to jnp.argsort, but uses a non-stable sort,
  # since stable sorts aren't supported by jax2tf.
axis = len(logits.shape) - 1
iota = jax.lax.broadcasted_iota(jnp.int64, logits.shape, axis)
_, perm = jax.lax.sort_key_val(
logits + z, iota, dimension=-1, is_stable=False)
return perm[::-1]
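# A standalone sketch of the Gumbel-argsort trick above (the logits are
# arbitrary illustrative values). Adding independent Gumbel(0, 1) noise to the
# logits and argsorting in descending order yields a permutation distributed as
# repeatedly sampling indices without replacement, so higher-logit entries tend
# to appear earlier in the returned order.
def _gumbel_argsort_demo():
  key = jax.random.PRNGKey(0)
  logits = jnp.log(jnp.array([0.7, 0.2, 0.1]))
  perm = gumbel_argsort_sample_idx(key, logits)  # A permutation of [0, 1, 2].
  assert sorted(np.asarray(perm).tolist()) == [0, 1, 2]
  return perm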
def make_masked_msa(batch, key, config, epsilon=1e-6):
"""Create data for BERT on raw MSA."""
# Add a random amino acid uniformly.
random_aa = jnp.array([0.05] * 20 + [0., 0.], dtype=jnp.float32)
categorical_probs = (
config.uniform_prob * random_aa +
config.profile_prob * batch['msa_profile'] +
config.same_prob * jax.nn.one_hot(batch['msa'], 22))
# Put all remaining probability on [MASK] which is a new column.
pad_shapes = [[0, 0] for _ in range(len(categorical_probs.shape))]
pad_shapes[-1][1] = 1
mask_prob = 1. - config.profile_prob - config.same_prob - config.uniform_prob
assert mask_prob >= 0.
categorical_probs = jnp.pad(
categorical_probs, pad_shapes, constant_values=mask_prob)
sh = batch['msa'].shape
key, mask_subkey, gumbel_subkey = key.split(3)
uniform = utils.padding_consistent_rng(jax.random.uniform)
mask_position = uniform(mask_subkey.get(), sh) < config.replace_fraction
mask_position *= batch['msa_mask']
logits = jnp.log(categorical_probs + epsilon)
bert_msa = gumbel_max_sample(gumbel_subkey.get(), logits)
bert_msa = jnp.where(mask_position,
jnp.argmax(bert_msa, axis=-1), batch['msa'])
bert_msa *= batch['msa_mask']
# Mix real and masked MSA.
if 'bert_mask' in batch:
batch['bert_mask'] *= mask_position.astype(jnp.float32)
else:
batch['bert_mask'] = mask_position.astype(jnp.float32)
batch['true_msa'] = batch['msa']
batch['msa'] = bert_msa
return batch
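# A small numeric illustration of the corruption distribution assembled above
# (standalone; the 0.1 probabilities match the multimer masked_msa config, the
# flat profile and residue index are toy assumptions). Each position draws its
# replacement from a mixture of a uniform amino acid, the MSA profile and the
# original residue, with all remaining mass on the appended [MASK] column.
def _masked_msa_probs_demo():
  profile_prob, same_prob, uniform_prob = 0.1, 0.1, 0.1
  random_aa = jnp.array([0.05] * 20 + [0., 0.], dtype=jnp.float32)
  toy_profile = jnp.full([22], 1. / 22.)        # Toy flat MSA profile.
  toy_residue = 3                               # Toy residue index.
  categorical_probs = (uniform_prob * random_aa +
                       profile_prob * toy_profile +
                       same_prob * jax.nn.one_hot(toy_residue, 22))
  mask_prob = 1. - profile_prob - same_prob - uniform_prob
  categorical_probs = jnp.pad(
      categorical_probs, [[0, 1]], constant_values=mask_prob)
  # 20 amino acids + 'X' + gap + [MASK] = 23 entries; the mixture sums to 1,
  # with 70% of the mass on [MASK].
  assert categorical_probs.shape == (23,)
  assert jnp.allclose(jnp.sum(categorical_probs), 1.0)
  return categorical_probs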
def nearest_neighbor_clusters(batch, gap_agreement_weight=0.):
"""Assign each extra MSA sequence to its nearest neighbor in sampled MSA."""
# Determine how much weight we assign to each agreement. In theory, we could
# use a full blosum matrix here, but right now let's just down-weight gap
# agreement because it could be spurious.
# Never put weight on agreeing on BERT mask.
weights = jnp.array(
[1.] * 21 + [gap_agreement_weight] + [0.], dtype=jnp.float32)
msa_mask = batch['msa_mask']
msa_one_hot = jax.nn.one_hot(batch['msa'], 23)
extra_mask = batch['extra_msa_mask']
extra_one_hot = jax.nn.one_hot(batch['extra_msa'], 23)
msa_one_hot_masked = msa_mask[:, :, None] * msa_one_hot
extra_one_hot_masked = extra_mask[:, :, None] * extra_one_hot
agreement = jnp.einsum('mrc, nrc->nm', extra_one_hot_masked,
weights * msa_one_hot_masked)
cluster_assignment = jax.nn.softmax(1e3 * agreement, axis=0)
cluster_assignment *= jnp.einsum('mr, nr->mn', msa_mask, extra_mask)
cluster_count = jnp.sum(cluster_assignment, axis=-1)
cluster_count += 1. # We always include the sequence itself.
msa_sum = jnp.einsum('nm, mrc->nrc', cluster_assignment, extra_one_hot_masked)
msa_sum += msa_one_hot_masked
cluster_profile = msa_sum / cluster_count[:, None, None]
extra_deletion_matrix = batch['extra_deletion_matrix']
deletion_matrix = batch['deletion_matrix']
del_sum = jnp.einsum('nm, mc->nc', cluster_assignment,
extra_mask * extra_deletion_matrix)
del_sum += deletion_matrix # Original sequence.
cluster_deletion_mean = del_sum / cluster_count[:, None]
return cluster_profile, cluster_deletion_mean
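# A shape-level sketch of nearest_neighbor_clusters above (all-zero toy inputs,
# sizes chosen arbitrarily): each sampled MSA row receives a profile over the
# 23 MSA tokens and a mean deletion count, aggregated from the extra MSA rows
# softly assigned to it.
def _nearest_neighbor_clusters_shape_demo(num_msa=2, num_extra=3, num_res=5):
  batch = {
      'msa': jnp.zeros([num_msa, num_res], dtype=jnp.int32),
      'msa_mask': jnp.ones([num_msa, num_res]),
      'deletion_matrix': jnp.zeros([num_msa, num_res]),
      'extra_msa': jnp.zeros([num_extra, num_res], dtype=jnp.int32),
      'extra_msa_mask': jnp.ones([num_extra, num_res]),
      'extra_deletion_matrix': jnp.zeros([num_extra, num_res]),
  }
  cluster_profile, cluster_deletion_mean = nearest_neighbor_clusters(batch)
  assert cluster_profile.shape == (num_msa, num_res, 23)
  assert cluster_deletion_mean.shape == (num_msa, num_res)
  return cluster_profile, cluster_deletion_mean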
def create_msa_feat(batch):
"""Create and concatenate MSA features."""
msa_1hot = jax.nn.one_hot(batch['msa'], 23)
deletion_matrix = batch['deletion_matrix']
has_deletion = jnp.clip(deletion_matrix, 0., 1.)[..., None]
deletion_value = (jnp.arctan(deletion_matrix / 3.) * (2. / jnp.pi))[..., None]
deletion_mean_value = (jnp.arctan(batch['cluster_deletion_mean'] / 3.) *
(2. / jnp.pi))[..., None]
msa_feat = [
msa_1hot,
has_deletion,
deletion_value,
batch['cluster_profile'],
deletion_mean_value
]
return jnp.concatenate(msa_feat, axis=-1)
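# A quick shape check for create_msa_feat above (toy sizes and all-zero values
# are arbitrary): the concatenated per-position feature has 23 (one-hot MSA)
# + 1 (has_deletion) + 1 (deletion_value) + 23 (cluster_profile)
# + 1 (deletion_mean_value) = 49 channels.
def _create_msa_feat_shape_demo(num_seq=4, num_res=8):
  batch = {
      'msa': jnp.zeros([num_seq, num_res], dtype=jnp.int32),
      'deletion_matrix': jnp.zeros([num_seq, num_res]),
      'cluster_profile': jnp.zeros([num_seq, num_res, 23]),
      'cluster_deletion_mean': jnp.zeros([num_seq, num_res]),
  }
  msa_feat = create_msa_feat(batch)
  assert msa_feat.shape == (num_seq, num_res, 49)
  return msa_feat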
def create_extra_msa_feature(batch, num_extra_msa):
"""Expand extra_msa into 1hot and concat with other extra msa features.
We do this as late as possible as the one_hot extra msa can be very large.
Args:
batch: a dictionary with the following keys:
* 'extra_msa': [num_seq, num_res] MSA that wasn't selected as a cluster
centre. Note - This isn't one-hotted.
* 'extra_deletion_matrix': [num_seq, num_res] Number of deletions at given
position.
num_extra_msa: Number of extra msa to use.
Returns:
Concatenated tensor of extra MSA features.
"""
# 23 = 20 amino acids + 'X' for unknown + gap + bert mask
extra_msa = batch['extra_msa'][:num_extra_msa]
deletion_matrix = batch['extra_deletion_matrix'][:num_extra_msa]
msa_1hot = jax.nn.one_hot(extra_msa, 23)
has_deletion = jnp.clip(deletion_matrix, 0., 1.)[..., None]
deletion_value = (jnp.arctan(deletion_matrix / 3.) * (2. / jnp.pi))[..., None]
extra_msa_mask = batch['extra_msa_mask'][:num_extra_msa]
return jnp.concatenate([msa_1hot, has_deletion, deletion_value],
axis=-1), extra_msa_mask
def sample_msa(key, batch, max_seq):
"""Sample MSA randomly, remaining sequences are stored as `extra_*`.
Args:
key: safe key for random number generation.
batch: batch to sample msa from.
max_seq: number of sequences to sample.
Returns:
    Batch with the sampled MSA; the remaining sequences are stored as `extra_*`.
"""
# Sample uniformly among sequences with at least one non-masked position.
logits = (jnp.clip(jnp.sum(batch['msa_mask'], axis=-1), 0., 1.) - 1.) * 1e6
# The cluster_bias_mask can be used to preserve the first row (target
# sequence) for each chain, for example.
if 'cluster_bias_mask' not in batch:
cluster_bias_mask = jnp.pad(
jnp.zeros(batch['msa'].shape[0] - 1), (1, 0), constant_values=1.)
else:
cluster_bias_mask = batch['cluster_bias_mask']
logits += cluster_bias_mask * 1e6
index_order = gumbel_argsort_sample_idx(key.get(), logits)
sel_idx = index_order[:max_seq]
extra_idx = index_order[max_seq:]
for k in ['msa', 'deletion_matrix', 'msa_mask', 'bert_mask']:
if k in batch:
batch['extra_' + k] = batch[k][extra_idx]
batch[k] = batch[k][sel_idx]
return batch
def make_msa_profile(batch):
"""Compute the MSA profile."""
# Compute the profile for every residue (over all MSA sequences).
return utils.mask_mean(
batch['msa_mask'][:, :, None], jax.nn.one_hot(batch['msa'], 22), axis=0)
class AlphaFoldIteration(hk.Module):
"""A single recycling iteration of AlphaFold architecture.
Computes ensembled (averaged) representations from the provided features.
These representations are then passed to the various heads
that have been requested by the configuration file.
"""
def __init__(self, config, global_config, name='alphafold_iteration'):
super().__init__(name=name)
self.config = config
self.global_config = global_config
def __call__(self,
batch,
is_training,
return_representations=False,
safe_key=None):
if is_training:
num_ensemble = np.asarray(self.config.num_ensemble_train)
else:
num_ensemble = np.asarray(self.config.num_ensemble_eval)
# Compute representations for each MSA sample and average.
embedding_module = EmbeddingsAndEvoformer(
self.config.embeddings_and_evoformer, self.global_config)
repr_shape = hk.eval_shape(
lambda: embedding_module(batch, is_training))
representations = {
k: jnp.zeros(v.shape, v.dtype) for (k, v) in repr_shape.items()
}
def ensemble_body(x, unused_y):
"""Add into representations ensemble."""
del unused_y
representations, safe_key = x
safe_key, safe_subkey = safe_key.split()
representations_update = embedding_module(
batch, is_training, safe_key=safe_subkey)
for k in representations:
if k not in {'msa', 'true_msa', 'bert_mask'}:
representations[k] += representations_update[k] * (
1. / num_ensemble).astype(representations[k].dtype)
else:
representations[k] = representations_update[k]
return (representations, safe_key), None
(representations, _), _ = hk.scan(
ensemble_body, (representations, safe_key), None, length=num_ensemble)
self.representations = representations
self.batch = batch
self.heads = {}
for head_name, head_config in sorted(self.config.heads.items()):
if not head_config.weight:
continue # Do not instantiate zero-weight heads.
head_factory = {
'masked_msa':
modules.MaskedMsaHead,
'distogram':
modules.DistogramHead,
'structure_module':
folding_multimer.StructureModule,
'predicted_aligned_error':
modules.PredictedAlignedErrorHead,
'predicted_lddt':
modules.PredictedLDDTHead,
'experimentally_resolved':
modules.ExperimentallyResolvedHead,
}[head_name]
self.heads[head_name] = (head_config,
head_factory(head_config, self.global_config))
structure_module_output = None
if 'entity_id' in batch and 'all_atom_positions' in batch:
_, fold_module = self.heads['structure_module']
structure_module_output = fold_module(representations, batch, is_training)
ret = {}
ret['representations'] = representations
for name, (head_config, module) in self.heads.items():
if name == 'structure_module' and structure_module_output is not None:
ret[name] = structure_module_output
representations['structure_module'] = structure_module_output.pop('act')
# Skip confidence heads until StructureModule is executed.
elif name in {'predicted_lddt', 'predicted_aligned_error',
'experimentally_resolved'}:
continue
else:
ret[name] = module(representations, batch, is_training)
# Add confidence heads after StructureModule is executed.
if self.config.heads.get('predicted_lddt.weight', 0.0):
name = 'predicted_lddt'
head_config, module = self.heads[name]
ret[name] = module(representations, batch, is_training)
if self.config.heads.experimentally_resolved.weight:
name = 'experimentally_resolved'
head_config, module = self.heads[name]
ret[name] = module(representations, batch, is_training)
if self.config.heads.get('predicted_aligned_error.weight', 0.0):
name = 'predicted_aligned_error'
head_config, module = self.heads[name]
ret[name] = module(representations, batch, is_training)
# Will be used for ipTM computation.
ret[name]['asym_id'] = batch['asym_id']
return ret
class AlphaFold(hk.Module):
"""AlphaFold-Multimer model with recycling.
"""
def __init__(self, config, name='alphafold'):
super().__init__(name=name)
self.config = config
self.global_config = config.global_config
def __call__(
self,
batch,
is_training,
return_representations=False,
safe_key=None):
c = self.config
impl = AlphaFoldIteration(c, self.global_config)
if safe_key is None:
safe_key = prng.SafeKey(hk.next_rng_key())
elif isinstance(safe_key, jnp.ndarray):
safe_key = prng.SafeKey(safe_key)
assert isinstance(batch, dict)
num_res = batch['aatype'].shape[0]
def get_prev(ret):
new_prev = {
'prev_pos':
ret['structure_module']['final_atom_positions'],
'prev_msa_first_row': ret['representations']['msa_first_row'],
'prev_pair': ret['representations']['pair'],
}
return jax.tree_map(jax.lax.stop_gradient, new_prev)
def apply_network(prev, safe_key):
recycled_batch = {**batch, **prev}
return impl(
batch=recycled_batch,
is_training=is_training,
safe_key=safe_key)
prev = {}
emb_config = self.config.embeddings_and_evoformer
if emb_config.recycle_pos:
prev['prev_pos'] = jnp.zeros(
[num_res, residue_constants.atom_type_num, 3])
if emb_config.recycle_features:
prev['prev_msa_first_row'] = jnp.zeros(
[num_res, emb_config.msa_channel])
prev['prev_pair'] = jnp.zeros(
[num_res, num_res, emb_config.pair_channel])
if self.config.num_recycle:
if 'num_iter_recycling' in batch:
# Training time: num_iter_recycling is in batch.
# Value for each ensemble batch is the same, so arbitrarily taking 0-th.
num_iter = batch['num_iter_recycling'][0]
# Add insurance that even when ensembling, we will not run more
# recyclings than the model is configured to run.
num_iter = jnp.minimum(num_iter, c.num_recycle)
else:
# Eval mode or tests: use the maximum number of iterations.
num_iter = c.num_recycle
def distances(points):
"""Compute all pairwise distances for a set of points."""
return jnp.sqrt(jnp.sum((points[:, None] - points[None, :])**2,
axis=-1))
def recycle_body(x):
i, _, prev, safe_key = x
safe_key1, safe_key2 = safe_key.split() if c.resample_msa_in_recycling else safe_key.duplicate() # pylint: disable=line-too-long
ret = apply_network(prev=prev, safe_key=safe_key2)
return i+1, prev, get_prev(ret), safe_key1
def recycle_cond(x):
i, prev, next_in, _ = x
ca_idx = residue_constants.atom_order['CA']
sq_diff = jnp.square(distances(prev['prev_pos'][:, ca_idx, :]) -
distances(next_in['prev_pos'][:, ca_idx, :]))
mask = batch['seq_mask'][:, None] * batch['seq_mask'][None, :]
sq_diff = utils.mask_mean(mask, sq_diff)
        # Early stopping criterion based on the one used in
# AF2Complex: https://www.nature.com/articles/s41467-022-29394-2
diff = jnp.sqrt(sq_diff + 1e-8) # avoid bad numerics giving negatives
less_than_max_recycles = (i < num_iter)
has_exceeded_tolerance = (
(i == 0) | (diff > c.recycle_early_stop_tolerance))
return less_than_max_recycles & has_exceeded_tolerance
if hk.running_init():
num_recycles, _, prev, safe_key = recycle_body(
(0, prev, prev, safe_key))
else:
num_recycles, _, prev, safe_key = hk.while_loop(
recycle_cond,
recycle_body,
(0, prev, prev, safe_key))
else:
# No recycling.
num_recycles = 0
# Run extra iteration.
ret = apply_network(prev=prev, safe_key=safe_key)
if not return_representations:
del ret['representations']
ret['num_recycles'] = num_recycles
return ret
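# A standalone sketch of the early-stop test used in recycle_cond above (the
# toy CA coordinates are arbitrary; the 0.5 Angstrom tolerance matches
# CONFIG_MULTIMER's recycle_early_stop_tolerance). Recycling continues while
# the masked RMS change in pairwise CA distances between consecutive
# iterations exceeds the tolerance.
def _recycle_early_stop_demo(tolerance=0.5):
  def pairwise_distances(points):
    return jnp.sqrt(jnp.sum((points[:, None] - points[None, :])**2, axis=-1))
  prev_ca = jnp.array([[0., 0., 0.], [3.8, 0., 0.], [7.6, 0., 0.]])
  next_ca = prev_ca + jnp.array([[0., 0., 0.], [0., 0.1, 0.], [0., 0., 0.]])
  seq_mask = jnp.ones([3])
  mask_2d = seq_mask[:, None] * seq_mask[None, :]
  sq_diff = jnp.square(
      pairwise_distances(prev_ca) - pairwise_distances(next_ca))
  mean_sq_diff = jnp.sum(mask_2d * sq_diff) / jnp.sum(mask_2d)
  diff = jnp.sqrt(mean_sq_diff + 1e-8)
  return diff > tolerance  # False here, so recycling would stop early.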
class EmbeddingsAndEvoformer(hk.Module):
"""Embeds the input data and runs Evoformer.
Produces the MSA, single and pair representations.
"""
def __init__(self, config, global_config, name='evoformer'):
super().__init__(name=name)
self.config = config
self.global_config = global_config
def _relative_encoding(self, batch):
"""Add relative position encodings.
For position (i, j), the value is (i-j) clipped to [-k, k] and one-hotted.
When not using 'use_chain_relative' the residue indices are used as is, e.g.
for heteromers relative positions will be computed using the positions in
the corresponding chains.
When using 'use_chain_relative' we add an extra bin that denotes
'different chain'. Furthermore we also provide the relative chain index
(i.e. sym_id) clipped and one-hotted to the network. And an extra feature
which denotes whether they belong to the same chain type, i.e. it's 0 if
they are in different heteromer chains and 1 otherwise.
Args:
batch: batch.
Returns:
Feature embedding using the features as described before.
"""
c = self.config
gc = self.global_config
rel_feats = []
pos = batch['residue_index']
asym_id = batch['asym_id']
asym_id_same = jnp.equal(asym_id[:, None], asym_id[None, :])
offset = pos[:, None] - pos[None, :]
dtype = jnp.bfloat16 if gc.bfloat16 else jnp.float32
clipped_offset = jnp.clip(
offset + c.max_relative_idx, a_min=0, a_max=2 * c.max_relative_idx)
if c.use_chain_relative:
final_offset = jnp.where(asym_id_same, clipped_offset,
(2 * c.max_relative_idx + 1) *
jnp.ones_like(clipped_offset))
rel_pos = jax.nn.one_hot(final_offset, 2 * c.max_relative_idx + 2)
rel_feats.append(rel_pos)
entity_id = batch['entity_id']
entity_id_same = jnp.equal(entity_id[:, None], entity_id[None, :])
rel_feats.append(entity_id_same.astype(rel_pos.dtype)[..., None])
sym_id = batch['sym_id']
rel_sym_id = sym_id[:, None] - sym_id[None, :]
max_rel_chain = c.max_relative_chain
clipped_rel_chain = jnp.clip(
rel_sym_id + max_rel_chain, a_min=0, a_max=2 * max_rel_chain)
final_rel_chain = jnp.where(entity_id_same, clipped_rel_chain,
(2 * max_rel_chain + 1) *
jnp.ones_like(clipped_rel_chain))
rel_chain = jax.nn.one_hot(final_rel_chain, 2 * c.max_relative_chain + 2)
rel_feats.append(rel_chain)
else:
rel_pos = jax.nn.one_hot(clipped_offset, 2 * c.max_relative_idx + 1)
rel_feats.append(rel_pos)
rel_feat = jnp.concatenate(rel_feats, axis=-1)
rel_feat = rel_feat.astype(dtype)
return common_modules.Linear(
c.pair_channel,
name='position_activations')(
rel_feat)
def __call__(self, batch, is_training, safe_key=None):
c = self.config
gc = self.global_config
batch = dict(batch)
dtype = jnp.bfloat16 if gc.bfloat16 else jnp.float32
if safe_key is None:
safe_key = prng.SafeKey(hk.next_rng_key())
output = {}
batch['msa_profile'] = make_msa_profile(batch)
with utils.bfloat16_context():
target_feat = jax.nn.one_hot(batch['aatype'], 21).astype(dtype)
preprocess_1d = common_modules.Linear(
c.msa_channel, name='preprocess_1d')(
target_feat)
safe_key, sample_key, mask_key = safe_key.split(3)
batch = sample_msa(sample_key, batch, c.num_msa)
batch = make_masked_msa(batch, mask_key, c.masked_msa)
(batch['cluster_profile'],
batch['cluster_deletion_mean']) = nearest_neighbor_clusters(batch)
msa_feat = create_msa_feat(batch).astype(dtype)
preprocess_msa = common_modules.Linear(
c.msa_channel, name='preprocess_msa')(
msa_feat)
msa_activations = jnp.expand_dims(preprocess_1d, axis=0) + preprocess_msa
left_single = common_modules.Linear(
c.pair_channel, name='left_single')(
target_feat)
right_single = common_modules.Linear(
c.pair_channel, name='right_single')(
target_feat)
pair_activations = left_single[:, None] + right_single[None]
mask_2d = batch['seq_mask'][:, None] * batch['seq_mask'][None, :]
mask_2d = mask_2d.astype(dtype)
if c.recycle_pos:
prev_pseudo_beta = modules.pseudo_beta_fn(
batch['aatype'], batch['prev_pos'], None)
dgram = modules.dgram_from_positions(
prev_pseudo_beta, **self.config.prev_pos)
dgram = dgram.astype(dtype)
pair_activations += common_modules.Linear(
c.pair_channel, name='prev_pos_linear')(
dgram)
if c.recycle_features:
prev_msa_first_row = common_modules.LayerNorm(
axis=[-1],
create_scale=True,
create_offset=True,
name='prev_msa_first_row_norm')(
batch['prev_msa_first_row']).astype(dtype)
msa_activations = msa_activations.at[0].add(prev_msa_first_row)
pair_activations += common_modules.LayerNorm(
axis=[-1],
create_scale=True,
create_offset=True,
name='prev_pair_norm')(
batch['prev_pair']).astype(dtype)
if c.max_relative_idx:
pair_activations += self._relative_encoding(batch)
if c.template.enabled:
template_module = TemplateEmbedding(c.template, gc)
template_batch = {
'template_aatype': batch['template_aatype'],
'template_all_atom_positions': batch['template_all_atom_positions'],
'template_all_atom_mask': batch['template_all_atom_mask']
}
# Construct a mask such that only intra-chain template features are
# computed, since all templates are for each chain individually.
multichain_mask = batch['asym_id'][:, None] == batch['asym_id'][None, :]
safe_key, safe_subkey = safe_key.split()
template_act = template_module(
query_embedding=pair_activations,
template_batch=template_batch,
padding_mask_2d=mask_2d,
multichain_mask_2d=multichain_mask,
is_training=is_training,
safe_key=safe_subkey)
pair_activations += template_act
# Extra MSA stack.
(extra_msa_feat,
extra_msa_mask) = create_extra_msa_feature(batch, c.num_extra_msa)
extra_msa_activations = common_modules.Linear(
c.extra_msa_channel,
name='extra_msa_activations')(
extra_msa_feat).astype(dtype)
extra_msa_mask = extra_msa_mask.astype(dtype)
extra_evoformer_input = {
'msa': extra_msa_activations,
'pair': pair_activations,
}
extra_masks = {'msa': extra_msa_mask, 'pair': mask_2d}
extra_evoformer_iteration = modules.EvoformerIteration(
c.evoformer, gc, is_extra_msa=True, name='extra_msa_stack')
def extra_evoformer_fn(x):
act, safe_key = x
safe_key, safe_subkey = safe_key.split()
extra_evoformer_output = extra_evoformer_iteration(
activations=act,
masks=extra_masks,
is_training=is_training,
safe_key=safe_subkey)
return (extra_evoformer_output, safe_key)
if gc.use_remat:
extra_evoformer_fn = hk.remat(extra_evoformer_fn)
safe_key, safe_subkey = safe_key.split()
extra_evoformer_stack = layer_stack.layer_stack(
c.extra_msa_stack_num_block)(
extra_evoformer_fn)
extra_evoformer_output, safe_key = extra_evoformer_stack(
(extra_evoformer_input, safe_subkey))
pair_activations = extra_evoformer_output['pair']
# Get the size of the MSA before potentially adding templates, so we
# can crop out the templates later.
num_msa_sequences = msa_activations.shape[0]
evoformer_input = {
'msa': msa_activations,
'pair': pair_activations,
}
evoformer_masks = {
'msa': batch['msa_mask'].astype(dtype),
'pair': mask_2d
}
if c.template.enabled:
template_features, template_masks = (
template_embedding_1d(
batch=batch, num_channel=c.msa_channel, global_config=gc))
evoformer_input['msa'] = jnp.concatenate(
[evoformer_input['msa'], template_features], axis=0)
evoformer_masks['msa'] = jnp.concatenate(
[evoformer_masks['msa'], template_masks], axis=0)
evoformer_iteration = modules.EvoformerIteration(
c.evoformer, gc, is_extra_msa=False, name='evoformer_iteration')
def evoformer_fn(x):
act, safe_key = x
safe_key, safe_subkey = safe_key.split()
evoformer_output = evoformer_iteration(
activations=act,
masks=evoformer_masks,
is_training=is_training,
safe_key=safe_subkey)
return (evoformer_output, safe_key)
if gc.use_remat:
evoformer_fn = hk.remat(evoformer_fn)
safe_key, safe_subkey = safe_key.split()
evoformer_stack = layer_stack.layer_stack(c.evoformer_num_block)(
evoformer_fn)
def run_evoformer(evoformer_input):
evoformer_output, _ = evoformer_stack((evoformer_input, safe_subkey))
return evoformer_output
evoformer_output = run_evoformer(evoformer_input)
msa_activations = evoformer_output['msa']
pair_activations = evoformer_output['pair']
single_activations = common_modules.Linear(
c.seq_channel, name='single_activations')(
msa_activations[0])
output.update({
'single':
single_activations,
'pair':
pair_activations,
# Crop away template rows such that they are not used in MaskedMsaHead.
'msa':
msa_activations[:num_msa_sequences, :, :],
'msa_first_row':
msa_activations[0],
})
# Convert back to float32 if we're not saving memory.
if not gc.bfloat16_output:
for k, v in output.items():
if v.dtype == jnp.bfloat16:
output[k] = v.astype(jnp.float32)
return output
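# A small standalone sketch of the chain-relative position features computed in
# EmbeddingsAndEvoformer._relative_encoding above (toy residue and chain
# indices; max_relative_idx=32 matches CONFIG_MULTIMER). Residue-index offsets
# are clipped to [-k, k] and one-hotted, with every inter-chain pair mapped to
# one extra "different chain" bin.
def _relative_position_bins_demo(max_relative_idx=32):
  residue_index = jnp.array([0, 1, 2, 0, 1])  # Two toy chains of length 3 + 2.
  asym_id = jnp.array([0, 0, 0, 1, 1])
  asym_id_same = asym_id[:, None] == asym_id[None, :]
  offset = residue_index[:, None] - residue_index[None, :]
  clipped = jnp.clip(offset + max_relative_idx, 0, 2 * max_relative_idx)
  final_offset = jnp.where(asym_id_same, clipped, 2 * max_relative_idx + 1)
  rel_pos = jax.nn.one_hot(final_offset, 2 * max_relative_idx + 2)
  assert rel_pos.shape == (5, 5, 2 * max_relative_idx + 2)
  return rel_pos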
class TemplateEmbedding(hk.Module):
"""Embed a set of templates."""
def __init__(self, config, global_config, name='template_embedding'):
super().__init__(name=name)
self.config = config
self.global_config = global_config
def __call__(self, query_embedding, template_batch, padding_mask_2d,
multichain_mask_2d, is_training,
safe_key=None):
"""Generate an embedding for a set of templates.
Args:
query_embedding: [num_res, num_res, num_channel] a query tensor that will
be used to attend over the templates to remove the num_templates
dimension.
template_batch: A dictionary containing:
`template_aatype`: [num_templates, num_res] aatype for each template.
`template_all_atom_positions`: [num_templates, num_res, 37, 3] atom
positions for all templates.
`template_all_atom_mask`: [num_templates, num_res, 37] mask for each
template.
padding_mask_2d: [num_res, num_res] Pair mask for attention operations.
multichain_mask_2d: [num_res, num_res] Mask indicating which residue pairs
are intra-chain, used to mask out residue distance based features
between chains.
      is_training: bool indicating whether we are running in training mode.
safe_key: random key generator.
Returns:
An embedding of size [num_res, num_res, num_channels]
"""
c = self.config
if safe_key is None:
safe_key = prng.SafeKey(hk.next_rng_key())
num_templates = template_batch['template_aatype'].shape[0]
num_res, _, query_num_channels = query_embedding.shape
# Embed each template separately.
template_embedder = SingleTemplateEmbedding(self.config, self.global_config)
def partial_template_embedder(template_aatype,
template_all_atom_positions,
template_all_atom_mask,
unsafe_key):
safe_key = prng.SafeKey(unsafe_key)
return template_embedder(query_embedding,
template_aatype,
template_all_atom_positions,
template_all_atom_mask,
padding_mask_2d,
multichain_mask_2d,
is_training,
safe_key)
safe_key, unsafe_key = safe_key.split()
unsafe_keys = jax.random.split(unsafe_key._key, num_templates)
def scan_fn(carry, x):
return carry + partial_template_embedder(*x), None
scan_init = jnp.zeros((num_res, num_res, c.num_channels),
dtype=query_embedding.dtype)
summed_template_embeddings, _ = hk.scan(
scan_fn, scan_init,
(template_batch['template_aatype'],
template_batch['template_all_atom_positions'],
template_batch['template_all_atom_mask'], unsafe_keys))
embedding = summed_template_embeddings / num_templates
embedding = jax.nn.relu(embedding)
embedding = common_modules.Linear(
query_num_channels,
initializer='relu',
name='output_linear')(embedding)
return embedding
class SingleTemplateEmbedding(hk.Module):
"""Embed a single template."""
def __init__(self, config, global_config, name='single_template_embedding'):
super().__init__(name=name)
self.config = config
self.global_config = global_config
def __call__(self, query_embedding, template_aatype,
template_all_atom_positions, template_all_atom_mask,
padding_mask_2d, multichain_mask_2d, is_training,
safe_key):
"""Build the single template embedding graph.
Args:
query_embedding: (num_res, num_res, num_channels) - embedding of the
query sequence/msa.
template_aatype: [num_res] aatype for each template.
template_all_atom_positions: [num_res, 37, 3] atom positions for all
templates.
template_all_atom_mask: [num_res, 37] mask for each template.
padding_mask_2d: Padding mask (Note: this doesn't care if a template
exists, unlike the template_pseudo_beta_mask).
multichain_mask_2d: A mask indicating intra-chain residue pairs, used
to mask out between chain distances/features when templates are for
single chains.
is_training: Are we in training mode.
safe_key: Random key generator.
Returns:
A template embedding (num_res, num_res, num_channels).
"""
gc = self.global_config
c = self.config
assert padding_mask_2d.dtype == query_embedding.dtype
dtype = query_embedding.dtype
num_channels = self.config.num_channels
def construct_input(query_embedding, template_aatype,
template_all_atom_positions, template_all_atom_mask,
multichain_mask_2d):
# Compute distogram feature for the template.
template_positions, pseudo_beta_mask = modules.pseudo_beta_fn(
template_aatype, template_all_atom_positions, template_all_atom_mask)
pseudo_beta_mask_2d = (pseudo_beta_mask[:, None] *
pseudo_beta_mask[None, :])
pseudo_beta_mask_2d *= multichain_mask_2d
template_dgram = modules.dgram_from_positions(
template_positions, **self.config.dgram_features)
template_dgram *= pseudo_beta_mask_2d[..., None]
template_dgram = template_dgram.astype(dtype)
pseudo_beta_mask_2d = pseudo_beta_mask_2d.astype(dtype)
to_concat = [(template_dgram, 1), (pseudo_beta_mask_2d, 0)]
aatype = jax.nn.one_hot(template_aatype, 22, axis=-1, dtype=dtype)
to_concat.append((aatype[None, :, :], 1))
to_concat.append((aatype[:, None, :], 1))
# Compute a feature representing the normalized vector between each
      # backbone affine - i.e. in each residue's local frame, what direction
      # each of the other residues lies in.
raw_atom_pos = template_all_atom_positions
if gc.bfloat16:
# Vec3Arrays are required to be float32
raw_atom_pos = raw_atom_pos.astype(jnp.float32)
atom_pos = geometry.Vec3Array.from_array(raw_atom_pos)
rigid, backbone_mask = folding_multimer.make_backbone_affine(
atom_pos,
template_all_atom_mask,
template_aatype)
points = rigid.translation
rigid_vec = rigid[:, None].inverse().apply_to_point(points)
unit_vector = rigid_vec.normalized()
unit_vector = [unit_vector.x, unit_vector.y, unit_vector.z]
if gc.bfloat16:
unit_vector = [x.astype(jnp.bfloat16) for x in unit_vector]
backbone_mask = backbone_mask.astype(jnp.bfloat16)
backbone_mask_2d = backbone_mask[:, None] * backbone_mask[None, :]
backbone_mask_2d *= multichain_mask_2d
unit_vector = [x*backbone_mask_2d for x in unit_vector]
# Note that the backbone_mask takes into account C, CA and N (unlike
# pseudo beta mask which just needs CB) so we add both masks as features.
to_concat.extend([(x, 0) for x in unit_vector])
to_concat.append((backbone_mask_2d, 0))
query_embedding = common_modules.LayerNorm(
axis=[-1],
create_scale=True,
create_offset=True,
name='query_embedding_norm')(
query_embedding)
# Allow the template embedder to see the query embedding. Note this
# contains the position relative feature, so this is how the network knows
# which residues are next to each other.
to_concat.append((query_embedding, 1))
act = 0
for i, (x, n_input_dims) in enumerate(to_concat):
act += common_modules.Linear(
num_channels,
num_input_dims=n_input_dims,
initializer='relu',
name=f'template_pair_embedding_{i}')(x)
return act
act = construct_input(query_embedding, template_aatype,
template_all_atom_positions, template_all_atom_mask,
multichain_mask_2d)
template_iteration = TemplateEmbeddingIteration(
c.template_pair_stack, gc, name='template_embedding_iteration')
def template_iteration_fn(x):
act, safe_key = x
safe_key, safe_subkey = safe_key.split()
act = template_iteration(
act=act,
pair_mask=padding_mask_2d,
is_training=is_training,
safe_key=safe_subkey)
return (act, safe_key)
if gc.use_remat:
template_iteration_fn = hk.remat(template_iteration_fn)
safe_key, safe_subkey = safe_key.split()
template_stack = layer_stack.layer_stack(
c.template_pair_stack.num_block)(
template_iteration_fn)
act, safe_key = template_stack((act, safe_subkey))
act = common_modules.LayerNorm(
axis=[-1],
create_scale=True,
create_offset=True,
name='output_layer_norm')(
act)
return act
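# A minimal, self-contained sketch of the distogram feature built above by
# modules.dgram_from_positions: pairwise squared distances are one-hot binned.
# The bin edges below are illustrative defaults, not necessarily the values
# configured in self.config.dgram_features.
def _distogram_sketch(positions, num_bins=39, min_bin=3.25, max_bin=50.75):
  """Illustrative only: one-hot distance binning for (num_res, 3) positions."""
  sq_dists = jnp.sum(
      jnp.square(positions[:, None, :] - positions[None, :, :]), axis=-1)
  lower = jnp.square(jnp.linspace(min_bin, max_bin, num_bins))
  upper = jnp.concatenate([lower[1:], jnp.array([1e8])], axis=-1)
  dgram = ((sq_dists[..., None] > lower) *
           (sq_dists[..., None] < upper)).astype(jnp.float32)
  return dgram  # (num_res, num_res, num_bins)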
class TemplateEmbeddingIteration(hk.Module):
"""Single Iteration of Template Embedding."""
def __init__(self, config, global_config,
name='template_embedding_iteration'):
super().__init__(name=name)
self.config = config
self.global_config = global_config
def __call__(self, act, pair_mask, is_training=True,
safe_key=None):
"""Build a single iteration of the template embedder.
Args:
act: [num_res, num_res, num_channel] Input pairwise activations.
pair_mask: [num_res, num_res] padding mask.
is_training: Whether to run in training mode.
safe_key: Safe pseudo-random generator key.
Returns:
[num_res, num_res, num_channel] tensor of activations.
"""
c = self.config
gc = self.global_config
if safe_key is None:
safe_key = prng.SafeKey(hk.next_rng_key())
dropout_wrapper_fn = functools.partial(
modules.dropout_wrapper,
is_training=is_training,
global_config=gc)
safe_key, *sub_keys = safe_key.split(20)
sub_keys = iter(sub_keys)
act = dropout_wrapper_fn(
modules.TriangleMultiplication(c.triangle_multiplication_outgoing, gc,
name='triangle_multiplication_outgoing'),
act,
pair_mask,
safe_key=next(sub_keys))
act = dropout_wrapper_fn(
modules.TriangleMultiplication(c.triangle_multiplication_incoming, gc,
name='triangle_multiplication_incoming'),
act,
pair_mask,
safe_key=next(sub_keys))
act = dropout_wrapper_fn(
modules.TriangleAttention(c.triangle_attention_starting_node, gc,
name='triangle_attention_starting_node'),
act,
pair_mask,
safe_key=next(sub_keys))
act = dropout_wrapper_fn(
modules.TriangleAttention(c.triangle_attention_ending_node, gc,
name='triangle_attention_ending_node'),
act,
pair_mask,
safe_key=next(sub_keys))
act = dropout_wrapper_fn(
modules.Transition(c.pair_transition, gc,
name='pair_transition'),
act,
pair_mask,
safe_key=next(sub_keys))
return act
def template_embedding_1d(batch, num_channel, global_config):
"""Embed templates into an (num_res, num_templates, num_channels) embedding.
Args:
batch: A batch containing:
template_aatype, (num_templates, num_res) aatype for the templates.
template_all_atom_positions, (num_templates, num_residues, 37, 3) atom
positions for the templates.
template_all_atom_mask, (num_templates, num_residues, 37) atom mask for
each template.
num_channel: The number of channels in the output.
global_config: The global_config.
Returns:
An embedding of shape (num_templates, num_res, num_channels) and a mask of
shape (num_templates, num_res).
"""
# Embed the templates aatypes.
aatype_one_hot = jax.nn.one_hot(batch['template_aatype'], 22, axis=-1)
num_templates = batch['template_aatype'].shape[0]
all_chi_angles = []
all_chi_masks = []
for i in range(num_templates):
atom_pos = geometry.Vec3Array.from_array(
batch['template_all_atom_positions'][i, :, :, :])
template_chi_angles, template_chi_mask = all_atom_multimer.compute_chi_angles(
atom_pos,
batch['template_all_atom_mask'][i, :, :],
batch['template_aatype'][i, :])
all_chi_angles.append(template_chi_angles)
all_chi_masks.append(template_chi_mask)
chi_angles = jnp.stack(all_chi_angles, axis=0)
chi_mask = jnp.stack(all_chi_masks, axis=0)
template_features = jnp.concatenate([
aatype_one_hot,
jnp.sin(chi_angles) * chi_mask,
jnp.cos(chi_angles) * chi_mask,
chi_mask], axis=-1)
template_mask = chi_mask[:, :, 0]
if global_config.bfloat16:
template_features = template_features.astype(jnp.bfloat16)
template_mask = template_mask.astype(jnp.bfloat16)
template_activations = common_modules.Linear(
num_channel,
initializer='relu',
name='template_single_embedding')(
template_features)
template_activations = jax.nn.relu(template_activations)
template_activations = common_modules.Linear(
num_channel,
initializer='relu',
name='template_projection')(
template_activations)
return template_activations, template_mask
|
alphafold-main
|
alphafold/model/modules_multimer.py
|
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Function to stack repeats of a layer function without shared parameters."""
import collections
import contextlib
import functools
import inspect
from typing import Any, Callable, Optional, Tuple, Union
import haiku as hk
import jax
import jax.numpy as jnp
LayerStackCarry = collections.namedtuple('LayerStackCarry', ['x', 'rng'])
LayerStackScanned = collections.namedtuple('LayerStackScanned',
['i', 'args_ys'])
# WrappedFn should take in arbitrarily nested `jnp.ndarray`, and return the
# exact same type. We cannot express this with `typing`. So we just use it
# to inform the user. In reality, the typing below will accept anything.
NestedArray = Any
WrappedFn = Callable[..., Union[NestedArray, Tuple[NestedArray]]]
def _check_no_varargs(f):
if list(inspect.signature(
f).parameters.values())[0].kind == inspect.Parameter.VAR_POSITIONAL:
raise ValueError(
'The function `f` should not have any `varargs` (that is *args) '
'argument. Instead, it should only use explicit positional '
'arguments.')
@contextlib.contextmanager
def nullcontext():
yield
def maybe_with_rng(key):
if key is not None:
return hk.with_rng(key)
else:
return nullcontext()
def maybe_fold_in(key, data):
if key is not None:
return jax.random.fold_in(key, data)
else:
return None
class _LayerStack(hk.Module):
"""Module to compose parameterized functions, implemented as a scan."""
def __init__(self,
count: int,
unroll: int,
name: Optional[str] = None):
"""Iterate a function `f` `count` times, with non-shared parameters."""
super().__init__(name=name)
self._count = count
self._unroll = unroll
def __call__(self, x, *args_ys):
count = self._count
if hk.running_init():
# At initialization time, we run just one layer but add an extra first
# dimension to every initialized tensor, making sure to use different
# random keys for different slices.
def creator(next_creator, shape, dtype, init, context):
del context
def multi_init(shape, dtype):
assert shape[0] == count
key = hk.maybe_next_rng_key()
def rng_context_init(slice_idx):
slice_key = maybe_fold_in(key, slice_idx)
with maybe_with_rng(slice_key):
return init(shape[1:], dtype)
return jax.vmap(rng_context_init)(jnp.arange(count))
return next_creator((count,) + tuple(shape), dtype, multi_init)
def getter(next_getter, value, context):
trailing_dims = len(context.original_shape) + 1
sliced_value = jax.lax.index_in_dim(
value, index=0, axis=value.ndim - trailing_dims, keepdims=False)
return next_getter(sliced_value)
with hk.experimental.custom_creator(
creator), hk.experimental.custom_getter(getter):
if len(args_ys) == 1 and args_ys[0] is None:
args0 = (None,)
else:
args0 = [
jax.lax.dynamic_index_in_dim(ys, 0, keepdims=False)
for ys in args_ys
]
x, z = self._call_wrapped(x, *args0)
if z is None:
return x, z
# Broadcast state to hold each layer state.
def broadcast_state(layer_state):
return jnp.broadcast_to(
layer_state, [count,] + list(layer_state.shape))
zs = jax.tree_util.tree_map(broadcast_state, z)
return x, zs
else:
# Use scan during apply, threading through random seed so that it's
# unique for each layer.
def layer(carry: LayerStackCarry, scanned: LayerStackScanned):
rng = carry.rng
def getter(next_getter, value, context):
# Getter slices the full param at the current loop index.
trailing_dims = len(context.original_shape) + 1
assert value.shape[value.ndim - trailing_dims] == count, (
f'Attempting to use a parameter stack of size '
f'{value.shape[value.ndim - trailing_dims]} for a LayerStack of '
f'size {count}.')
sliced_value = jax.lax.dynamic_index_in_dim(
value, scanned.i, axis=value.ndim - trailing_dims, keepdims=False)
return next_getter(sliced_value)
with hk.experimental.custom_getter(getter):
if rng is None:
out_x, z = self._call_wrapped(carry.x, *scanned.args_ys)
else:
rng, rng_ = jax.random.split(rng)
with hk.with_rng(rng_):
out_x, z = self._call_wrapped(carry.x, *scanned.args_ys)
return LayerStackCarry(x=out_x, rng=rng), z
carry = LayerStackCarry(x=x, rng=hk.maybe_next_rng_key())
scanned = LayerStackScanned(i=jnp.arange(count, dtype=jnp.int32),
args_ys=args_ys)
carry, zs = hk.scan(
layer, carry, scanned, length=count, unroll=self._unroll)
return carry.x, zs
def _call_wrapped(self,
x: jnp.ndarray,
*args,
) -> Tuple[jnp.ndarray, Optional[jnp.ndarray]]:
raise NotImplementedError()
class _LayerStackNoState(_LayerStack):
"""_LayerStack impl with no per-layer state provided to the function."""
def __init__(self,
f: WrappedFn,
count: int,
unroll: int,
name: Optional[str] = None):
super().__init__(count=count, unroll=unroll, name=name)
_check_no_varargs(f)
self._f = f
@hk.transparent
def _call_wrapped(self, args, y):
del y
ret = self._f(*args)
if len(args) == 1:
# If the function takes a single argument, the wrapped function receives
# a tuple of length 1, and therefore it must return a tuple of length 1.
ret = (ret,)
return ret, None
class _LayerStackWithState(_LayerStack):
"""_LayerStack impl with per-layer state provided to the function."""
def __init__(self,
f: WrappedFn,
count: int,
unroll: int,
name: Optional[str] = None):
super().__init__(count=count, unroll=unroll, name=name)
self._f = f
@hk.transparent
def _call_wrapped(self, x, *args):
return self._f(x, *args)
def layer_stack(num_layers: int,
with_state=False,
unroll: int = 1,
name: Optional[str] = None):
"""Utility to wrap a Haiku function and recursively apply it to an input.
A function is valid if it uses only explicit position parameters, and
its return type matches its input type. The position parameters can be
arbitrarily nested structures with `jnp.ndarray` at the leaf nodes. Note
that kwargs are not supported, neither are functions with variable number
of parameters (specified by `*args`).
If `with_state=False` then the new, wrapped function can be understood as
performing the following:
```
for i in range(num_layers):
x = f(x)
return x
```
And if `with_state=True`, assuming `f` takes two arguments on top of `x`:
```
for i in range(num_layers):
x, zs[i] = f(x, ys_0[i], ys_1[i])
return x, zs
```
The code using `layer_stack` for the above function would be:
```
def f(x, y_0, y_1):
...
return new_x, z
x, zs = layer_stack.layer_stack(num_layers,
with_state=True)(f)(x, ys_0, ys_1)
```
Crucially, any parameters created inside `f` will not be shared across
iterations.
Args:
num_layers: The number of times to iterate the wrapped function.
with_state: Whether or not to pass per-layer state to the wrapped function.
unroll: the unroll used by `scan`.
name: Name of the Haiku context.
Returns:
Callable that will produce a layer stack when called with a valid function.
"""
def iterate(f):
if with_state:
@functools.wraps(f)
def wrapped(x, *args):
for ys in args:
assert ys.shape[0] == num_layers
return _LayerStackWithState(
f, num_layers, unroll=unroll, name=name)(x, *args)
else:
_check_no_varargs(f)
@functools.wraps(f)
def wrapped(*args):
ret = _LayerStackNoState(
f, num_layers, unroll=unroll, name=name)(args, None)[0]
if len(args) == 1:
# If the function takes a single argument, we must also return a
# single value, and not a tuple of length 1.
ret = ret[0]
return ret
return wrapped
return iterate
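# A minimal usage sketch of `layer_stack` (illustrative only): stack four
# Linear+relu blocks with unshared parameters inside an hk.transform. The
# layer width, input shape and number of layers below are arbitrary.
def _layer_stack_usage_sketch():
  def forward(x):
    # Each of the 4 iterations gets its own Linear parameters; the block must
    # preserve the input shape so it can be iterated.
    block = lambda y: jax.nn.relu(hk.Linear(x.shape[-1])(y))
    return layer_stack(4)(block)(x)
  forward_t = hk.transform(forward)
  x = jnp.ones([2, 16])
  params = forward_t.init(jax.random.PRNGKey(0), x)
  return forward_t.apply(params, jax.random.PRNGKey(1), x)  # (2, 16)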
|
alphafold-main
|
alphafold/model/layer_stack.py
|
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Ops for all atom representations.
Generally we employ two different representations for all atom coordinates.
The first is atom37, where each heavy atom corresponds to a fixed position in
a 37-dimensional array. This mapping is not amino-acid specific: each slot
corresponds to an atom of a given name (for example, slot 12 always
corresponds to 'C delta 1'), and positions that are not present for a given
amino acid are zeroed out and denoted by a mask.
The other representation is atom14, a denser encoding with 14 slots. Here a
given slot corresponds to a different kind of atom depending on the amino
acid type; for example, slot 5 corresponds to 'N delta 2' for Asparagine but
to 'C delta 1' for Isoleucine. 14 is chosen because it is the maximum number
of heavy atoms in any standard amino acid.
The order of slots can be found in 'residue_constants.residue_atoms'.
Internally the model uses the atom14 representation because it is
computationally more efficient.
The internal atom14 representation is turned into atom37 at the output of the
network to facilitate easier conversion to existing protein data structures.
"""
from typing import Dict, Optional
from alphafold.common import residue_constants
from alphafold.model import r3
from alphafold.model import utils
import jax
import jax.numpy as jnp
import numpy as np
def squared_difference(x, y):
return jnp.square(x - y)
def get_chi_atom_indices():
"""Returns atom indices needed to compute chi angles for all residue types.
Returns:
A tensor of shape [residue_types=21, chis=4, atoms=4]. The residue types are
in the order specified in residue_constants.restypes + unknown residue type
at the end. For chi angles that are not defined on the residue, the atom
indices default to 0.
"""
chi_atom_indices = []
for residue_name in residue_constants.restypes:
residue_name = residue_constants.restype_1to3[residue_name]
residue_chi_angles = residue_constants.chi_angles_atoms[residue_name]
atom_indices = []
for chi_angle in residue_chi_angles:
atom_indices.append(
[residue_constants.atom_order[atom] for atom in chi_angle])
for _ in range(4 - len(atom_indices)):
atom_indices.append([0, 0, 0, 0]) # For chi angles not defined on the AA.
chi_atom_indices.append(atom_indices)
chi_atom_indices.append([[0, 0, 0, 0]] * 4) # For UNKNOWN residue.
return jnp.asarray(chi_atom_indices)
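# A small illustrative sketch of how the table above is used: gather, per
# residue, the (4, 4) block of atom37 indices that define its chi angles. The
# aatype values below are arbitrary examples (0 = ALA, 5 = GLN, 20 = UNK in
# the residue_constants.restypes ordering).
def _chi_atom_indices_sketch():
  chi_atom_indices = get_chi_atom_indices()  # (21, 4, 4)
  aatype = jnp.array([0, 5, 20], dtype=jnp.int32)
  return chi_atom_indices[aatype]  # (3, 4, 4)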
def atom14_to_atom37(atom14_data: jnp.ndarray, # (N, 14, ...)
batch: Dict[str, jnp.ndarray]
) -> jnp.ndarray: # (N, 37, ...)
"""Convert atom14 to atom37 representation."""
assert len(atom14_data.shape) in [2, 3]
assert 'residx_atom37_to_atom14' in batch
assert 'atom37_atom_exists' in batch
atom37_data = utils.batched_gather(atom14_data,
batch['residx_atom37_to_atom14'],
batch_dims=1)
if len(atom14_data.shape) == 2:
atom37_data *= batch['atom37_atom_exists']
elif len(atom14_data.shape) == 3:
atom37_data *= batch['atom37_atom_exists'][:, :,
None].astype(atom37_data.dtype)
return atom37_data
def atom37_to_atom14(
atom37_data: jnp.ndarray, # (N, 37, ...)
batch: Dict[str, jnp.ndarray]) -> jnp.ndarray: # (N, 14, ...)
"""Convert atom14 to atom37 representation."""
assert len(atom37_data.shape) in [2, 3]
assert 'residx_atom14_to_atom37' in batch
assert 'atom14_atom_exists' in batch
atom14_data = utils.batched_gather(atom37_data,
batch['residx_atom14_to_atom37'],
batch_dims=1)
if len(atom37_data.shape) == 2:
atom14_data *= batch['atom14_atom_exists'].astype(atom14_data.dtype)
elif len(atom37_data.shape) == 3:
atom14_data *= batch['atom14_atom_exists'][:, :,
None].astype(atom14_data.dtype)
return atom14_data
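# A minimal sketch of the gather pattern used by the two converters above. The
# index map here is a dummy all-zeros map; in the real pipeline
# 'residx_atom14_to_atom37' and the existence mask come from feature
# processing.
def _atom14_gather_sketch():
  num_res = 2
  atom37_data = jnp.arange(num_res * 37, dtype=jnp.float32).reshape(num_res, 37)
  batch = {
      'residx_atom14_to_atom37': jnp.zeros([num_res, 14], dtype=jnp.int32),
      'atom14_atom_exists': jnp.ones([num_res, 14], dtype=jnp.float32),
  }
  return atom37_to_atom14(atom37_data, batch)  # (num_res, 14)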
def atom37_to_frames(
aatype: jnp.ndarray, # (...)
all_atom_positions: jnp.ndarray, # (..., 37, 3)
all_atom_mask: jnp.ndarray, # (..., 37)
) -> Dict[str, jnp.ndarray]:
"""Computes the frames for the up to 8 rigid groups for each residue.
The rigid groups are defined by the possible torsions in a given amino acid.
We group the atoms according to their dependence on the torsion angles into
"rigid groups". E.g., the position of atoms in the chi2-group depend on
chi1 and chi2, but do not depend on chi3 or chi4.
Jumper et al. (2021) Suppl. Table 2 and corresponding text.
Args:
aatype: Amino acid type, given as array with integers.
all_atom_positions: atom37 representation of all atom coordinates.
all_atom_mask: atom37 representation of mask on all atom coordinates.
Returns:
Dictionary containing:
* 'rigidgroups_gt_frames': 8 Frames corresponding to 'all_atom_positions'
represented as flat 12 dimensional array.
* 'rigidgroups_gt_exists': Mask denoting whether the atom positions for
the given frame are available in the ground truth, e.g. if they were
resolved in the experiment.
* 'rigidgroups_group_exists': Mask denoting whether given group is in
principle present for given amino acid type.
* 'rigidgroups_group_is_ambiguous': Mask denoting whether frame is
affected by naming ambiguity.
* 'rigidgroups_alt_gt_frames': 8 Frames with alternative atom renaming
corresponding to 'all_atom_positions' represented as flat
12 dimensional array.
"""
# 0: 'backbone group',
# 1: 'pre-omega-group', (empty)
# 2: 'phi-group', (currently empty, because it defines only hydrogens)
# 3: 'psi-group',
# 4,5,6,7: 'chi1,2,3,4-group'
aatype_in_shape = aatype.shape
# If there is a batch axis, just flatten it away, and reshape everything
# back at the end of the function.
aatype = jnp.reshape(aatype, [-1])
all_atom_positions = jnp.reshape(all_atom_positions, [-1, 37, 3])
all_atom_mask = jnp.reshape(all_atom_mask, [-1, 37])
# Create an array with the atom names.
# shape (num_restypes, num_rigidgroups, 3_atoms): (21, 8, 3)
restype_rigidgroup_base_atom_names = np.full([21, 8, 3], '', dtype=object)
# 0: backbone frame
restype_rigidgroup_base_atom_names[:, 0, :] = ['C', 'CA', 'N']
# 3: 'psi-group'
restype_rigidgroup_base_atom_names[:, 3, :] = ['CA', 'C', 'O']
# 4,5,6,7: 'chi1,2,3,4-group'
for restype, restype_letter in enumerate(residue_constants.restypes):
resname = residue_constants.restype_1to3[restype_letter]
for chi_idx in range(4):
if residue_constants.chi_angles_mask[restype][chi_idx]:
atom_names = residue_constants.chi_angles_atoms[resname][chi_idx]
restype_rigidgroup_base_atom_names[
restype, chi_idx + 4, :] = atom_names[1:]
# Create mask for existing rigid groups.
restype_rigidgroup_mask = np.zeros([21, 8], dtype=np.float32)
restype_rigidgroup_mask[:, 0] = 1
restype_rigidgroup_mask[:, 3] = 1
restype_rigidgroup_mask[:20, 4:] = residue_constants.chi_angles_mask
# Translate atom names into atom37 indices.
lookuptable = residue_constants.atom_order.copy()
lookuptable[''] = 0
restype_rigidgroup_base_atom37_idx = np.vectorize(lambda x: lookuptable[x])(
restype_rigidgroup_base_atom_names)
# Compute the gather indices for all residues in the chain.
# shape (N, 8, 3)
residx_rigidgroup_base_atom37_idx = utils.batched_gather(
restype_rigidgroup_base_atom37_idx, aatype)
# Gather the base atom positions for each rigid group.
base_atom_pos = utils.batched_gather(
all_atom_positions,
residx_rigidgroup_base_atom37_idx,
batch_dims=1)
# Compute the Rigids.
gt_frames = r3.rigids_from_3_points(
point_on_neg_x_axis=r3.vecs_from_tensor(base_atom_pos[:, :, 0, :]),
origin=r3.vecs_from_tensor(base_atom_pos[:, :, 1, :]),
point_on_xy_plane=r3.vecs_from_tensor(base_atom_pos[:, :, 2, :])
)
# Compute a mask whether the group exists.
# (N, 8)
group_exists = utils.batched_gather(restype_rigidgroup_mask, aatype)
# Compute a mask whether ground truth exists for the group
gt_atoms_exist = utils.batched_gather( # shape (N, 8, 3)
all_atom_mask.astype(jnp.float32),
residx_rigidgroup_base_atom37_idx,
batch_dims=1)
gt_exists = jnp.min(gt_atoms_exist, axis=-1) * group_exists # (N, 8)
# Adapt backbone frame to old convention (mirror x-axis and z-axis).
rots = np.tile(np.eye(3, dtype=np.float32), [8, 1, 1])
rots[0, 0, 0] = -1
rots[0, 2, 2] = -1
gt_frames = r3.rigids_mul_rots(gt_frames, r3.rots_from_tensor3x3(rots))
# The frames for ambiguous rigid groups are just rotated by 180 degrees around
# the x-axis. The ambiguous group is always the last chi-group.
restype_rigidgroup_is_ambiguous = np.zeros([21, 8], dtype=np.float32)
restype_rigidgroup_rots = np.tile(np.eye(3, dtype=np.float32), [21, 8, 1, 1])
for resname, _ in residue_constants.residue_atom_renaming_swaps.items():
restype = residue_constants.restype_order[
residue_constants.restype_3to1[resname]]
chi_idx = int(sum(residue_constants.chi_angles_mask[restype]) - 1)
restype_rigidgroup_is_ambiguous[restype, chi_idx + 4] = 1
restype_rigidgroup_rots[restype, chi_idx + 4, 1, 1] = -1
restype_rigidgroup_rots[restype, chi_idx + 4, 2, 2] = -1
# Gather the ambiguity information for each residue.
residx_rigidgroup_is_ambiguous = utils.batched_gather(
restype_rigidgroup_is_ambiguous, aatype)
residx_rigidgroup_ambiguity_rot = utils.batched_gather(
restype_rigidgroup_rots, aatype)
# Create the alternative ground truth frames.
alt_gt_frames = r3.rigids_mul_rots(
gt_frames, r3.rots_from_tensor3x3(residx_rigidgroup_ambiguity_rot))
gt_frames_flat12 = r3.rigids_to_tensor_flat12(gt_frames)
alt_gt_frames_flat12 = r3.rigids_to_tensor_flat12(alt_gt_frames)
# reshape back to original residue layout
gt_frames_flat12 = jnp.reshape(gt_frames_flat12, aatype_in_shape + (8, 12))
gt_exists = jnp.reshape(gt_exists, aatype_in_shape + (8,))
group_exists = jnp.reshape(group_exists, aatype_in_shape + (8,))
residx_rigidgroup_is_ambiguous = jnp.reshape(residx_rigidgroup_is_ambiguous,
aatype_in_shape + (8,))
alt_gt_frames_flat12 = jnp.reshape(alt_gt_frames_flat12,
aatype_in_shape + (8, 12,))
return {
'rigidgroups_gt_frames': gt_frames_flat12, # (..., 8, 12)
'rigidgroups_gt_exists': gt_exists, # (..., 8)
'rigidgroups_group_exists': group_exists, # (..., 8)
'rigidgroups_group_is_ambiguous':
residx_rigidgroup_is_ambiguous, # (..., 8)
'rigidgroups_alt_gt_frames': alt_gt_frames_flat12, # (..., 8, 12)
}
def atom37_to_torsion_angles(
aatype: jnp.ndarray, # (B, N)
all_atom_pos: jnp.ndarray, # (B, N, 37, 3)
all_atom_mask: jnp.ndarray, # (B, N, 37)
placeholder_for_undefined=False,
) -> Dict[str, jnp.ndarray]:
"""Computes the 7 torsion angles (in sin, cos encoding) for each residue.
The 7 torsion angles are in the order
'[pre_omega, phi, psi, chi_1, chi_2, chi_3, chi_4]',
here pre_omega denotes the omega torsion angle between the given amino acid
and the previous amino acid.
Args:
aatype: Amino acid type, given as array with integers.
all_atom_pos: atom37 representation of all atom coordinates.
all_atom_mask: atom37 representation of mask on all atom coordinates.
placeholder_for_undefined: flag denoting whether to set masked torsion
angles to zero.
Returns:
Dict containing:
* 'torsion_angles_sin_cos': Array with shape (B, N, 7, 2) where the final
2 dimensions denote sin and cos respectively
* 'alt_torsion_angles_sin_cos': same as 'torsion_angles_sin_cos', but
with the angle shifted by pi for all chi angles affected by the naming
ambiguities.
* 'torsion_angles_mask': Mask for which chi angles are present.
"""
# Map aatype > 20 to 'Unknown' (20).
aatype = jnp.minimum(aatype, 20)
# Compute the backbone angles.
num_batch, num_res = aatype.shape
pad = jnp.zeros([num_batch, 1, 37, 3], jnp.float32)
prev_all_atom_pos = jnp.concatenate([pad, all_atom_pos[:, :-1, :, :]], axis=1)
pad = jnp.zeros([num_batch, 1, 37], jnp.float32)
prev_all_atom_mask = jnp.concatenate([pad, all_atom_mask[:, :-1, :]], axis=1)
# For each torsion angle collect the 4 atom positions that define this angle.
# shape (B, N, atoms=4, xyz=3)
pre_omega_atom_pos = jnp.concatenate(
[prev_all_atom_pos[:, :, 1:3, :], # prev CA, C
all_atom_pos[:, :, 0:2, :] # this N, CA
], axis=-2)
phi_atom_pos = jnp.concatenate(
[prev_all_atom_pos[:, :, 2:3, :], # prev C
all_atom_pos[:, :, 0:3, :] # this N, CA, C
], axis=-2)
psi_atom_pos = jnp.concatenate(
[all_atom_pos[:, :, 0:3, :], # this N, CA, C
all_atom_pos[:, :, 4:5, :] # this O
], axis=-2)
# Collect the masks from these atoms.
# Shape [batch, num_res]
pre_omega_mask = (
jnp.prod(prev_all_atom_mask[:, :, 1:3], axis=-1) # prev CA, C
* jnp.prod(all_atom_mask[:, :, 0:2], axis=-1)) # this N, CA
phi_mask = (
prev_all_atom_mask[:, :, 2] # prev C
* jnp.prod(all_atom_mask[:, :, 0:3], axis=-1)) # this N, CA, C
psi_mask = (
jnp.prod(all_atom_mask[:, :, 0:3], axis=-1) * # this N, CA, C
all_atom_mask[:, :, 4]) # this O
# Collect the atoms for the chi-angles.
# Compute the table of chi angle indices. Shape: [restypes, chis=4, atoms=4].
chi_atom_indices = get_chi_atom_indices()
# Select atoms to compute chis. Shape: [batch, num_res, chis=4, atoms=4].
atom_indices = utils.batched_gather(
params=chi_atom_indices, indices=aatype, axis=0, batch_dims=0)
# Gather atom positions. Shape: [batch, num_res, chis=4, atoms=4, xyz=3].
chis_atom_pos = utils.batched_gather(
params=all_atom_pos, indices=atom_indices, axis=-2,
batch_dims=2)
# Copy the chi angle mask, add the UNKNOWN residue. Shape: [restypes, 4].
chi_angles_mask = list(residue_constants.chi_angles_mask)
chi_angles_mask.append([0.0, 0.0, 0.0, 0.0])
chi_angles_mask = jnp.asarray(chi_angles_mask)
# Compute the chi angle mask, i.e. which chi angles exist according to the
# aatype. Shape [batch, num_res, chis=4].
chis_mask = utils.batched_gather(params=chi_angles_mask, indices=aatype,
axis=0, batch_dims=0)
# Constrain the chis_mask to those chis where the ground-truth coordinates of
# all four defining atoms are available.
# Gather the chi angle atoms mask. Shape: [batch, num_res, chis=4, atoms=4].
chi_angle_atoms_mask = utils.batched_gather(
params=all_atom_mask, indices=atom_indices, axis=-1,
batch_dims=2)
# Check if all 4 chi angle atoms were set. Shape: [batch, num_res, chis=4].
chi_angle_atoms_mask = jnp.prod(chi_angle_atoms_mask, axis=[-1])
chis_mask = chis_mask * (chi_angle_atoms_mask).astype(jnp.float32)
# Stack all torsion angle atom positions.
# Shape (B, N, torsions=7, atoms=4, xyz=3)
torsions_atom_pos = jnp.concatenate(
[pre_omega_atom_pos[:, :, None, :, :],
phi_atom_pos[:, :, None, :, :],
psi_atom_pos[:, :, None, :, :],
chis_atom_pos
], axis=2)
# Stack up masks for all torsion angles.
# shape (B, N, torsions=7)
torsion_angles_mask = jnp.concatenate(
[pre_omega_mask[:, :, None],
phi_mask[:, :, None],
psi_mask[:, :, None],
chis_mask
], axis=2)
# Create a frame from the first three atoms:
# First atom: point on x-y-plane
# Second atom: point on negative x-axis
# Third atom: origin
# r3.Rigids (B, N, torsions=7)
torsion_frames = r3.rigids_from_3_points(
point_on_neg_x_axis=r3.vecs_from_tensor(torsions_atom_pos[:, :, :, 1, :]),
origin=r3.vecs_from_tensor(torsions_atom_pos[:, :, :, 2, :]),
point_on_xy_plane=r3.vecs_from_tensor(torsions_atom_pos[:, :, :, 0, :]))
# Compute the position of the fourth atom in this frame (its y and z
# coordinates define the chi angle)
# r3.Vecs (B, N, torsions=7)
forth_atom_rel_pos = r3.rigids_mul_vecs(
r3.invert_rigids(torsion_frames),
r3.vecs_from_tensor(torsions_atom_pos[:, :, :, 3, :]))
# Normalize to have the sin and cos of the torsion angle.
# jnp.ndarray (B, N, torsions=7, sincos=2)
torsion_angles_sin_cos = jnp.stack(
[forth_atom_rel_pos.z, forth_atom_rel_pos.y], axis=-1)
torsion_angles_sin_cos /= jnp.sqrt(
jnp.sum(jnp.square(torsion_angles_sin_cos), axis=-1, keepdims=True)
+ 1e-8)
# Mirror psi, because we computed it from the Oxygen-atom.
torsion_angles_sin_cos *= jnp.asarray(
[1., 1., -1., 1., 1., 1., 1.])[None, None, :, None]
# Create alternative angles for ambiguous atom names.
chi_is_ambiguous = utils.batched_gather(
jnp.asarray(residue_constants.chi_pi_periodic), aatype)
mirror_torsion_angles = jnp.concatenate(
[jnp.ones([num_batch, num_res, 3]),
1.0 - 2.0 * chi_is_ambiguous], axis=-1)
alt_torsion_angles_sin_cos = (
torsion_angles_sin_cos * mirror_torsion_angles[:, :, :, None])
if placeholder_for_undefined:
# Add placeholder torsions in place of undefined torsion angles
# (e.g. N-terminus pre-omega)
placeholder_torsions = jnp.stack([
jnp.ones(torsion_angles_sin_cos.shape[:-1]),
jnp.zeros(torsion_angles_sin_cos.shape[:-1])
], axis=-1)
torsion_angles_sin_cos = torsion_angles_sin_cos * torsion_angles_mask[
..., None] + placeholder_torsions * (1 - torsion_angles_mask[..., None])
alt_torsion_angles_sin_cos = alt_torsion_angles_sin_cos * torsion_angles_mask[
..., None] + placeholder_torsions * (1 - torsion_angles_mask[..., None])
return {
'torsion_angles_sin_cos': torsion_angles_sin_cos, # (B, N, 7, 2)
'alt_torsion_angles_sin_cos': alt_torsion_angles_sin_cos, # (B, N, 7, 2)
'torsion_angles_mask': torsion_angles_mask # (B, N, 7)
}
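# A plain-JAX sketch of the geometry behind the sin/cos torsion encoding
# above, for a single angle defined by four points p0..p3, each of shape (3,).
# This illustrates the maths only; the model code computes the same quantity
# via r3 frames as done in `atom37_to_torsion_angles`.
def _dihedral_sin_cos_sketch(p0, p1, p2, p3):
  b0, b1, b2 = p0 - p1, p2 - p1, p3 - p2
  b1_unit = b1 / (jnp.linalg.norm(b1) + 1e-8)
  # Components of the outer bonds perpendicular to the central bond b1.
  v = b0 - jnp.dot(b0, b1_unit) * b1_unit
  w = b2 - jnp.dot(b2, b1_unit) * b1_unit
  sin_angle = jnp.dot(jnp.cross(b1_unit, v), w)
  cos_angle = jnp.dot(v, w)
  sin_cos = jnp.stack([sin_angle, cos_angle])
  return sin_cos / (jnp.linalg.norm(sin_cos) + 1e-8)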
def torsion_angles_to_frames(
aatype: jnp.ndarray, # (N)
backb_to_global: r3.Rigids, # (N)
torsion_angles_sin_cos: jnp.ndarray # (N, 7, 2)
) -> r3.Rigids: # (N, 8)
"""Compute rigid group frames from torsion angles.
Jumper et al. (2021) Suppl. Alg. 24 "computeAllAtomCoordinates" lines 2-10
Jumper et al. (2021) Suppl. Alg. 25 "makeRotX"
Args:
aatype: aatype for each residue
backb_to_global: Rigid transformations describing transformation from
backbone frame to global frame.
torsion_angles_sin_cos: sin and cosine of the 7 torsion angles
Returns:
Frames corresponding to all the Sidechain Rigid Transforms
"""
assert len(aatype.shape) == 1
assert len(backb_to_global.rot.xx.shape) == 1
assert len(torsion_angles_sin_cos.shape) == 3
assert torsion_angles_sin_cos.shape[1] == 7
assert torsion_angles_sin_cos.shape[2] == 2
# Gather the default frames for all rigid groups.
# r3.Rigids with shape (N, 8)
m = utils.batched_gather(residue_constants.restype_rigid_group_default_frame,
aatype)
default_frames = r3.rigids_from_tensor4x4(m)
# Create the rotation matrices according to the given angles (each frame is
# defined such that its rotation is around the x-axis).
sin_angles = torsion_angles_sin_cos[..., 0]
cos_angles = torsion_angles_sin_cos[..., 1]
# insert zero rotation for backbone group.
num_residues, = aatype.shape
sin_angles = jnp.concatenate([jnp.zeros([num_residues, 1]), sin_angles],
axis=-1)
cos_angles = jnp.concatenate([jnp.ones([num_residues, 1]), cos_angles],
axis=-1)
zeros = jnp.zeros_like(sin_angles)
ones = jnp.ones_like(sin_angles)
# all_rots are r3.Rots with shape (N, 8)
all_rots = r3.Rots(ones, zeros, zeros,
zeros, cos_angles, -sin_angles,
zeros, sin_angles, cos_angles)
# Apply rotations to the frames.
all_frames = r3.rigids_mul_rots(default_frames, all_rots)
# chi2, chi3, and chi4 frames do not transform to the backbone frame but to
# the previous frame. So chain them up accordingly.
chi2_frame_to_frame = jax.tree_map(lambda x: x[:, 5], all_frames)
chi3_frame_to_frame = jax.tree_map(lambda x: x[:, 6], all_frames)
chi4_frame_to_frame = jax.tree_map(lambda x: x[:, 7], all_frames)
chi1_frame_to_backb = jax.tree_map(lambda x: x[:, 4], all_frames)
chi2_frame_to_backb = r3.rigids_mul_rigids(chi1_frame_to_backb,
chi2_frame_to_frame)
chi3_frame_to_backb = r3.rigids_mul_rigids(chi2_frame_to_backb,
chi3_frame_to_frame)
chi4_frame_to_backb = r3.rigids_mul_rigids(chi3_frame_to_backb,
chi4_frame_to_frame)
# Recombine them to a r3.Rigids with shape (N, 8).
def _concat_frames(xall, x5, x6, x7):
return jnp.concatenate(
[xall[:, 0:5], x5[:, None], x6[:, None], x7[:, None]], axis=-1)
all_frames_to_backb = jax.tree_map(
_concat_frames,
all_frames,
chi2_frame_to_backb,
chi3_frame_to_backb,
chi4_frame_to_backb)
# Create the global frames.
# shape (N, 8)
all_frames_to_global = r3.rigids_mul_rigids(
jax.tree_map(lambda x: x[:, None], backb_to_global),
all_frames_to_backb)
return all_frames_to_global
def frames_and_literature_positions_to_atom14_pos(
aatype: jnp.ndarray, # (N)
all_frames_to_global: r3.Rigids # (N, 8)
) -> r3.Vecs: # (N, 14)
"""Put atom literature positions (atom14 encoding) in each rigid group.
Jumper et al. (2021) Suppl. Alg. 24 "computeAllAtomCoordinates" line 11
Args:
aatype: aatype for each residue.
all_frames_to_global: All per residue coordinate frames.
Returns:
Positions of all atom coordinates in global frame.
"""
# Pick the appropriate transform for every atom.
residx_to_group_idx = utils.batched_gather(
residue_constants.restype_atom14_to_rigid_group, aatype)
group_mask = jax.nn.one_hot(
residx_to_group_idx, num_classes=8) # shape (N, 14, 8)
# r3.Rigids with shape (N, 14)
map_atoms_to_global = jax.tree_map(
lambda x: jnp.sum(x[:, None, :] * group_mask, axis=-1),
all_frames_to_global)
# Gather the literature atom positions for each residue.
# r3.Vecs with shape (N, 14)
lit_positions = r3.vecs_from_tensor(
utils.batched_gather(
residue_constants.restype_atom14_rigid_group_positions, aatype))
# Transform each atom from its local frame to the global frame.
# r3.Vecs with shape (N, 14)
pred_positions = r3.rigids_mul_vecs(map_atoms_to_global, lit_positions)
# Mask out non-existing atoms.
mask = utils.batched_gather(residue_constants.restype_atom14_mask, aatype)
pred_positions = jax.tree_map(lambda x: x * mask, pred_positions)
return pred_positions
def extreme_ca_ca_distance_violations(
pred_atom_positions: jnp.ndarray, # (N, 37(14), 3)
pred_atom_mask: jnp.ndarray, # (N, 37(14))
residue_index: jnp.ndarray, # (N)
max_angstrom_tolerance=1.5
) -> jnp.ndarray:
"""Counts residues whose Ca is a large distance from its neighbour.
Measures the fraction of CA-CA pairs between consecutive amino acids that are
more than 'max_angstrom_tolerance' apart.
Args:
pred_atom_positions: Atom positions in atom37/14 representation
pred_atom_mask: Atom mask in atom37/14 representation
residue_index: Residue index for the given amino acid; this is assumed to be
monotonically increasing.
max_angstrom_tolerance: Maximum distance allowed to not count as violation.
Returns:
Fraction of consecutive CA-CA pairs with violation.
"""
this_ca_pos = pred_atom_positions[:-1, 1, :] # (N - 1, 3)
this_ca_mask = pred_atom_mask[:-1, 1] # (N - 1)
next_ca_pos = pred_atom_positions[1:, 1, :] # (N - 1, 3)
next_ca_mask = pred_atom_mask[1:, 1] # (N - 1)
has_no_gap_mask = ((residue_index[1:] - residue_index[:-1]) == 1.0).astype(
jnp.float32)
ca_ca_distance = jnp.sqrt(
1e-6 + jnp.sum(squared_difference(this_ca_pos, next_ca_pos), axis=-1))
violations = (ca_ca_distance -
residue_constants.ca_ca) > max_angstrom_tolerance
mask = this_ca_mask * next_ca_mask * has_no_gap_mask
return utils.mask_mean(mask=mask, value=violations)
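# A toy sketch of the check above: two consecutive residues whose CA atoms are
# ~10 A apart (vs. the ideal ~3.8 A CA-CA distance) count as a violation, so
# the returned fraction is 1. All values here are dummies.
def _ca_ca_violation_sketch():
  pred_atom_positions = jnp.zeros([2, 37, 3]).at[1, 1, 0].set(10.0)
  pred_atom_mask = jnp.ones([2, 37])
  residue_index = jnp.array([0., 1.])
  return extreme_ca_ca_distance_violations(
      pred_atom_positions, pred_atom_mask, residue_index)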
def between_residue_bond_loss(
pred_atom_positions: jnp.ndarray, # (N, 37(14), 3)
pred_atom_mask: jnp.ndarray, # (N, 37(14))
residue_index: jnp.ndarray, # (N)
aatype: jnp.ndarray, # (N)
tolerance_factor_soft=12.0,
tolerance_factor_hard=12.0
) -> Dict[str, jnp.ndarray]:
"""Flat-bottom loss to penalize structural violations between residues.
This is a loss penalizing any violation of the geometry around the peptide
bond between consecutive amino acids. This loss corresponds to
Jumper et al. (2021) Suppl. Sec. 1.9.11, eq 44, 45.
Args:
pred_atom_positions: Atom positions in atom37/14 representation
pred_atom_mask: Atom mask in atom37/14 representation
residue_index: Residue index for the given amino acid; this is assumed to be
monotonically increasing.
aatype: Amino acid type of given residue
tolerance_factor_soft: soft tolerance factor measured in standard deviations
of PDB distributions
tolerance_factor_hard: hard tolerance factor measured in standard deviations
of PDB distributions
Returns:
Dict containing:
* 'c_n_loss_mean': Loss for peptide bond length violations
* 'ca_c_n_loss_mean': Loss for violations of bond angle around C spanned
by CA, C, N
* 'c_n_ca_loss_mean': Loss for violations of bond angle around N spanned
by C, N, CA
* 'per_residue_loss_sum': sum of all losses for each residue
* 'per_residue_violation_mask': mask denoting all residues with violation
present.
"""
assert len(pred_atom_positions.shape) == 3
assert len(pred_atom_mask.shape) == 2
assert len(residue_index.shape) == 1
assert len(aatype.shape) == 1
# Get the positions of the relevant backbone atoms.
this_ca_pos = pred_atom_positions[:-1, 1, :] # (N - 1, 3)
this_ca_mask = pred_atom_mask[:-1, 1] # (N - 1)
this_c_pos = pred_atom_positions[:-1, 2, :] # (N - 1, 3)
this_c_mask = pred_atom_mask[:-1, 2] # (N - 1)
next_n_pos = pred_atom_positions[1:, 0, :] # (N - 1, 3)
next_n_mask = pred_atom_mask[1:, 0] # (N - 1)
next_ca_pos = pred_atom_positions[1:, 1, :] # (N - 1, 3)
next_ca_mask = pred_atom_mask[1:, 1] # (N - 1)
has_no_gap_mask = ((residue_index[1:] - residue_index[:-1]) == 1.0).astype(
jnp.float32)
# Compute loss for the C--N bond.
c_n_bond_length = jnp.sqrt(
1e-6 + jnp.sum(squared_difference(this_c_pos, next_n_pos), axis=-1))
# The C-N bond to proline has slightly different length because of the ring.
next_is_proline = (
aatype[1:] == residue_constants.resname_to_idx['PRO']).astype(jnp.float32)
gt_length = (
(1. - next_is_proline) * residue_constants.between_res_bond_length_c_n[0]
+ next_is_proline * residue_constants.between_res_bond_length_c_n[1])
gt_stddev = (
(1. - next_is_proline) *
residue_constants.between_res_bond_length_stddev_c_n[0] +
next_is_proline * residue_constants.between_res_bond_length_stddev_c_n[1])
c_n_bond_length_error = jnp.sqrt(1e-6 +
jnp.square(c_n_bond_length - gt_length))
c_n_loss_per_residue = jax.nn.relu(
c_n_bond_length_error - tolerance_factor_soft * gt_stddev)
mask = this_c_mask * next_n_mask * has_no_gap_mask
c_n_loss = jnp.sum(mask * c_n_loss_per_residue) / (jnp.sum(mask) + 1e-6)
c_n_violation_mask = mask * (
c_n_bond_length_error > (tolerance_factor_hard * gt_stddev))
# Compute loss for the angles.
ca_c_bond_length = jnp.sqrt(1e-6 + jnp.sum(
squared_difference(this_ca_pos, this_c_pos), axis=-1))
n_ca_bond_length = jnp.sqrt(1e-6 + jnp.sum(
squared_difference(next_n_pos, next_ca_pos), axis=-1))
c_ca_unit_vec = (this_ca_pos - this_c_pos) / ca_c_bond_length[:, None]
c_n_unit_vec = (next_n_pos - this_c_pos) / c_n_bond_length[:, None]
n_ca_unit_vec = (next_ca_pos - next_n_pos) / n_ca_bond_length[:, None]
ca_c_n_cos_angle = jnp.sum(c_ca_unit_vec * c_n_unit_vec, axis=-1)
gt_angle = residue_constants.between_res_cos_angles_ca_c_n[0]
gt_stddev = residue_constants.between_res_bond_length_stddev_c_n[0]
ca_c_n_cos_angle_error = jnp.sqrt(
1e-6 + jnp.square(ca_c_n_cos_angle - gt_angle))
ca_c_n_loss_per_residue = jax.nn.relu(
ca_c_n_cos_angle_error - tolerance_factor_soft * gt_stddev)
mask = this_ca_mask * this_c_mask * next_n_mask * has_no_gap_mask
ca_c_n_loss = jnp.sum(mask * ca_c_n_loss_per_residue) / (jnp.sum(mask) + 1e-6)
ca_c_n_violation_mask = mask * (ca_c_n_cos_angle_error >
(tolerance_factor_hard * gt_stddev))
c_n_ca_cos_angle = jnp.sum((-c_n_unit_vec) * n_ca_unit_vec, axis=-1)
gt_angle = residue_constants.between_res_cos_angles_c_n_ca[0]
gt_stddev = residue_constants.between_res_cos_angles_c_n_ca[1]
c_n_ca_cos_angle_error = jnp.sqrt(
1e-6 + jnp.square(c_n_ca_cos_angle - gt_angle))
c_n_ca_loss_per_residue = jax.nn.relu(
c_n_ca_cos_angle_error - tolerance_factor_soft * gt_stddev)
mask = this_c_mask * next_n_mask * next_ca_mask * has_no_gap_mask
c_n_ca_loss = jnp.sum(mask * c_n_ca_loss_per_residue) / (jnp.sum(mask) + 1e-6)
c_n_ca_violation_mask = mask * (
c_n_ca_cos_angle_error > (tolerance_factor_hard * gt_stddev))
# Compute a per residue loss (equally distribute the loss to both
# neighbouring residues).
per_residue_loss_sum = (c_n_loss_per_residue +
ca_c_n_loss_per_residue +
c_n_ca_loss_per_residue)
per_residue_loss_sum = 0.5 * (jnp.pad(per_residue_loss_sum, [[0, 1]]) +
jnp.pad(per_residue_loss_sum, [[1, 0]]))
# Compute hard violations.
violation_mask = jnp.max(
jnp.stack([c_n_violation_mask,
ca_c_n_violation_mask,
c_n_ca_violation_mask]), axis=0)
violation_mask = jnp.maximum(
jnp.pad(violation_mask, [[0, 1]]),
jnp.pad(violation_mask, [[1, 0]]))
return {'c_n_loss_mean': c_n_loss, # shape ()
'ca_c_n_loss_mean': ca_c_n_loss, # shape ()
'c_n_ca_loss_mean': c_n_ca_loss, # shape ()
'per_residue_loss_sum': per_residue_loss_sum, # shape (N)
'per_residue_violation_mask': violation_mask # shape (N)
}
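# A sketch of the flat-bottom penalty pattern used throughout the function
# above: no penalty within `tolerance_factor` standard deviations of the
# reference value, and linear growth beyond that. Arguments are generic
# arrays, not tied to any particular bond or angle.
def _flat_bottom_penalty_sketch(value, gt_mean, gt_stddev,
                                tolerance_factor=12.0):
  error = jnp.sqrt(1e-6 + jnp.square(value - gt_mean))
  return jax.nn.relu(error - tolerance_factor * gt_stddev)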
def between_residue_clash_loss(
atom14_pred_positions: jnp.ndarray, # (N, 14, 3)
atom14_atom_exists: jnp.ndarray, # (N, 14)
atom14_atom_radius: jnp.ndarray, # (N, 14)
residue_index: jnp.ndarray, # (N)
overlap_tolerance_soft=1.5,
overlap_tolerance_hard=1.5
) -> Dict[str, jnp.ndarray]:
"""Loss to penalize steric clashes between residues.
This is a loss penalizing any steric clashes due to non-bonded atoms in
different residues coming too close. This loss corresponds to the part with
different residues of
Jumper et al. (2021) Suppl. Sec. 1.9.11, eq 46.
Args:
atom14_pred_positions: Predicted positions of atoms in
global prediction frame
atom14_atom_exists: Mask denoting whether atom at positions exists for given
amino acid type
atom14_atom_radius: Van der Waals radius for each atom.
residue_index: Residue index for given amino acid.
overlap_tolerance_soft: Soft tolerance factor.
overlap_tolerance_hard: Hard tolerance factor.
Returns:
Dict containing:
* 'mean_loss': average clash loss
* 'per_atom_loss_sum': sum of all clash losses per atom, shape (N, 14)
* 'per_atom_clash_mask': mask whether atom clashes with any other atom
shape (N, 14)
"""
assert len(atom14_pred_positions.shape) == 3
assert len(atom14_atom_exists.shape) == 2
assert len(atom14_atom_radius.shape) == 2
assert len(residue_index.shape) == 1
# Create the distance matrix.
# (N, N, 14, 14)
dists = jnp.sqrt(1e-10 + jnp.sum(
squared_difference(
atom14_pred_positions[:, None, :, None, :],
atom14_pred_positions[None, :, None, :, :]),
axis=-1))
# Create the mask for valid distances.
# shape (N, N, 14, 14)
dists_mask = (atom14_atom_exists[:, None, :, None] *
atom14_atom_exists[None, :, None, :])
# Mask out all the duplicate entries in the lower triangular matrix.
# Also mask out the diagonal (atom-pairs from the same residue) -- these atoms
# are handled separately.
dists_mask *= (
residue_index[:, None, None, None] < residue_index[None, :, None, None])
# Backbone C--N bond between subsequent residues is no clash.
c_one_hot = jax.nn.one_hot(2, num_classes=14)
n_one_hot = jax.nn.one_hot(0, num_classes=14)
neighbour_mask = ((residue_index[:, None, None, None] +
1) == residue_index[None, :, None, None])
c_n_bonds = neighbour_mask * c_one_hot[None, None, :,
None] * n_one_hot[None, None, None, :]
dists_mask *= (1. - c_n_bonds)
# Disulfide bridge between two cysteines is no clash.
cys_sg_idx = residue_constants.restype_name_to_atom14_names['CYS'].index('SG')
cys_sg_one_hot = jax.nn.one_hot(cys_sg_idx, num_classes=14)
disulfide_bonds = (cys_sg_one_hot[None, None, :, None] *
cys_sg_one_hot[None, None, None, :])
dists_mask *= (1. - disulfide_bonds)
# Compute the lower bound for the allowed distances.
# shape (N, N, 14, 14)
dists_lower_bound = dists_mask * (atom14_atom_radius[:, None, :, None] +
atom14_atom_radius[None, :, None, :])
# Compute the error.
# shape (N, N, 14, 14)
dists_to_low_error = dists_mask * jax.nn.relu(
dists_lower_bound - overlap_tolerance_soft - dists)
# Compute the mean loss.
# shape ()
mean_loss = (jnp.sum(dists_to_low_error)
/ (1e-6 + jnp.sum(dists_mask)))
# Compute the per atom loss sum.
# shape (N, 14)
per_atom_loss_sum = (jnp.sum(dists_to_low_error, axis=[0, 2]) +
jnp.sum(dists_to_low_error, axis=[1, 3]))
# Compute the hard clash mask.
# shape (N, N, 14, 14)
clash_mask = dists_mask * (
dists < (dists_lower_bound - overlap_tolerance_hard))
# Compute the per atom clash.
# shape (N, 14)
per_atom_clash_mask = jnp.maximum(
jnp.max(clash_mask, axis=[0, 2]),
jnp.max(clash_mask, axis=[1, 3]))
return {'mean_loss': mean_loss, # shape ()
'per_atom_loss_sum': per_atom_loss_sum, # shape (N, 14)
'per_atom_clash_mask': per_atom_clash_mask # shape (N, 14)
}
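# A per-pair sketch of the clash penalty above: two atoms are penalised when
# their distance drops below the sum of their Van der Waals radii minus a
# tolerance. Inputs are generic arrays of matching shape.
def _clash_penalty_sketch(dists, radius_a, radius_b, tolerance=1.5):
  lower_bound = radius_a + radius_b
  return jax.nn.relu(lower_bound - tolerance - dists)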
def within_residue_violations(
atom14_pred_positions: jnp.ndarray, # (N, 14, 3)
atom14_atom_exists: jnp.ndarray, # (N, 14)
atom14_dists_lower_bound: jnp.ndarray, # (N, 14, 14)
atom14_dists_upper_bound: jnp.ndarray, # (N, 14, 14)
tighten_bounds_for_loss=0.0,
) -> Dict[str, jnp.ndarray]:
"""Loss to penalize steric clashes within residues.
This is a loss penalizing any steric violations or clashes of non-bonded atoms
within a given residue. This loss corresponds to the part with
the same residues of
Jumper et al. (2021) Suppl. Sec. 1.9.11, eq 46.
Args:
atom14_pred_positions: Predicted positions of atoms in
global prediction frame
atom14_atom_exists: Mask denoting whether atom at positions exists for given
amino acid type
atom14_dists_lower_bound: Lower bound on allowed distances.
atom14_dists_upper_bound: Upper bound on allowed distances
tighten_bounds_for_loss: Extra factor to tighten loss
Returns:
Dict containing:
* 'per_atom_loss_sum': sum of all clash losses per atom, shape (N, 14)
* 'per_atom_violations': mask denoting whether the atom violates a distance
bound with any other atom within the same residue, shape (N, 14)
"""
assert len(atom14_pred_positions.shape) == 3
assert len(atom14_atom_exists.shape) == 2
assert len(atom14_dists_lower_bound.shape) == 3
assert len(atom14_dists_upper_bound.shape) == 3
# Compute the mask for each residue.
# shape (N, 14, 14)
dists_masks = (1. - jnp.eye(14, 14)[None])
dists_masks *= (atom14_atom_exists[:, :, None] *
atom14_atom_exists[:, None, :])
# Distance matrix
# shape (N, 14, 14)
dists = jnp.sqrt(1e-10 + jnp.sum(
squared_difference(
atom14_pred_positions[:, :, None, :],
atom14_pred_positions[:, None, :, :]),
axis=-1))
# Compute the loss.
# shape (N, 14, 14)
dists_to_low_error = jax.nn.relu(
atom14_dists_lower_bound + tighten_bounds_for_loss - dists)
dists_to_high_error = jax.nn.relu(
dists - (atom14_dists_upper_bound - tighten_bounds_for_loss))
loss = dists_masks * (dists_to_low_error + dists_to_high_error)
# Compute the per atom loss sum.
# shape (N, 14)
per_atom_loss_sum = (jnp.sum(loss, axis=1) +
jnp.sum(loss, axis=2))
# Compute the violations mask.
# shape (N, 14, 14)
violations = dists_masks * ((dists < atom14_dists_lower_bound) |
(dists > atom14_dists_upper_bound))
# Compute the per atom violations.
# shape (N, 14)
per_atom_violations = jnp.maximum(
jnp.max(violations, axis=1), jnp.max(violations, axis=2))
return {'per_atom_loss_sum': per_atom_loss_sum, # shape (N, 14)
'per_atom_violations': per_atom_violations # shape (N, 14)
}
def find_optimal_renaming(
atom14_gt_positions: jnp.ndarray, # (N, 14, 3)
atom14_alt_gt_positions: jnp.ndarray, # (N, 14, 3)
atom14_atom_is_ambiguous: jnp.ndarray, # (N, 14)
atom14_gt_exists: jnp.ndarray, # (N, 14)
atom14_pred_positions: jnp.ndarray, # (N, 14, 3)
atom14_atom_exists: jnp.ndarray, # (N, 14)
) -> jnp.ndarray: # (N):
"""Find optimal renaming for ground truth that maximizes LDDT.
Jumper et al. (2021) Suppl. Alg. 26
"renameSymmetricGroundTruthAtoms" lines 1-5
Args:
atom14_gt_positions: Ground truth positions in global frame of ground truth.
atom14_alt_gt_positions: Alternate ground truth positions in global frame of
ground truth with coordinates of ambiguous atoms swapped relative to
'atom14_gt_positions'.
atom14_atom_is_ambiguous: Mask denoting whether atom is among ambiguous
atoms, see Jumper et al. (2021) Suppl. Table 3
atom14_gt_exists: Mask denoting whether atom at positions exists in ground
truth.
atom14_pred_positions: Predicted positions of atoms in
global prediction frame
atom14_atom_exists: Mask denoting whether atom at positions exists for given
amino acid type
Returns:
Float array of shape [N] with 1. where atom14_alt_gt_positions is closer to
prediction and 0. otherwise
"""
assert len(atom14_gt_positions.shape) == 3
assert len(atom14_alt_gt_positions.shape) == 3
assert len(atom14_atom_is_ambiguous.shape) == 2
assert len(atom14_gt_exists.shape) == 2
assert len(atom14_pred_positions.shape) == 3
assert len(atom14_atom_exists.shape) == 2
# Create the pred distance matrix.
# shape (N, N, 14, 14)
pred_dists = jnp.sqrt(1e-10 + jnp.sum(
squared_difference(
atom14_pred_positions[:, None, :, None, :],
atom14_pred_positions[None, :, None, :, :]),
axis=-1))
# Compute distances for ground truth with original and alternative names.
# shape (N, N, 14, 14)
gt_dists = jnp.sqrt(1e-10 + jnp.sum(
squared_difference(
atom14_gt_positions[:, None, :, None, :],
atom14_gt_positions[None, :, None, :, :]),
axis=-1))
alt_gt_dists = jnp.sqrt(1e-10 + jnp.sum(
squared_difference(
atom14_alt_gt_positions[:, None, :, None, :],
atom14_alt_gt_positions[None, :, None, :, :]),
axis=-1))
# Compute LDDT's.
# shape (N, N, 14, 14)
lddt = jnp.sqrt(1e-10 + squared_difference(pred_dists, gt_dists))
alt_lddt = jnp.sqrt(1e-10 + squared_difference(pred_dists, alt_gt_dists))
# Create a mask for ambiguous atoms in rows vs. non-ambiguous atoms
# in cols.
# shape (N ,N, 14, 14)
mask = (atom14_gt_exists[:, None, :, None] * # rows
atom14_atom_is_ambiguous[:, None, :, None] * # rows
atom14_gt_exists[None, :, None, :] * # cols
(1. - atom14_atom_is_ambiguous[None, :, None, :])) # cols
# Aggregate distances for each residue to the non-ambiguous atoms.
# shape (N)
per_res_lddt = jnp.sum(mask * lddt, axis=[1, 2, 3])
alt_per_res_lddt = jnp.sum(mask * alt_lddt, axis=[1, 2, 3])
# Decide for each residue, whether alternative naming is better.
# shape (N)
alt_naming_is_better = (alt_per_res_lddt < per_res_lddt).astype(jnp.float32)
return alt_naming_is_better # shape (N)
def frame_aligned_point_error(
pred_frames: r3.Rigids, # shape (num_frames)
target_frames: r3.Rigids, # shape (num_frames)
frames_mask: jnp.ndarray, # shape (num_frames)
pred_positions: r3.Vecs, # shape (num_positions)
target_positions: r3.Vecs, # shape (num_positions)
positions_mask: jnp.ndarray, # shape (num_positions)
length_scale: float,
l1_clamp_distance: Optional[float] = None,
epsilon=1e-4) -> jnp.ndarray: # shape ()
"""Measure point error under different alignments.
Jumper et al. (2021) Suppl. Alg. 28 "computeFAPE"
Computes error between two structures with B points under A alignments derived
from the given pairs of frames.
Args:
pred_frames: num_frames reference frames for 'pred_positions'.
target_frames: num_frames reference frames for 'target_positions'.
frames_mask: Mask for frame pairs to use.
pred_positions: num_positions predicted positions of the structure.
target_positions: num_positions target positions of the structure.
positions_mask: Mask on which positions to score.
length_scale: length scale to divide loss by.
l1_clamp_distance: Distance cutoff on error beyond which gradients will
be zero.
epsilon: small value used to regularize denominator for masked average.
Returns:
Masked Frame Aligned Point Error.
"""
assert pred_frames.rot.xx.ndim == 1
assert target_frames.rot.xx.ndim == 1
assert frames_mask.ndim == 1, frames_mask.ndim
assert pred_positions.x.ndim == 1
assert target_positions.x.ndim == 1
assert positions_mask.ndim == 1
# Compute array of predicted positions in the predicted frames.
# r3.Vecs (num_frames, num_positions)
local_pred_pos = r3.rigids_mul_vecs(
jax.tree_map(lambda r: r[:, None], r3.invert_rigids(pred_frames)),
jax.tree_map(lambda x: x[None, :], pred_positions))
# Compute array of target positions in the target frames.
# r3.Vecs (num_frames, num_positions)
local_target_pos = r3.rigids_mul_vecs(
jax.tree_map(lambda r: r[:, None], r3.invert_rigids(target_frames)),
jax.tree_map(lambda x: x[None, :], target_positions))
# Compute errors between the structures.
# jnp.ndarray (num_frames, num_positions)
error_dist = jnp.sqrt(
r3.vecs_squared_distance(local_pred_pos, local_target_pos)
+ epsilon)
if l1_clamp_distance:
error_dist = jnp.clip(error_dist, 0, l1_clamp_distance)
normed_error = error_dist / length_scale
normed_error *= jnp.expand_dims(frames_mask, axis=-1)
normed_error *= jnp.expand_dims(positions_mask, axis=-2)
normalization_factor = (
jnp.sum(frames_mask, axis=-1) *
jnp.sum(positions_mask, axis=-1))
return (jnp.sum(normed_error, axis=(-2, -1)) /
(epsilon + normalization_factor))
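# An illustrative sketch of calling frame_aligned_point_error with identity
# frames and identical predicted/target positions, which yields a loss of
# approximately zero. All shapes and values are dummies; the r3 namedtuples
# are constructed directly for brevity.
def _fape_identity_sketch():
  num_frames, num_positions = 2, 3
  ones = jnp.ones([num_frames])
  zeros = jnp.zeros([num_frames])
  identity_frames = r3.Rigids(
      rot=r3.Rots(ones, zeros, zeros,
                  zeros, ones, zeros,
                  zeros, zeros, ones),
      trans=r3.Vecs(zeros, zeros, zeros))
  positions = r3.vecs_from_tensor(
      jnp.arange(num_positions * 3, dtype=jnp.float32).reshape(
          num_positions, 3))
  return frame_aligned_point_error(
      pred_frames=identity_frames,
      target_frames=identity_frames,
      frames_mask=jnp.ones([num_frames]),
      pred_positions=positions,
      target_positions=positions,
      positions_mask=jnp.ones([num_positions]),
      length_scale=10.0,
      l1_clamp_distance=10.0)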
def _make_renaming_matrices():
"""Matrices to map atoms to symmetry partners in ambiguous case."""
# As the atom naming is ambiguous for 7 of the 20 amino acids, provide
# alternative ground-truth coordinates where the naming is swapped.
restype_3 = [
residue_constants.restype_1to3[res] for res in residue_constants.restypes
]
restype_3 += ['UNK']
# Matrices for renaming ambiguous atoms.
all_matrices = {res: np.eye(14, dtype=np.float32) for res in restype_3}
for resname, swap in residue_constants.residue_atom_renaming_swaps.items():
correspondences = np.arange(14)
for source_atom_swap, target_atom_swap in swap.items():
source_index = residue_constants.restype_name_to_atom14_names[
resname].index(source_atom_swap)
target_index = residue_constants.restype_name_to_atom14_names[
resname].index(target_atom_swap)
correspondences[source_index] = target_index
correspondences[target_index] = source_index
renaming_matrix = np.zeros((14, 14), dtype=np.float32)
for index, correspondence in enumerate(correspondences):
renaming_matrix[index, correspondence] = 1.
all_matrices[resname] = renaming_matrix.astype(np.float32)
renaming_matrices = np.stack([all_matrices[restype] for restype in restype_3])
return renaming_matrices
RENAMING_MATRICES = _make_renaming_matrices()
def get_alt_atom14(aatype, positions, mask):
"""Get alternative atom14 positions.
Constructs renamed atom positions for ambiguous residues.
Jumper et al. (2021) Suppl. Table 3 "Ambiguous atom names due to 180 degree-
rotation-symmetry"
Args:
aatype: Amino acid at given position
positions: Atom positions as r3.Vecs in atom14 representation, (N, 14)
mask: Atom masks in atom14 representation, (N, 14)
Returns:
renamed atom positions, renamed atom mask
"""
# pick the transformation matrices for the given residue sequence
# shape (num_res, 14, 14)
renaming_transform = utils.batched_gather(
jnp.asarray(RENAMING_MATRICES), aatype)
positions = jax.tree_map(lambda x: x[:, :, None], positions)
alternative_positions = jax.tree_map(
lambda x: jnp.sum(x, axis=1), positions * renaming_transform)
# Create the mask for the alternative ground truth (differs from the
# ground truth mask, if only one of the atoms in an ambiguous pair has a
# ground truth position)
alternative_mask = jnp.sum(mask[..., None] * renaming_transform, axis=1)
return alternative_positions, alternative_mask
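# A small sketch of how the renaming matrices above act: for ASP the matrix is
# a permutation of the 14 atom slots that swaps the OD1 and OD2 columns and
# leaves every other slot in place. Illustrative only.
def _asp_renaming_matrix_sketch():
  asp_idx = residue_constants.restype_order[
      residue_constants.restype_3to1['ASP']]
  return RENAMING_MATRICES[asp_idx]  # (14, 14) permutation matrix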
|
alphafold-main
|
alphafold/model/all_atom.py
|
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Modules and utilities for the structure module in the multimer system."""
import functools
import numbers
from typing import Any, Dict, Iterable, Mapping, Optional, Tuple, Union
from alphafold.common import residue_constants
from alphafold.model import all_atom_multimer
from alphafold.model import common_modules
from alphafold.model import geometry
from alphafold.model import modules
from alphafold.model import prng
from alphafold.model import utils
from alphafold.model.geometry import utils as geometry_utils
import haiku as hk
import jax
import jax.numpy as jnp
import ml_collections
import numpy as np
EPSILON = 1e-8
Float = Union[float, jnp.ndarray]
def squared_difference(x: jnp.ndarray, y: jnp.ndarray) -> jnp.ndarray:
"""Computes Squared difference between two arrays."""
return jnp.square(x - y)
def make_backbone_affine(
positions: geometry.Vec3Array,
mask: jnp.ndarray,
aatype: jnp.ndarray,
) -> Tuple[geometry.Rigid3Array, jnp.ndarray]:
"""Make backbone Rigid3Array and mask."""
del aatype
a = residue_constants.atom_order['N']
b = residue_constants.atom_order['CA']
c = residue_constants.atom_order['C']
rigid_mask = (mask[:, a] * mask[:, b] * mask[:, c]).astype(
jnp.float32)
rigid = all_atom_multimer.make_transform_from_reference(
a_xyz=positions[:, a], b_xyz=positions[:, b], c_xyz=positions[:, c])
return rigid, rigid_mask
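# An illustrative sketch of make_backbone_affine on dummy data: random atom37
# positions for a 4-residue chain are turned into one backbone frame and one
# mask entry per residue. Inputs are placeholders, not real features.
def _backbone_affine_sketch():
  num_res = 4
  positions = geometry.Vec3Array.from_array(
      jax.random.normal(jax.random.PRNGKey(0), [num_res, 37, 3]))
  mask = jnp.ones([num_res, 37])
  aatype = jnp.zeros([num_res], dtype=jnp.int32)
  rigid, rigid_mask = make_backbone_affine(positions, mask, aatype)
  return rigid, rigid_mask  # Rigid3Array (num_res,), mask (num_res,)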
class QuatRigid(hk.Module):
"""Module for projecting Rigids via a quaternion."""
def __init__(self,
global_config: ml_collections.ConfigDict,
rigid_shape: Union[int, Iterable[int]] = tuple(),
full_quat: bool = False,
init: str = 'zeros',
name: str = 'quat_rigid'):
"""Module projecting a Rigid Object.
For this module the rotation is parametrized as a quaternion.
If 'full_quat' is True, a 4-vector is produced for the rotation, which is
normalized and treated as a quaternion.
When 'full_quat' is False, a 3-vector is produced and the first component of
the quaternion is set to 1.
Args:
global_config: Global Config, used to set certain properties of underlying
Linear module, see common_modules.Linear for details.
rigid_shape: Shape of Rigids relative to shape of activations, e.g. when
activations have shape (n,) and this is (m,) output will be (n, m)
full_quat: Whether to parametrize rotation using full quaternion.
init: initializer to use, see common_modules.Linear for details
name: Name to use for module.
"""
self.init = init
self.global_config = global_config
if isinstance(rigid_shape, int):
self.rigid_shape = (rigid_shape,)
else:
self.rigid_shape = tuple(rigid_shape)
self.full_quat = full_quat
super(QuatRigid, self).__init__(name=name)
def __call__(self, activations: jnp.ndarray) -> geometry.Rigid3Array:
"""Executes Module.
    This returns a set of rigids with the same shape as the activations,
    projecting out the channel dimension; rigid_shape controls the trailing
    dimensions.
For example when activations is shape (12, 5) and rigid_shape is (3, 2)
then the shape of the output rigids will be (12, 3, 2).
This also supports passing in an empty tuple for rigid shape, in that case
the example would produce a rigid of shape (12,).
Args:
activations: Activations to use for projection, shape [..., num_channel]
Returns:
Rigid transformations with shape [...] + rigid_shape
"""
if self.full_quat:
rigid_dim = 7
else:
rigid_dim = 6
linear_dims = self.rigid_shape + (rigid_dim,)
rigid_flat = common_modules.Linear(
linear_dims,
initializer=self.init,
precision=jax.lax.Precision.HIGHEST,
name='rigid')(
activations)
rigid_flat = geometry_utils.unstack(rigid_flat)
if self.full_quat:
qw, qx, qy, qz = rigid_flat[:4]
translation = rigid_flat[4:]
else:
qx, qy, qz = rigid_flat[:3]
qw = jnp.ones_like(qx)
translation = rigid_flat[3:]
rotation = geometry.Rot3Array.from_quaternion(
qw, qx, qy, qz, normalize=True)
translation = geometry.Vec3Array(*translation)
return geometry.Rigid3Array(rotation, translation)
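# Illustrative sketch (an assumption, not the geometry.Rot3Array code): the
# 3-vector parametrization used above, with qw fixed to 1, normalized and
# converted to a rotation matrix via the standard quaternion formula.
import numpy as np

def quat3_to_rotation(qx, qy, qz):
  q = np.array([1.0, qx, qy, qz])
  w, x, y, z = q / np.linalg.norm(q)
  return np.array([
      [1 - 2 * (y * y + z * z), 2 * (x * y - w * z), 2 * (x * z + w * y)],
      [2 * (x * y + w * z), 1 - 2 * (x * x + z * z), 2 * (y * z - w * x)],
      [2 * (x * z - w * y), 2 * (y * z + w * x), 1 - 2 * (x * x + y * y)],
  ])

# A 'zeros'-initialized head outputs (0, 0, 0), i.e. the identity rotation,
# so the rigid update starts as a no-op.
assert np.allclose(quat3_to_rotation(0.0, 0.0, 0.0), np.eye(3))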
class PointProjection(hk.Module):
"""Given input reprensentation and frame produces points in global frame."""
def __init__(self,
num_points: Union[Iterable[int], int],
global_config: ml_collections.ConfigDict,
return_local_points: bool = False,
name: str = 'point_projection'):
"""Constructs Linear Module.
Args:
num_points: number of points to project. Can be tuple when outputting
multiple dimensions
global_config: Global Config, passed through to underlying Linear
return_local_points: Whether to return points in local frame as well.
name: name of module, used for name scopes.
"""
if isinstance(num_points, numbers.Integral):
self.num_points = (num_points,)
else:
self.num_points = tuple(num_points)
self.return_local_points = return_local_points
self.global_config = global_config
super().__init__(name=name)
def __call__(
self, activations: jnp.ndarray, rigids: geometry.Rigid3Array
) -> Union[geometry.Vec3Array, Tuple[geometry.Vec3Array, geometry.Vec3Array]]:
output_shape = self.num_points
output_shape = output_shape[:-1] + (3 * output_shape[-1],)
points_local = common_modules.Linear(
output_shape,
precision=jax.lax.Precision.HIGHEST,
name='point_projection')(
activations)
points_local = jnp.split(points_local, 3, axis=-1)
points_local = geometry.Vec3Array(*points_local)
rigids = rigids[(...,) + (None,) * len(output_shape)]
points_global = rigids.apply_to_point(points_local)
if self.return_local_points:
return points_global, points_local
else:
return points_global
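# Illustrative sketch (plain numpy, not the geometry library): for a single
# frame, applying a rigid to local points amounts to rotating them and adding
# the frame's translation. Values are made up.
import numpy as np

rotation = np.eye(3)                        # (3, 3) frame orientation
translation = np.array([1.0, 2.0, 3.0])     # frame origin in the global frame
points_local = np.array([[0.0, 0.0, 0.0],
                         [1.0, 0.0, 0.0]])  # (num_points, 3) in the local frame

points_global = points_local @ rotation.T + translation
print(points_global)                        # [[1. 2. 3.], [2. 2. 3.]]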
class InvariantPointAttention(hk.Module):
"""Invariant point attention module.
The high-level idea is that this attention module works over a set of points
and associated orientations in 3D space (e.g. protein residues).
Each residue outputs a set of queries and keys as points in their local
reference frame. The attention is then defined as the euclidean distance
between the queries and keys in the global frame.
"""
def __init__(self,
config: ml_collections.ConfigDict,
global_config: ml_collections.ConfigDict,
dist_epsilon: float = 1e-8,
name: str = 'invariant_point_attention'):
"""Initialize.
Args:
config: iterative Fold Head Config
global_config: Global Config of Model.
dist_epsilon: Small value to avoid NaN in distance calculation.
name: Sonnet name.
"""
super().__init__(name=name)
self._dist_epsilon = dist_epsilon
self._zero_initialize_last = global_config.zero_init
self.config = config
self.global_config = global_config
def __call__(
self,
inputs_1d: jnp.ndarray,
inputs_2d: jnp.ndarray,
mask: jnp.ndarray,
rigid: geometry.Rigid3Array,
) -> jnp.ndarray:
"""Compute geometric aware attention.
Given a set of query residues (defined by affines and associated scalar
features), this function computes geometric aware attention between the
query residues and target residues.
The residues produce points in their local reference frame, which
are converted into the global frame to get attention via euclidean distance.
Equivalently the target residues produce points in their local frame to be
used as attention values, which are converted into the query residues local
frames.
Args:
inputs_1d: (N, C) 1D input embedding that is the basis for the
scalar queries.
      inputs_2d: (N, M, C') 2D input embedding, used for bias values in the
        attention between query_inputs_1d and target_inputs_1d.
mask: (N, 1) mask to indicate query_inputs_1d that participate in
the attention.
rigid: Rigid object describing the position and orientation of
every element in query_inputs_1d.
Returns:
Transformation of the input embedding.
"""
num_head = self.config.num_head
attn_logits = 0.
num_point_qk = self.config.num_point_qk
# Each point pair (q, k) contributes Var [0.5 ||q||^2 - <q, k>] = 9 / 2
point_variance = max(num_point_qk, 1) * 9. / 2
point_weights = np.sqrt(1.0 / point_variance)
# This is equivalent to jax.nn.softplus, but avoids a bug in the test...
softplus = lambda x: jnp.logaddexp(x, jnp.zeros_like(x))
raw_point_weights = hk.get_parameter(
'trainable_point_weights',
shape=[num_head],
# softplus^{-1} (1)
init=hk.initializers.Constant(np.log(np.exp(1.) - 1.)))
# Trainable per-head weights for points.
trainable_point_weights = softplus(raw_point_weights)
point_weights *= trainable_point_weights
q_point = PointProjection([num_head, num_point_qk],
self.global_config,
name='q_point_projection')(inputs_1d,
rigid)
k_point = PointProjection([num_head, num_point_qk],
self.global_config,
name='k_point_projection')(inputs_1d,
rigid)
dist2 = geometry.square_euclidean_distance(
q_point[:, None, :, :], k_point[None, :, :, :], epsilon=0.)
attn_qk_point = -0.5 * jnp.sum(point_weights[:, None] * dist2, axis=-1)
attn_logits += attn_qk_point
num_scalar_qk = self.config.num_scalar_qk
# We assume that all queries and keys come iid from N(0, 1) distribution
# and compute the variances of the attention logits.
# Each scalar pair (q, k) contributes Var q*k = 1
scalar_variance = max(num_scalar_qk, 1) * 1.
scalar_weights = np.sqrt(1.0 / scalar_variance)
q_scalar = common_modules.Linear([num_head, num_scalar_qk],
use_bias=False,
name='q_scalar_projection')(
inputs_1d)
k_scalar = common_modules.Linear([num_head, num_scalar_qk],
use_bias=False,
name='k_scalar_projection')(
inputs_1d)
q_scalar *= scalar_weights
attn_logits += jnp.einsum('qhc,khc->qkh', q_scalar, k_scalar)
attention_2d = common_modules.Linear(
num_head, name='attention_2d')(inputs_2d)
attn_logits += attention_2d
mask_2d = mask * jnp.swapaxes(mask, -1, -2)
attn_logits -= 1e5 * (1. - mask_2d[..., None])
attn_logits *= np.sqrt(1. / 3) # Normalize by number of logit terms (3)
attn = jax.nn.softmax(attn_logits, axis=-2)
num_scalar_v = self.config.num_scalar_v
v_scalar = common_modules.Linear([num_head, num_scalar_v],
use_bias=False,
name='v_scalar_projection')(
inputs_1d)
# [num_query_residues, num_head, num_scalar_v]
result_scalar = jnp.einsum('qkh, khc->qhc', attn, v_scalar)
num_point_v = self.config.num_point_v
v_point = PointProjection([num_head, num_point_v],
self.global_config,
name='v_point_projection')(inputs_1d,
rigid)
result_point_global = jax.tree_map(
lambda x: jnp.sum(attn[..., None] * x, axis=-3), v_point[None])
# Features used in the linear output projection. Should have the size
# [num_query_residues, ?]
output_features = []
num_query_residues, _ = inputs_1d.shape
flat_shape = [num_query_residues, -1]
result_scalar = jnp.reshape(result_scalar, flat_shape)
output_features.append(result_scalar)
result_point_global = jax.tree_map(lambda r: jnp.reshape(r, flat_shape),
result_point_global)
result_point_local = rigid[..., None].apply_inverse_to_point(
result_point_global)
output_features.extend(
[result_point_local.x, result_point_local.y, result_point_local.z])
point_norms = result_point_local.norm(self._dist_epsilon)
output_features.append(point_norms)
# Dimensions: h = heads, i and j = residues,
# c = inputs_2d channels
# Contraction happens over the second residue dimension, similarly to how
# the usual attention is performed.
result_attention_over_2d = jnp.einsum('ijh, ijc->ihc', attn, inputs_2d)
output_features.append(jnp.reshape(result_attention_over_2d, flat_shape))
final_init = 'zeros' if self._zero_initialize_last else 'linear'
final_act = jnp.concatenate(output_features, axis=-1)
return common_modules.Linear(
self.config.num_channel,
initializer=final_init,
name='output_projection')(final_act)
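# Illustrative sketch (toy numbers, not the module above): how the three logit
# terms are variance-normalized before being summed and softmaxed.
import numpy as np

num_point_qk, num_scalar_qk = 4, 16
point_weights = np.sqrt(1.0 / (max(num_point_qk, 1) * 9.0 / 2))
scalar_weights = np.sqrt(1.0 / max(num_scalar_qk, 1))

# The trainable per-head point weights start at softplus(log(e - 1)) == 1.
softplus = lambda x: np.logaddexp(x, 0.0)
assert np.isclose(softplus(np.log(np.e - 1.0)), 1.0)

# With three roughly unit-variance terms, scaling their sum by sqrt(1/3)
# keeps the combined logit variance comparable to a single term.
scalar_logit, point_logit, bias_2d = 0.3, -0.1, 0.05
attn_logit = (scalar_logit + point_logit + bias_2d) * np.sqrt(1.0 / 3)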
class FoldIteration(hk.Module):
"""A single iteration of iterative folding.
First, each residue attends to all residues using InvariantPointAttention.
Then, we apply transition layers to update the hidden representations.
Finally, we use the hidden representations to produce an update to the
affine of each residue.
"""
def __init__(self,
config: ml_collections.ConfigDict,
global_config: ml_collections.ConfigDict,
name: str = 'fold_iteration'):
super().__init__(name=name)
self.config = config
self.global_config = global_config
def __call__(
self,
activations: Mapping[str, Any],
aatype: jnp.ndarray,
sequence_mask: jnp.ndarray,
update_rigid: bool,
is_training: bool,
initial_act: jnp.ndarray,
safe_key: Optional[prng.SafeKey] = None,
static_feat_2d: Optional[jnp.ndarray] = None,
) -> Tuple[Dict[str, Any], Dict[str, Any]]:
c = self.config
if safe_key is None:
safe_key = prng.SafeKey(hk.next_rng_key())
def safe_dropout_fn(tensor, safe_key):
return modules.apply_dropout(
tensor=tensor,
safe_key=safe_key,
rate=0.0 if self.global_config.deterministic else c.dropout,
is_training=is_training)
rigid = activations['rigid']
act = activations['act']
attention_module = InvariantPointAttention(
self.config, self.global_config)
# Attention
act += attention_module(
inputs_1d=act,
inputs_2d=static_feat_2d,
mask=sequence_mask,
rigid=rigid)
safe_key, *sub_keys = safe_key.split(3)
sub_keys = iter(sub_keys)
act = safe_dropout_fn(act, next(sub_keys))
act = common_modules.LayerNorm(
axis=-1,
create_scale=True,
create_offset=True,
name='attention_layer_norm')(
act)
final_init = 'zeros' if self.global_config.zero_init else 'linear'
# Transition
input_act = act
for i in range(c.num_layer_in_transition):
init = 'relu' if i < c.num_layer_in_transition - 1 else final_init
act = common_modules.Linear(
c.num_channel,
initializer=init,
name='transition')(
act)
if i < c.num_layer_in_transition - 1:
act = jax.nn.relu(act)
act += input_act
act = safe_dropout_fn(act, next(sub_keys))
act = common_modules.LayerNorm(
axis=-1,
create_scale=True,
create_offset=True,
name='transition_layer_norm')(act)
if update_rigid:
# Rigid update
rigid_update = QuatRigid(
self.global_config, init=final_init)(
act)
rigid = rigid @ rigid_update
sc = MultiRigidSidechain(c.sidechain, self.global_config)(
rigid.scale_translation(c.position_scale), [act, initial_act], aatype)
outputs = {'rigid': rigid, 'sc': sc}
rotation = jax.tree_map(jax.lax.stop_gradient, rigid.rotation)
rigid = geometry.Rigid3Array(rotation, rigid.translation)
new_activations = {
'act': act,
'rigid': rigid
}
return new_activations, outputs
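# Illustrative sketch (plain numpy, not the haiku modules above): the residual
# transition pattern, a small ReLU MLP whose output is added back onto its
# input and then layer-normalized over the channel axis. Shapes and weights
# are made up; the final layer is zero-initialized as with 'zeros' init.
import numpy as np

def layer_norm(x, eps=1e-5):
  mean = x.mean(axis=-1, keepdims=True)
  var = x.var(axis=-1, keepdims=True)
  return (x - mean) / np.sqrt(var + eps)

rng = np.random.default_rng(0)
act = rng.normal(size=(8, 32))                 # (num_res, num_channel)
w1 = rng.normal(size=(32, 32))
w2 = np.zeros((32, 32))                        # zero-init final layer

residual = np.maximum(act @ w1, 0.0) @ w2      # zero at initialization
act = layer_norm(act + residual)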
def generate_monomer_rigids(representations: Mapping[str, jnp.ndarray],
batch: Mapping[str, jnp.ndarray],
config: ml_collections.ConfigDict,
global_config: ml_collections.ConfigDict,
is_training: bool,
safe_key: prng.SafeKey
) -> Dict[str, Any]:
"""Generate predicted Rigid's for a single chain.
This is the main part of the iterative fold head - it iteratively applies
folding to produce a set of predicted residue positions.
Args:
representations: Embeddings dictionary.
batch: Batch dictionary.
config: config for the iterative fold head.
global_config: global config.
is_training: is training.
safe_key: A prng.SafeKey object that wraps a PRNG key.
Returns:
A dictionary containing residue Rigid's and sidechain positions.
"""
c = config
sequence_mask = batch['seq_mask'][:, None]
act = common_modules.LayerNorm(
axis=-1, create_scale=True, create_offset=True, name='single_layer_norm')(
representations['single'])
initial_act = act
act = common_modules.Linear(
c.num_channel, name='initial_projection')(act)
# Sequence Mask has extra 1 at the end.
rigid = geometry.Rigid3Array.identity(sequence_mask.shape[:-1])
fold_iteration = FoldIteration(
c, global_config, name='fold_iteration')
assert len(batch['seq_mask'].shape) == 1
activations = {
'act':
act,
'rigid':
rigid
}
act_2d = common_modules.LayerNorm(
axis=-1,
create_scale=True,
create_offset=True,
name='pair_layer_norm')(
representations['pair'])
safe_keys = safe_key.split(c.num_layer)
outputs = []
for key in safe_keys:
activations, output = fold_iteration(
activations,
initial_act=initial_act,
static_feat_2d=act_2d,
aatype=batch['aatype'],
safe_key=key,
sequence_mask=sequence_mask,
update_rigid=True,
is_training=is_training,
)
outputs.append(output)
output = jax.tree_map(lambda *x: jnp.stack(x), *outputs)
# Pass along for LDDT-Head.
output['act'] = activations['act']
return output
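# Illustrative sketch: the jax.tree_map(lambda *x: jnp.stack(x), *outputs)
# idiom above turns a list of per-iteration output dicts into a single dict
# whose leaves carry a leading layer dimension. Shapes are made up.
import jax
import jax.numpy as jnp

outputs = [{'rigid': jnp.zeros((5, 7)),
            'sc': {'atom_pos': jnp.zeros((5, 14, 3))}}
           for _ in range(3)]
stacked = jax.tree_map(lambda *x: jnp.stack(x), *outputs)
print(stacked['rigid'].shape)              # (3, 5, 7)
print(stacked['sc']['atom_pos'].shape)     # (3, 5, 14, 3)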
class StructureModule(hk.Module):
"""StructureModule as a network head.
Jumper et al. (2021) Suppl. Alg. 20 "StructureModule"
"""
def __init__(self,
config: ml_collections.ConfigDict,
global_config: ml_collections.ConfigDict,
name: str = 'structure_module'):
super().__init__(name=name)
self.config = config
self.global_config = global_config
def __call__(self,
representations: Mapping[str, jnp.ndarray],
batch: Mapping[str, Any],
is_training: bool,
safe_key: Optional[prng.SafeKey] = None,
compute_loss: bool = False
) -> Dict[str, Any]:
c = self.config
ret = {}
if safe_key is None:
safe_key = prng.SafeKey(hk.next_rng_key())
output = generate_monomer_rigids(
representations=representations,
batch=batch,
config=self.config,
global_config=self.global_config,
is_training=is_training,
safe_key=safe_key)
ret['traj'] = output['rigid'].scale_translation(c.position_scale).to_array()
ret['sidechains'] = output['sc']
ret['sidechains']['atom_pos'] = ret['sidechains']['atom_pos'].to_array()
ret['sidechains']['frames'] = ret['sidechains']['frames'].to_array()
if 'local_atom_pos' in ret['sidechains']:
ret['sidechains']['local_atom_pos'] = ret['sidechains'][
'local_atom_pos'].to_array()
ret['sidechains']['local_frames'] = ret['sidechains'][
'local_frames'].to_array()
aatype = batch['aatype']
seq_mask = batch['seq_mask']
atom14_pred_mask = all_atom_multimer.get_atom14_mask(
aatype) * seq_mask[:, None]
atom14_pred_positions = output['sc']['atom_pos'][-1]
ret['final_atom14_positions'] = atom14_pred_positions # (N, 14, 3)
ret['final_atom14_mask'] = atom14_pred_mask # (N, 14)
atom37_mask = all_atom_multimer.get_atom37_mask(aatype) * seq_mask[:, None]
atom37_pred_positions = all_atom_multimer.atom14_to_atom37(
atom14_pred_positions, aatype)
atom37_pred_positions *= atom37_mask[:, :, None]
ret['final_atom_positions'] = atom37_pred_positions # (N, 37, 3)
ret['final_atom_mask'] = atom37_mask # (N, 37)
ret['final_rigids'] = ret['traj'][-1]
ret['act'] = output['act']
if compute_loss:
return ret
else:
no_loss_features = ['final_atom_positions', 'final_atom_mask', 'act']
no_loss_ret = {k: ret[k] for k in no_loss_features}
return no_loss_ret
def loss(self,
value: Mapping[str, Any],
batch: Mapping[str, Any]
) -> Dict[str, Any]:
raise NotImplementedError(
'This function should be called on a batch with reordered chains (see '
        'Evans et al (2021) Section 7.3. Multi-Chain Permutation Alignment).')
ret = {'loss': 0.}
ret['metrics'] = {}
aatype = batch['aatype']
all_atom_positions = batch['all_atom_positions']
all_atom_positions = geometry.Vec3Array.from_array(all_atom_positions)
all_atom_mask = batch['all_atom_mask']
seq_mask = batch['seq_mask']
residue_index = batch['residue_index']
gt_rigid, gt_affine_mask = make_backbone_affine(all_atom_positions,
all_atom_mask,
aatype)
chi_angles, chi_mask = all_atom_multimer.compute_chi_angles(
all_atom_positions, all_atom_mask, aatype)
pred_mask = all_atom_multimer.get_atom14_mask(aatype)
pred_mask *= seq_mask[:, None]
pred_positions = value['final_atom14_positions']
pred_positions = geometry.Vec3Array.from_array(pred_positions)
gt_positions, gt_mask, alt_naming_is_better = compute_atom14_gt(
aatype, all_atom_positions, all_atom_mask, pred_positions)
violations = find_structural_violations(
aatype=aatype,
residue_index=residue_index,
mask=pred_mask,
pred_positions=pred_positions,
config=self.config,
asym_id=batch['asym_id'])
sidechains = value['sidechains']
gt_chi_angles = get_renamed_chi_angles(aatype, chi_angles,
alt_naming_is_better)
# Several violation metrics:
violation_metrics = compute_violation_metrics(
residue_index=residue_index,
mask=pred_mask,
seq_mask=seq_mask,
pred_positions=pred_positions,
violations=violations)
ret['metrics'].update(violation_metrics)
target_rigid = geometry.Rigid3Array.from_array(value['traj'])
gt_frames_mask = gt_affine_mask
# Split the loss into within-chain and between-chain components.
intra_chain_mask = batch['asym_id'][:, None] == batch['asym_id'][None, :]
intra_chain_bb_loss, intra_chain_fape = backbone_loss(
gt_rigid=gt_rigid,
gt_frames_mask=gt_frames_mask,
gt_positions_mask=gt_affine_mask,
target_rigid=target_rigid,
config=self.config.intra_chain_fape,
pair_mask=intra_chain_mask)
interface_bb_loss, interface_fape = backbone_loss(
gt_rigid=gt_rigid,
gt_frames_mask=gt_frames_mask,
gt_positions_mask=gt_affine_mask,
target_rigid=target_rigid,
config=self.config.interface_fape,
pair_mask=1. - intra_chain_mask)
bb_loss = intra_chain_bb_loss + interface_bb_loss
ret['fape'] = intra_chain_fape + interface_fape
ret['bb_loss'] = bb_loss
ret['loss'] += bb_loss
pred_frames = geometry.Rigid3Array.from_array(sidechains['frames'])
pred_positions = geometry.Vec3Array.from_array(sidechains['atom_pos'])
gt_sc_frames, gt_sc_frames_mask = compute_frames(
aatype=aatype,
all_atom_positions=all_atom_positions,
all_atom_mask=all_atom_mask,
use_alt=alt_naming_is_better)
sc_loss = sidechain_loss(
gt_frames=gt_sc_frames,
gt_frames_mask=gt_sc_frames_mask,
gt_positions=gt_positions,
gt_mask=gt_mask,
pred_frames=pred_frames,
pred_positions=pred_positions,
config=self.config)
ret['loss'] = ((1 - self.config.sidechain.weight_frac) * ret['loss'] +
self.config.sidechain.weight_frac * sc_loss['loss'])
ret['sidechain_fape'] = sc_loss['fape']
unnormed_angles = sidechains['unnormalized_angles_sin_cos']
pred_angles = sidechains['angles_sin_cos']
sup_chi_loss, ret['chi_loss'], ret[
'angle_norm_loss'] = supervised_chi_loss(
sequence_mask=seq_mask,
target_chi_mask=chi_mask,
target_chi_angles=gt_chi_angles,
aatype=aatype,
pred_angles=pred_angles,
unnormed_angles=unnormed_angles,
config=self.config)
ret['loss'] += sup_chi_loss
if self.config.structural_violation_loss_weight:
ret['loss'] += structural_violation_loss(
mask=pred_mask, violations=violations, config=self.config)
return ret
def compute_atom14_gt(
aatype: jnp.ndarray,
all_atom_positions: geometry.Vec3Array,
all_atom_mask: jnp.ndarray,
pred_pos: geometry.Vec3Array
) -> Tuple[geometry.Vec3Array, jnp.ndarray, jnp.ndarray]:
"""Find atom14 positions, this includes finding the correct renaming."""
gt_positions, gt_mask = all_atom_multimer.atom37_to_atom14(
aatype, all_atom_positions,
all_atom_mask)
alt_gt_positions, alt_gt_mask = all_atom_multimer.get_alt_atom14(
aatype, gt_positions, gt_mask)
atom_is_ambiguous = all_atom_multimer.get_atom14_is_ambiguous(aatype)
alt_naming_is_better = all_atom_multimer.find_optimal_renaming(
gt_positions=gt_positions,
alt_gt_positions=alt_gt_positions,
atom_is_ambiguous=atom_is_ambiguous,
gt_exists=gt_mask,
pred_positions=pred_pos)
use_alt = alt_naming_is_better[:, None]
gt_mask = (1. - use_alt) * gt_mask + use_alt * alt_gt_mask
gt_positions = (1. - use_alt) * gt_positions + use_alt * alt_gt_positions
  return gt_positions, gt_mask, alt_naming_is_better
def backbone_loss(gt_rigid: geometry.Rigid3Array,
gt_frames_mask: jnp.ndarray,
gt_positions_mask: jnp.ndarray,
target_rigid: geometry.Rigid3Array,
config: ml_collections.ConfigDict,
pair_mask: jnp.ndarray
) -> Tuple[Float, jnp.ndarray]:
"""Backbone FAPE Loss."""
loss_fn = functools.partial(
all_atom_multimer.frame_aligned_point_error,
l1_clamp_distance=config.atom_clamp_distance,
length_scale=config.loss_unit_distance)
loss_fn = jax.vmap(loss_fn, (0, None, None, 0, None, None, None))
fape = loss_fn(target_rigid, gt_rigid, gt_frames_mask,
target_rigid.translation, gt_rigid.translation,
gt_positions_mask, pair_mask)
return jnp.mean(fape), fape[-1]
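# Illustrative sketch: the in_axes pattern used above, vmapping a loss over
# the leading (layer) axis of the predictions while broadcasting the ground
# truth. The toy loss below is made up.
import jax
import jax.numpy as jnp

def toy_loss(pred, target, mask):
  return jnp.sum(mask * (pred - target) ** 2)

batched_loss = jax.vmap(toy_loss, in_axes=(0, None, None))
preds = jnp.zeros((4, 10))                      # 4 trajectory layers
target = jnp.ones((10,))
mask = jnp.ones((10,))
per_layer = batched_loss(preds, target, mask)   # shape (4,), one loss per layer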
def compute_frames(
aatype: jnp.ndarray,
all_atom_positions: geometry.Vec3Array,
all_atom_mask: jnp.ndarray,
use_alt: jnp.ndarray
) -> Tuple[geometry.Rigid3Array, jnp.ndarray]:
"""Compute Frames from all atom positions.
Args:
aatype: array of aatypes, int of [N]
all_atom_positions: Vector of all atom positions, shape [N, 37]
all_atom_mask: mask, shape [N]
use_alt: whether to use alternative orientation for ambiguous aatypes
shape [N]
Returns:
    Rigid corresponding to frames, with shape [N, 8],
    and a mask indicating which rigids are present, with shape [N, 8]
"""
frames_batch = all_atom_multimer.atom37_to_frames(aatype, all_atom_positions,
all_atom_mask)
gt_frames = frames_batch['rigidgroups_gt_frames']
alt_gt_frames = frames_batch['rigidgroups_alt_gt_frames']
use_alt = use_alt[:, None]
renamed_gt_frames = jax.tree_map(
lambda x, y: (1. - use_alt) * x + use_alt * y, gt_frames, alt_gt_frames)
return renamed_gt_frames, frames_batch['rigidgroups_gt_exists']
def sidechain_loss(gt_frames: geometry.Rigid3Array,
gt_frames_mask: jnp.ndarray,
gt_positions: geometry.Vec3Array,
gt_mask: jnp.ndarray,
pred_frames: geometry.Rigid3Array,
pred_positions: geometry.Vec3Array,
config: ml_collections.ConfigDict
) -> Dict[str, jnp.ndarray]:
"""Sidechain Loss using cleaned up rigids."""
flat_gt_frames = jax.tree_map(jnp.ravel, gt_frames)
flat_frames_mask = jnp.ravel(gt_frames_mask)
flat_gt_positions = jax.tree_map(jnp.ravel, gt_positions)
flat_positions_mask = jnp.ravel(gt_mask)
# Compute frame_aligned_point_error score for the final layer.
def _slice_last_layer_and_flatten(x):
return jnp.ravel(x[-1])
flat_pred_frames = jax.tree_map(_slice_last_layer_and_flatten, pred_frames)
flat_pred_positions = jax.tree_map(_slice_last_layer_and_flatten,
pred_positions)
fape = all_atom_multimer.frame_aligned_point_error(
pred_frames=flat_pred_frames,
target_frames=flat_gt_frames,
frames_mask=flat_frames_mask,
pred_positions=flat_pred_positions,
target_positions=flat_gt_positions,
positions_mask=flat_positions_mask,
pair_mask=None,
length_scale=config.sidechain.loss_unit_distance,
l1_clamp_distance=config.sidechain.atom_clamp_distance)
return {
'fape': fape,
'loss': fape}
def structural_violation_loss(mask: jnp.ndarray,
violations: Mapping[str, Float],
config: ml_collections.ConfigDict
) -> Float:
"""Computes Loss for structural Violations."""
# Put all violation losses together to one large loss.
num_atoms = jnp.sum(mask).astype(jnp.float32) + 1e-6
between_residues = violations['between_residues']
within_residues = violations['within_residues']
return (config.structural_violation_loss_weight *
(between_residues['bonds_c_n_loss_mean'] +
between_residues['angles_ca_c_n_loss_mean'] +
between_residues['angles_c_n_ca_loss_mean'] +
jnp.sum(between_residues['clashes_per_atom_loss_sum'] +
within_residues['per_atom_loss_sum']) / num_atoms
))
def find_structural_violations(
aatype: jnp.ndarray,
residue_index: jnp.ndarray,
mask: jnp.ndarray,
pred_positions: geometry.Vec3Array, # (N, 14)
config: ml_collections.ConfigDict,
asym_id: jnp.ndarray,
) -> Dict[str, Any]:
"""Computes several checks for structural Violations."""
# Compute between residue backbone violations of bonds and angles.
connection_violations = all_atom_multimer.between_residue_bond_loss(
pred_atom_positions=pred_positions,
pred_atom_mask=mask.astype(jnp.float32),
residue_index=residue_index.astype(jnp.float32),
aatype=aatype,
tolerance_factor_soft=config.violation_tolerance_factor,
tolerance_factor_hard=config.violation_tolerance_factor)
# Compute the van der Waals radius for every atom
# (the first letter of the atom name is the element type).
# shape (N, 14)
atomtype_radius = jnp.array([
residue_constants.van_der_waals_radius[name[0]]
for name in residue_constants.atom_types
])
residx_atom14_to_atom37 = all_atom_multimer.get_atom14_to_atom37_map(aatype)
atom_radius = mask * utils.batched_gather(atomtype_radius,
residx_atom14_to_atom37)
# Compute the between residue clash loss.
between_residue_clashes = all_atom_multimer.between_residue_clash_loss(
pred_positions=pred_positions,
atom_exists=mask,
atom_radius=atom_radius,
residue_index=residue_index,
overlap_tolerance_soft=config.clash_overlap_tolerance,
overlap_tolerance_hard=config.clash_overlap_tolerance,
asym_id=asym_id)
# Compute all within-residue violations (clashes,
# bond length and angle violations).
restype_atom14_bounds = residue_constants.make_atom14_dists_bounds(
overlap_tolerance=config.clash_overlap_tolerance,
bond_length_tolerance_factor=config.violation_tolerance_factor)
dists_lower_bound = utils.batched_gather(restype_atom14_bounds['lower_bound'],
aatype)
dists_upper_bound = utils.batched_gather(restype_atom14_bounds['upper_bound'],
aatype)
within_residue_violations = all_atom_multimer.within_residue_violations(
pred_positions=pred_positions,
atom_exists=mask,
dists_lower_bound=dists_lower_bound,
dists_upper_bound=dists_upper_bound,
tighten_bounds_for_loss=0.0)
# Combine them to a single per-residue violation mask (used later for LDDT).
per_residue_violations_mask = jnp.max(jnp.stack([
connection_violations['per_residue_violation_mask'],
jnp.max(between_residue_clashes['per_atom_clash_mask'], axis=-1),
jnp.max(within_residue_violations['per_atom_violations'],
axis=-1)]), axis=0)
return {
'between_residues': {
'bonds_c_n_loss_mean':
connection_violations['c_n_loss_mean'], # ()
'angles_ca_c_n_loss_mean':
connection_violations['ca_c_n_loss_mean'], # ()
'angles_c_n_ca_loss_mean':
connection_violations['c_n_ca_loss_mean'], # ()
'connections_per_residue_loss_sum':
connection_violations['per_residue_loss_sum'], # (N)
'connections_per_residue_violation_mask':
connection_violations['per_residue_violation_mask'], # (N)
'clashes_mean_loss':
between_residue_clashes['mean_loss'], # ()
'clashes_per_atom_loss_sum':
between_residue_clashes['per_atom_loss_sum'], # (N, 14)
'clashes_per_atom_clash_mask':
between_residue_clashes['per_atom_clash_mask'], # (N, 14)
},
'within_residues': {
'per_atom_loss_sum':
within_residue_violations['per_atom_loss_sum'], # (N, 14)
'per_atom_violations':
within_residue_violations['per_atom_violations'], # (N, 14),
},
'total_per_residue_violations_mask':
per_residue_violations_mask, # (N)
}
def compute_violation_metrics(
residue_index: jnp.ndarray,
mask: jnp.ndarray,
seq_mask: jnp.ndarray,
pred_positions: geometry.Vec3Array, # (N, 14)
violations: Mapping[str, jnp.ndarray],
) -> Dict[str, jnp.ndarray]:
"""Compute several metrics to assess the structural violations."""
ret = {}
between_residues = violations['between_residues']
within_residues = violations['within_residues']
extreme_ca_ca_violations = all_atom_multimer.extreme_ca_ca_distance_violations(
positions=pred_positions,
mask=mask.astype(jnp.float32),
residue_index=residue_index.astype(jnp.float32))
ret['violations_extreme_ca_ca_distance'] = extreme_ca_ca_violations
ret['violations_between_residue_bond'] = utils.mask_mean(
mask=seq_mask,
value=between_residues['connections_per_residue_violation_mask'])
ret['violations_between_residue_clash'] = utils.mask_mean(
mask=seq_mask,
value=jnp.max(between_residues['clashes_per_atom_clash_mask'], axis=-1))
ret['violations_within_residue'] = utils.mask_mean(
mask=seq_mask,
value=jnp.max(within_residues['per_atom_violations'], axis=-1))
ret['violations_per_residue'] = utils.mask_mean(
mask=seq_mask, value=violations['total_per_residue_violations_mask'])
return ret
def supervised_chi_loss(
sequence_mask: jnp.ndarray,
target_chi_mask: jnp.ndarray,
aatype: jnp.ndarray,
target_chi_angles: jnp.ndarray,
pred_angles: jnp.ndarray,
unnormed_angles: jnp.ndarray,
config: ml_collections.ConfigDict) -> Tuple[Float, Float, Float]:
"""Computes loss for direct chi angle supervision."""
eps = 1e-6
chi_mask = target_chi_mask.astype(jnp.float32)
pred_angles = pred_angles[:, :, 3:]
residue_type_one_hot = jax.nn.one_hot(
aatype, residue_constants.restype_num + 1, dtype=jnp.float32)[None]
chi_pi_periodic = jnp.einsum('ijk, kl->ijl', residue_type_one_hot,
jnp.asarray(residue_constants.chi_pi_periodic))
true_chi = target_chi_angles[None]
sin_true_chi = jnp.sin(true_chi)
cos_true_chi = jnp.cos(true_chi)
sin_cos_true_chi = jnp.stack([sin_true_chi, cos_true_chi], axis=-1)
# This is -1 if chi is pi periodic and +1 if it's 2 pi periodic
shifted_mask = (1 - 2 * chi_pi_periodic)[..., None]
sin_cos_true_chi_shifted = shifted_mask * sin_cos_true_chi
sq_chi_error = jnp.sum(
squared_difference(sin_cos_true_chi, pred_angles), -1)
sq_chi_error_shifted = jnp.sum(
squared_difference(sin_cos_true_chi_shifted, pred_angles), -1)
sq_chi_error = jnp.minimum(sq_chi_error, sq_chi_error_shifted)
sq_chi_loss = utils.mask_mean(mask=chi_mask[None], value=sq_chi_error)
angle_norm = jnp.sqrt(jnp.sum(jnp.square(unnormed_angles), axis=-1) + eps)
norm_error = jnp.abs(angle_norm - 1.)
angle_norm_loss = utils.mask_mean(mask=sequence_mask[None, :, None],
value=norm_error)
loss = (config.chi_weight * sq_chi_loss
+ config.angle_norm_weight * angle_norm_loss)
return loss, sq_chi_loss, angle_norm_loss
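# Illustrative sketch (a single chi angle): why the shifted target is needed.
# For a pi-periodic chi, the angle and the angle + pi describe the same
# sidechain, so the loss keeps the smaller of the two squared errors.
import numpy as np

true_chi = 0.4
pred_sin_cos = np.array([np.sin(true_chi + np.pi), np.cos(true_chi + np.pi)])

target = np.array([np.sin(true_chi), np.cos(true_chi)])
shifted_target = -target                   # (1 - 2 * chi_pi_periodic) == -1 here

err = np.sum((target - pred_sin_cos) ** 2)
err_shifted = np.sum((shifted_target - pred_sin_cos) ** 2)
assert np.isclose(min(err, err_shifted), 0.0)   # the pi-flipped prediction is fine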
def l2_normalize(x: jnp.ndarray,
axis: int = -1,
epsilon: float = 1e-12
) -> jnp.ndarray:
return x / jnp.sqrt(
jnp.maximum(jnp.sum(x**2, axis=axis, keepdims=True), epsilon))
def get_renamed_chi_angles(aatype: jnp.ndarray,
chi_angles: jnp.ndarray,
alt_is_better: jnp.ndarray
) -> jnp.ndarray:
"""Return renamed chi angles."""
chi_angle_is_ambiguous = utils.batched_gather(
jnp.array(residue_constants.chi_pi_periodic, dtype=jnp.float32), aatype)
alt_chi_angles = chi_angles + np.pi * chi_angle_is_ambiguous
# Map back to [-pi, pi].
alt_chi_angles = alt_chi_angles - 2 * np.pi * (alt_chi_angles > np.pi).astype(
jnp.float32)
alt_is_better = alt_is_better[:, None]
return (1. - alt_is_better) * chi_angles + alt_is_better * alt_chi_angles
class MultiRigidSidechain(hk.Module):
"""Class to make side chain atoms."""
def __init__(self,
config: ml_collections.ConfigDict,
global_config: ml_collections.ConfigDict,
name: str = 'rigid_sidechain'):
super().__init__(name=name)
self.config = config
self.global_config = global_config
def __call__(self,
rigid: geometry.Rigid3Array,
representations_list: Iterable[jnp.ndarray],
aatype: jnp.ndarray
) -> Dict[str, Any]:
"""Predict sidechains using multi-rigid representations.
Args:
      rigid: The Rigid's for each residue (translations in angstroms)
representations_list: A list of activations to predict sidechains from.
aatype: amino acid types.
Returns:
dict containing atom positions and frames (in angstrom)
"""
act = [
common_modules.Linear( # pylint: disable=g-complex-comprehension
self.config.num_channel,
name='input_projection')(jax.nn.relu(x))
for x in representations_list]
# Sum the activation list (equivalent to concat then Conv1D)
act = sum(act)
final_init = 'zeros' if self.global_config.zero_init else 'linear'
# Mapping with some residual blocks.
for _ in range(self.config.num_residual_block):
old_act = act
act = common_modules.Linear(
self.config.num_channel,
initializer='relu',
name='resblock1')(
jax.nn.relu(act))
act = common_modules.Linear(
self.config.num_channel,
initializer=final_init,
name='resblock2')(
jax.nn.relu(act))
act += old_act
# Map activations to torsion angles.
# [batch_size, num_res, 14]
num_res = act.shape[0]
unnormalized_angles = common_modules.Linear(
14, name='unnormalized_angles')(
jax.nn.relu(act))
unnormalized_angles = jnp.reshape(
unnormalized_angles, [num_res, 7, 2])
angles = l2_normalize(unnormalized_angles, axis=-1)
outputs = {
'angles_sin_cos': angles, # jnp.ndarray (N, 7, 2)
'unnormalized_angles_sin_cos':
unnormalized_angles, # jnp.ndarray (N, 7, 2)
}
# Map torsion angles to frames.
# geometry.Rigid3Array with shape (N, 8)
all_frames_to_global = all_atom_multimer.torsion_angles_to_frames(
aatype,
rigid,
angles)
# Use frames and literature positions to create the final atom coordinates.
# geometry.Vec3Array with shape (N, 14)
pred_positions = all_atom_multimer.frames_and_literature_positions_to_atom14_pos(
aatype, all_frames_to_global)
outputs.update({
'atom_pos': pred_positions, # geometry.Vec3Array (N, 14)
'frames': all_frames_to_global, # geometry.Rigid3Array (N, 8)
})
return outputs
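# Illustrative sketch: the raw (N, 14) torsion head output reshaped to
# (N, 7, 2) and normalized so every (sin, cos) pair lies on the unit circle,
# mirroring l2_normalize above. Values are random placeholders.
import numpy as np

num_res = 3
unnormalized = np.random.default_rng(0).normal(size=(num_res, 14))
angles = unnormalized.reshape(num_res, 7, 2)
angles = angles / np.sqrt(
    np.maximum(np.sum(angles ** 2, axis=-1, keepdims=True), 1e-12))
assert np.allclose(np.linalg.norm(angles, axis=-1), 1.0)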
|
alphafold-main
|
alphafold/model/folding_multimer.py
|
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for lddt."""
from absl.testing import absltest
from absl.testing import parameterized
from alphafold.model import lddt
import numpy as np
class LddtTest(parameterized.TestCase, absltest.TestCase):
@parameterized.named_parameters(
('same',
[[0, 0, 0], [5, 0, 0], [10, 0, 0]],
[[0, 0, 0], [5, 0, 0], [10, 0, 0]],
[1, 1, 1]),
('all_shifted',
[[0, 0, 0], [5, 0, 0], [10, 0, 0]],
[[-1, 0, 0], [4, 0, 0], [9, 0, 0]],
[1, 1, 1]),
('all_rotated',
[[0, 0, 0], [5, 0, 0], [10, 0, 0]],
[[0, 0, 0], [0, 5, 0], [0, 10, 0]],
[1, 1, 1]),
('half_a_dist',
[[0, 0, 0], [5, 0, 0]],
[[0, 0, 0], [5.5-1e-5, 0, 0]],
[1, 1]),
('one_a_dist',
[[0, 0, 0], [5, 0, 0]],
[[0, 0, 0], [6-1e-5, 0, 0]],
[0.75, 0.75]),
('two_a_dist',
[[0, 0, 0], [5, 0, 0]],
[[0, 0, 0], [7-1e-5, 0, 0]],
[0.5, 0.5]),
('four_a_dist',
[[0, 0, 0], [5, 0, 0]],
[[0, 0, 0], [9-1e-5, 0, 0]],
[0.25, 0.25],),
('five_a_dist',
[[0, 0, 0], [16-1e-5, 0, 0]],
[[0, 0, 0], [11, 0, 0]],
[0, 0]),
('no_pairs',
[[0, 0, 0], [20, 0, 0]],
[[0, 0, 0], [25-1e-5, 0, 0]],
[1, 1]),
)
def test_lddt(
self, predicted_pos, true_pos, exp_lddt):
predicted_pos = np.array([predicted_pos], dtype=np.float32)
true_points_mask = np.array([[[1]] * len(true_pos)], dtype=np.float32)
true_pos = np.array([true_pos], dtype=np.float32)
cutoff = 15.0
per_residue = True
result = lddt.lddt(
predicted_pos, true_pos, true_points_mask, cutoff,
per_residue)
np.testing.assert_almost_equal(result, [exp_lddt], decimal=4)
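# Illustrative sketch (simplified, CA-only, no mask handling; an assumption
# rather than the lddt module's implementation): the score the cases above
# exercise is the fraction of true pairwise distances under the 15 A cutoff
# that the prediction reproduces within 0.5, 1, 2 and 4 A, averaged over the
# four thresholds.
import numpy as np

def lddt_sketch(predicted_points, true_points, cutoff=15.0):
  dist_true = np.linalg.norm(
      true_points[:, None] - true_points[None, :], axis=-1)
  dist_pred = np.linalg.norm(
      predicted_points[:, None] - predicted_points[None, :], axis=-1)
  n = len(true_points)
  include = (dist_true < cutoff) & ~np.eye(n, dtype=bool)
  err = np.abs(dist_true - dist_pred)
  per_threshold = np.mean([err < t for t in (0.5, 1.0, 2.0, 4.0)], axis=0)
  # The 1e-10 terms keep residues with no scored pairs at a score of 1,
  # matching the 'no_pairs' case above.
  return ((1e-10 + np.sum(per_threshold * include, axis=-1)) /
          (1e-10 + np.sum(include, axis=-1)))

# Matches 'one_a_dist': a ~1 A error passes 3 of the 4 thresholds.
print(lddt_sketch(np.array([[0., 0., 0.], [6 - 1e-5, 0., 0.]]),
                  np.array([[0., 0., 0.], [5., 0., 0.]])))   # ~[0.75 0.75]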
if __name__ == '__main__':
absltest.main()
|
alphafold-main
|
alphafold/model/lddt_test.py
|
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Alphafold model."""
|
alphafold-main
|
alphafold/model/__init__.py
|
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Code to generate processed features."""
import copy
from typing import List, Mapping, Tuple
from alphafold.model.tf import input_pipeline
from alphafold.model.tf import proteins_dataset
import ml_collections
import numpy as np
import tensorflow.compat.v1 as tf
FeatureDict = Mapping[str, np.ndarray]
def make_data_config(
config: ml_collections.ConfigDict,
num_res: int,
) -> Tuple[ml_collections.ConfigDict, List[str]]:
"""Makes a data config for the input pipeline."""
cfg = copy.deepcopy(config.data)
feature_names = cfg.common.unsupervised_features
if cfg.common.use_templates:
feature_names += cfg.common.template_features
with cfg.unlocked():
cfg.eval.crop_size = num_res
return cfg, feature_names
def tf_example_to_features(tf_example: tf.train.Example,
config: ml_collections.ConfigDict,
random_seed: int = 0) -> FeatureDict:
"""Converts tf_example to numpy feature dictionary."""
num_res = int(tf_example.features.feature['seq_length'].int64_list.value[0])
cfg, feature_names = make_data_config(config, num_res=num_res)
if 'deletion_matrix_int' in set(tf_example.features.feature):
deletion_matrix_int = (
tf_example.features.feature['deletion_matrix_int'].int64_list.value)
feat = tf.train.Feature(float_list=tf.train.FloatList(
value=map(float, deletion_matrix_int)))
tf_example.features.feature['deletion_matrix'].CopyFrom(feat)
del tf_example.features.feature['deletion_matrix_int']
tf_graph = tf.Graph()
with tf_graph.as_default(), tf.device('/device:CPU:0'):
tf.compat.v1.set_random_seed(random_seed)
tensor_dict = proteins_dataset.create_tensor_dict(
raw_data=tf_example.SerializeToString(),
features=feature_names)
processed_batch = input_pipeline.process_tensors_from_config(
tensor_dict, cfg)
tf_graph.finalize()
with tf.Session(graph=tf_graph) as sess:
features = sess.run(processed_batch)
return {k: v for k, v in features.items() if v.dtype != 'O'}
def np_example_to_features(np_example: FeatureDict,
config: ml_collections.ConfigDict,
random_seed: int = 0) -> FeatureDict:
"""Preprocesses NumPy feature dict using TF pipeline."""
np_example = dict(np_example)
num_res = int(np_example['seq_length'][0])
cfg, feature_names = make_data_config(config, num_res=num_res)
if 'deletion_matrix_int' in np_example:
np_example['deletion_matrix'] = (
np_example.pop('deletion_matrix_int').astype(np.float32))
tf_graph = tf.Graph()
with tf_graph.as_default(), tf.device('/device:CPU:0'):
tf.compat.v1.set_random_seed(random_seed)
tensor_dict = proteins_dataset.np_to_tensor_dict(
np_example=np_example, features=feature_names)
processed_batch = input_pipeline.process_tensors_from_config(
tensor_dict, cfg)
tf_graph.finalize()
with tf.Session(graph=tf_graph) as sess:
features = sess.run(processed_batch)
return {k: v for k, v in features.items() if v.dtype != 'O'}
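# Illustrative sketch: the deletion_matrix_int -> deletion_matrix conversion
# performed above, shown on a standalone tf.train.Example. The feature values
# here are made up.
import tensorflow.compat.v1 as tf

example = tf.train.Example()
example.features.feature['deletion_matrix_int'].int64_list.value.extend(
    [0, 1, 2])

int_values = example.features.feature['deletion_matrix_int'].int64_list.value
float_feat = tf.train.Feature(
    float_list=tf.train.FloatList(value=map(float, int_values)))
example.features.feature['deletion_matrix'].CopyFrom(float_feat)
del example.features.feature['deletion_matrix_int']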
|
alphafold-main
|
alphafold/model/features.py
|
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for prng."""
from absl.testing import absltest
from alphafold.model import prng
import jax
class PrngTest(absltest.TestCase):
def test_key_reuse(self):
init_key = jax.random.PRNGKey(42)
safe_key = prng.SafeKey(init_key)
_, safe_key = safe_key.split()
raw_key = safe_key.get()
self.assertFalse((raw_key == init_key).all())
with self.assertRaises(RuntimeError):
safe_key.get()
with self.assertRaises(RuntimeError):
safe_key.split()
with self.assertRaises(RuntimeError):
safe_key.duplicate()
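# Illustrative sketch (an assumption about the pattern under test, not the
# prng module's implementation): a wrapper that hands out a JAX PRNG key once
# and raises RuntimeError on any reuse, which is the behaviour the assertions
# above rely on.
import jax

class OneShotKey:
  """Toy single-use key wrapper."""

  def __init__(self, key):
    self._key = key
    self._used = False

  def _check(self):
    if self._used:
      raise RuntimeError('Random key has already been used.')
    self._used = True

  def get(self):
    self._check()
    return self._key

  def split(self, num=2):
    self._check()
    return tuple(OneShotKey(k) for k in jax.random.split(self._key, num))

toy_key = OneShotKey(jax.random.PRNGKey(42))
_, toy_key = toy_key.split()
_ = toy_key.get()
# Any further toy_key.get() or toy_key.split() now raises RuntimeError.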
if __name__ == '__main__':
absltest.main()
|
alphafold-main
|
alphafold/model/prng_test.py
|