# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""MNIST-m handler."""
import os
import re
import tarfile
from dm_nevis.datasets_storage.handlers import extraction_utils as eu
from dm_nevis.datasets_storage.handlers import splits
from dm_nevis.datasets_storage.handlers import types
from PIL import Image
_DATA_FNAME = 'mnist_m.tar.gz'
_TRAIN_LABELS_FNAME = 'mnist_m/mnist_m_train_labels.txt'
_TEST_LABELS_FNAME = 'mnist_m/mnist_m_test_labels.txt'
_FNAME_AND_LABEL_REGEX = r'(\d+\.png) (\d+)'
def _parse_labels(labels_fname, tf):
"""Parses the labels and filenames for given label_fname from a tarfile."""
read_buffer = tf.extractfile(labels_fname)
if read_buffer is None:
raise ValueError(f'Failed to read {labels_fname}')
fname_to_label_list = read_buffer.read().decode('utf-8').split('\n')
parsed_labels = dict()
for fname_to_label in fname_to_label_list:
if not fname_to_label:
continue
regex_match = re.search(_FNAME_AND_LABEL_REGEX, fname_to_label)
if regex_match is None:
raise ValueError(f'Failed to parse label line: {fname_to_label!r}')
fname, label = regex_match.groups()
label = int(label)
parsed_labels[fname] = label
return parsed_labels
def mnist_m_handler(dataset_path: str) -> types.HandlerOutput:
"""Handler for MNIST-m dataset."""
with tarfile.open(os.path.join(dataset_path, _DATA_FNAME)) as tf:
train_fname_labels = _parse_labels(_TRAIN_LABELS_FNAME, tf)
test_fname_labels = _parse_labels(_TEST_LABELS_FNAME, tf)
def gen(fname_to_labels):
with tarfile.open(os.path.join(dataset_path, _DATA_FNAME), 'r:gz') as tf:
for member in tf.getmembers():
image_fname = os.path.basename(member.path)
if image_fname not in fname_to_labels:
continue
image = Image.open(tf.extractfile(member))
image.load()
label = fname_to_labels[image_fname]
yield (image, label)
metadata = types.DatasetMetaData(
num_classes=10,
num_channels=3,
image_shape=(), # Ignored for now.
additional_metadata=dict(
task_type='classification',
image_type='ocr',
))
# TODO: Implement a more efficient deduplication algorithm.
merged_fname_labels = train_fname_labels
merged_fname_labels.update(test_fname_labels)
make_gen_fn = eu.deduplicate_data_generator(gen(merged_fname_labels))
per_split_gen = splits.random_split_generator_into_splits_with_fractions(
make_gen_fn, splits.SPLIT_WITH_FRACTIONS_FOR_ALL_DATA,
splits.MERGED_TRAIN_AND_DEV)
return metadata, per_split_gen
mnist_m_dataset = types.DownloadableDataset(
name='mnist_m',
download_urls=[
types.DownloadableArtefact(
url='https://drive.google.com/uc?export=download&id=0B_tExHiYS-0veklUZHFYT19KYjg&confirm=t',
checksum='859df31c91afe82e80e5012ba928f279')
],
website_url='http://yaroslav.ganin.net/',
handler=mnist_m_handler)
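# Quick check of the label-line regex above, on a hypothetical line from
# mnist_m_train_labels.txt: each line pairs an image filename with an
# integer class label.
if __name__ == '__main__':
  match = re.search(_FNAME_AND_LABEL_REGEX, '00000000.png 5')
  assert match is not None
  print(match.groups())  # -> ('00000000.png', '5')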
# Source file: dm_nevis/datasets_storage/handlers/mnist_m.py (repo: dm_nevis-master)
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""NIH CHEST X-RAY dataset handler."""
import os
from typing import Dict, List
import zipfile
from dm_nevis.datasets_storage.handlers import extraction_utils as utils
from dm_nevis.datasets_storage.handlers import splits as su
from dm_nevis.datasets_storage.handlers import types
from tensorflow.io import gfile
import tensorflow_datasets as tfds
_TRAIN_LIST_FNAME = 'train_val_list.txt'
_TEST_LIST_FNAME = 'test_list.txt'
_LABEL_FNAME = 'Data_Entry_2017.csv'
_LABELS = [
'Atelectasis', 'Consolidation', 'Infiltration', 'Pneumothorax', 'Edema',
'Emphysema', 'Fibrosis', 'Effusion', 'Pneumonia', 'Pleural_Thickening',
'Cardiomegaly', 'Nodule', 'Mass', 'Hernia', 'No Finding'
]
_IGNORED_FILES_REGEX = '|'.join([
utils.DEFAULT_IGNORED_FILES_REGEX,
r'.pdf',
r'.txt',
r'__MACOSX',
r'DS_Store',
])
def _path_to_label_fn(path: str, file_to_labels: Dict[str,
List[int]]) -> List[int]:
filename = os.path.basename(path)
return file_to_labels[filename]
def nih_chest_xray_handler(dataset_path: str) -> types.HandlerOutput:
"""Imports NIH Chest X-Ray dataset.
The dataset home page is at
https://www.kaggle.com/datasets/nih-chest-xrays/data
This dataset contains x-ray images of the lung area. There are over 110,000
images in total and 15 labels. The task is multi-label classification, as
each image can belong to multiple categories.
The dataset comes as a single zip file. The file Data_Entry_2017.csv in the
base directory hosts the labels for each image. For instance:
00000001_001.png,Cardiomegaly|Emphysema,... means that this particular image
has two assigned labels.
The files train_val_list.txt and test_list.txt list the images to be used in
the training/validation set and the test set, respectively.
For instance, the first three entries of test_list.txt are:
00000003_000.png
00000003_001.png
00000003_002.png
Then there are 12 image folders containing png images.
Args:
dataset_path: Path with downloaded artifacts.
Returns:
Metadata and generator functions.
"""
zip_file_path, *other_files_in_directory = gfile.listdir(dataset_path)
assert not other_files_in_directory
label_to_id = dict(
((label, label_id) for label_id, label in enumerate(_LABELS)))
num_classes = len(_LABELS)
metadata = types.DatasetMetaData(
num_classes=num_classes,
num_channels=1,
image_shape=(), # Ignored for now.
additional_metadata=dict(
label_to_id=label_to_id,
task_type='multi-label',
image_type='xray',
),
features=tfds.features.FeaturesDict({
'multi_label':
tfds.features.Sequence(
tfds.features.ClassLabel(num_classes=num_classes)),
'png_encoded_image':
tfds.features.Image()
}))
# Extract train/test splits and labels for each image
paths_to_train_files = []
paths_to_test_files = []
file_to_label_ids = dict()
with zipfile.ZipFile(os.path.join(dataset_path, zip_file_path), 'r') as zf:
for name in sorted(zf.namelist()):
f = zf.getinfo(name)
if f.filename == _TRAIN_LIST_FNAME:
with zf.open(name) as infile:
paths_to_train_files = infile.readlines()
paths_to_train_files = [
line.decode('utf-8').strip() for line in paths_to_train_files
]
if f.filename == _TEST_LIST_FNAME:
with zf.open(name) as infile:
paths_to_test_files = infile.readlines()
paths_to_test_files = [
line.decode('utf-8').strip() for line in paths_to_test_files
]
if f.filename == _LABEL_FNAME:
with zf.open(name) as infile:
# Ignore column headers.
infile.readline()
for line in infile:
fields = line.decode('utf-8').split(',')
img_fname = fields[0]
# There are multiple labels per example, each separated by '|'.
labels = fields[1].split('|')
file_to_label_ids[img_fname] = [label_to_id[lab] for lab in labels]
# pylint:disable=g-long-lambda
def make_gen_fn(file_list, file_to_label_ids):
return utils.generate_images_from_zip_files_with_multilabels(
dataset_path=dataset_path,
zip_file_names=[zip_file_path],
path_to_attributes_fn=lambda path: _path_to_label_fn(
path, file_to_label_ids),
ignored_files_regex=_IGNORED_FILES_REGEX,
path_filter=lambda path: os.path.basename(path) in file_list,
)
train_split_gen_fn = lambda: make_gen_fn(paths_to_train_files,
file_to_label_ids)
per_split_gen = su.random_split_generator_into_splits_with_fractions(
train_split_gen_fn, su.SPLIT_WITH_FRACTIONS_FOR_TRAIN,
su.MERGED_TRAIN_AND_DEV)
per_split_gen['test'] = make_gen_fn(paths_to_test_files, file_to_label_ids)
return metadata, per_split_gen
nih_chest_xray_dataset = types.DownloadableDataset(
name='nih_chest_xray',
download_urls=[
types.KaggleDataset(
dataset_name='nih-chest-xrays/data',
checksum='ddd3acbfa23adf60ac08312b3c4040e2')
],
paper_title='ChestX-ray8: Hospital-scale Chest X-ray Database and Benchmarks on Weakly-Supervised Classification and Localization of Common Thorax Diseases',
authors='Xiaosong Wang, Yifan Peng, Le Lu, Zhiyong Lu, Mohammadhadi Bagheri, Ronald M. Summers',
year='2017',
website_url='https://www.kaggle.com/datasets/nih-chest-xrays/data',
handler=nih_chest_xray_handler)
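# Minimal sketch of the Data_Entry_2017.csv parsing described in the
# docstring above, on a hypothetical line: per-image labels are separated
# by '|' and mapped to ids via their position in _LABELS.
if __name__ == '__main__':
  demo_line = '00000001_001.png,Cardiomegaly|Emphysema,extra,fields'
  demo_fields = demo_line.split(',')
  demo_label_to_id = {label: i for i, label in enumerate(_LABELS)}
  print([demo_label_to_id[lab] for lab in demo_fields[1].split('|')])  # -> [10, 5]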
# Source file: dm_nevis/datasets_storage/handlers/nih_chest_xray.py (repo: dm_nevis-master)
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Flickr Material Database (FMD).
This dataset was taken from
http://people.csail.mit.edu/celiu/CVPR2010/FMD/index.html.
"""
import os
import zipfile
from dm_nevis.datasets_storage.handlers import extraction_utils
from dm_nevis.datasets_storage.handlers import splits
from dm_nevis.datasets_storage.handlers import types
from PIL import Image
FMD_PATH = "FMD.zip"
IMAGE_SHAPE = (512, 384)
CLASS_NAMES = [
"fabric",
"foliage",
"glass",
"leather",
"metal",
"paper",
"plastic",
"stone",
"water",
"wood",
]
_ignored_files_regex = r"\.asv$|\.m$|\.db$"
def handler(artifacts_path: str) -> types.HandlerOutput:
"""Downloads the Flickr materials database."""
metadata = types.DatasetMetaData(
num_classes=len(CLASS_NAMES),
num_channels=3,
image_shape=IMAGE_SHAPE,
additional_metadata={
"class_names": CLASS_NAMES,
})
def gen():
with zipfile.ZipFile(os.path.join(artifacts_path, FMD_PATH), "r") as zf:
for img, label in extraction_utils.generate_images_from_zip(
zf,
path_to_label_fn=_path_to_label,
ignored_files_regex=_ignored_files_regex,
path_filter=lambda x: x.startswith("image"),
convert_mode="RGB"):
assert img.size == IMAGE_SHAPE
yield img, label
per_split_gen = splits.random_split_generator_into_splits_with_fractions(
gen, splits.SPLIT_WITH_FRACTIONS_FOR_ALL_DATA,
splits.MERGED_TRAIN_AND_DEV)
return metadata, per_split_gen
def write_fixture(path: str) -> None:
"""Write a fixture to the given path."""
fixture_paths = [
"image/fabric/fabric_moderate_019_new.jpg",
"image/foliage/foliage_final_079_new.jpg",
"image/glass/glass_moderate_025_new.jpg",
"image/glass/glass_object_041_new.jpg",
"image/leather/leather_object_027_new.jpg",
"image/metal/metal_moderate_050_new.jpg",
"image/paper/paper_object_001_new.jpg",
"image/plastic/plastic_object_029_new.jpg",
"image/stone/stone_object_023_new.jpg",
"image/water/water_object_011_new.jpg",
"image/wood/wood_object_037_new.jpg",
]
with zipfile.ZipFile(os.path.join(path, FMD_PATH), "w") as zf:
for fixture_path in fixture_paths:
with zf.open(fixture_path, "w") as f:
image = Image.new("RGB", size=IMAGE_SHAPE, color=(155, 0, 0))
image.save(f, "jpeg")
def _path_to_label(path: str) -> int:
return CLASS_NAMES.index(os.path.basename(os.path.dirname(path)))
flickr_material_database_dataset = types.DownloadableDataset(
name="flickr_material_database",
download_urls=[
types.DownloadableArtefact(
url="http://people.csail.mit.edu/celiu/CVPR2010/FMD/FMD.zip",
checksum="0721ba72cd981aa9599a81bbfaaebd75")
],
website_url="http://people.csail.mit.edu/celiu/CVPR2010/FMD/index.html",
handler=handler,
paper_title="Accuracy and speed of material categorization in real-world images",
authors="L. Sharan, R. Rosenholtz, E. H. Adelson",
year=2014,
fixture_writer=write_fixture)
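# Usage sketch: write the fixture zip into a temporary directory and run the
# handler over it; each split generator yields (PIL.Image, label) pairs.
if __name__ == '__main__':
  import tempfile
  with tempfile.TemporaryDirectory() as tmp_dir:
    write_fixture(tmp_dir)
    fixture_metadata, fixture_gen = handler(tmp_dir)
    for img, label in fixture_gen['train']:
      print(img.size, CLASS_NAMES[label])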
# Source file: dm_nevis/datasets_storage/handlers/flickr_material_database.py (repo: dm_nevis-master)
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for dm_nevis.datasets_storage.handlers.splits."""
import collections
from absl.testing import absltest
from absl.testing import parameterized
from dm_nevis.datasets_storage.handlers import splits
_TEST_CASES = [
dict(
num_examples=10000,
splits_with_fractions=dict(train=0.7, valid=0.1, test=0.2)),
dict(
num_examples=10000,
splits_with_fractions=dict(train=0.8, valid=0.1, test=0.1)),
dict(
num_examples=100000,
splits_with_fractions=dict(train=0.4, valid=0.25, test=0.35)),
]
class SplitsTest(parameterized.TestCase):
@parameterized.parameters(_TEST_CASES)
def test_random_split_generator_into_splits_with_fractions(
self, num_examples, splits_with_fractions):
def make_gen_fn():
yield from range(num_examples)
per_split_gen = splits.random_split_generator_into_splits_with_fractions(
make_gen_fn, splits_with_fractions)
split_names = splits_with_fractions.keys()
per_split_elems = collections.defaultdict(set)
for split_name in split_names:
split_gen = per_split_gen[split_name]
for elem in split_gen:
per_split_elems[split_name].add(elem)
fraction = len(per_split_elems[split_name]) / num_examples
# Check that the fractions are close to the initial ones.
self.assertAlmostEqual(
fraction, splits_with_fractions[split_name], places=2)
# Check that sum of the elements is equal to num_examples
self.assertEqual(num_examples,
sum([len(elems) for elems in per_split_elems.values()]))
# Check that different elements are disjoint
split_names = sorted(splits_with_fractions.keys())
for split_name_a in split_names:
for split_name_b in split_names:
if split_name_a == split_name_b:
continue
self.assertEmpty(per_split_elems[split_name_a].intersection(
per_split_elems[split_name_b]))
@parameterized.parameters(_TEST_CASES)
def test_random_split_generator_into_splits_with_fractions_and_merged(
self, num_examples, splits_with_fractions):
def make_gen_fn():
yield from range(num_examples)
per_split_gen = splits.random_split_generator_into_splits_with_fractions(
make_gen_fn, splits_with_fractions,
{'train_and_valid': ('train', 'valid')})
per_split_elems = collections.defaultdict(set)
for split_name, split_gen in per_split_gen.items():
for elem in split_gen:
per_split_elems[split_name].add(elem)
self.assertSetEqual(
per_split_elems['train_and_valid'],
per_split_elems['train'].union(per_split_elems['valid']))
self.assertLen(
per_split_elems['train_and_valid'],
len(per_split_elems['train']) + len(per_split_elems['valid']))
if __name__ == '__main__':
absltest.main()
# Source file: dm_nevis/datasets_storage/handlers/splits_test.py (repo: dm_nevis-master)
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""MS-COCO handler."""
import collections
import io
import json
import os
import zipfile
from dm_nevis.datasets_storage.handlers import splits
from dm_nevis.datasets_storage.handlers import types
import numpy as np
from PIL import Image
from tensorflow.io import gfile
import tensorflow_datasets as tfds
_TRAIN_IMAGES_FNAME = 'train2017.zip'
_VAL_IMAGES_FNAME = 'val2017.zip'
_TRAIN_ANNOTATIONS = 'annotations_trainval2017.zip'
_NUM_CLASSES = 90
_MIN_TOLERANCE = 16
def _single_label_gen(images_zip_path, data_info, images_info, prefix):
"""Produces a generator for single-label case."""
with zipfile.ZipFile(images_zip_path) as zf:
for annotation in data_info['annotations']:
image_fname = images_info[annotation['image_id']]
category_id = annotation['category_id'] - 1
(x, y, width, height) = annotation['bbox']
if width < _MIN_TOLERANCE or height < _MIN_TOLERANCE:
continue
image = Image.open(io.BytesIO(zf.read(os.path.join(prefix, image_fname))))
x_max = min(image.width, x + width)
y_max = min(image.height, y + height)
cropped_image = image.crop((x, y, x_max, y_max))
yield types.Example(
image=cropped_image, label=category_id, multi_label=None)
def _multi_label_gen(images_zip_path, image_to_categories, images_info, prefix):
"""Produces a generator for multi-label case."""
with zipfile.ZipFile(images_zip_path) as zf:
for image_id, categories in image_to_categories.items():
image_fname = images_info[image_id]
image = Image.open(io.BytesIO(zf.read(os.path.join(prefix, image_fname))))
yield types.Example(
image=image,
label=None,
multi_label=np.nonzero(categories)[0].tolist())
def coco_handler(dataset_path: str,
is_multi_label: bool = True) -> types.HandlerOutput:
"""Handler for MS-COCO dataset."""
train_images = {}
val_images = {}
default_categories_fn = lambda: np.zeros((_NUM_CLASSES,))
train_images_to_categories = collections.defaultdict(default_categories_fn)
val_images_to_categories = collections.defaultdict(default_categories_fn)
with zipfile.ZipFile(os.path.join(dataset_path, _TRAIN_ANNOTATIONS)) as zf:
with gfile.GFile(
zf.extract('annotations/instances_train2017.json',
os.path.join(dataset_path)), 'r') as f:
train_info = json.load(f)
for image_info in train_info['images']:
train_images[image_info['id']] = image_info['file_name']
for annotation in train_info['annotations']:
category_id = annotation['category_id'] - 1
train_images_to_categories[annotation['image_id']][category_id] = 1
with gfile.GFile(
zf.extract('annotations/instances_val2017.json',
os.path.join(dataset_path)), 'r') as f:
val_info = json.load(f)
for image_info in val_info['images']:
val_images[image_info['id']] = image_info['file_name']
for annotation in val_info['annotations']:
category_id = annotation['category_id'] - 1
val_images_to_categories[annotation['image_id']][category_id] = 1
metadata = types.DatasetMetaData(
num_classes=_NUM_CLASSES,
num_channels=3,
image_shape=(),
additional_metadata=dict(
task_type='classification',
image_type='object',
))
if is_multi_label:
metadata.features = tfds.features.FeaturesDict({
'multi_label':
tfds.features.Sequence(
tfds.features.ClassLabel(num_classes=_NUM_CLASSES)),
'png_encoded_image':
tfds.features.Image()
})
def make_gen_single_label(is_test):
if is_test:
return _single_label_gen(
os.path.join(dataset_path, _VAL_IMAGES_FNAME), val_info, val_images,
'val2017')
else:
return _single_label_gen(
os.path.join(dataset_path, _TRAIN_IMAGES_FNAME), train_info,
train_images, 'train2017')
def make_gen_multi_label(is_test):
if is_test:
return _multi_label_gen(
os.path.join(dataset_path, _VAL_IMAGES_FNAME),
val_images_to_categories, val_images, 'val2017')
else:
return _multi_label_gen(
os.path.join(dataset_path, _TRAIN_IMAGES_FNAME),
train_images_to_categories, train_images, 'train2017')
def make_gen(is_test):
if is_multi_label:
return make_gen_multi_label(is_test)
return make_gen_single_label(is_test)
per_split_gen = splits.random_split_generator_into_splits_with_fractions(
lambda: make_gen(is_test=False), splits.SPLIT_WITH_FRACTIONS_FOR_TRAIN,
splits.MERGED_TRAIN_AND_DEV)
per_split_gen['test'] = make_gen(is_test=True)
return (metadata, per_split_gen)
# TODO: Avoid re-downloading the same archives for both COCO variants.
coco_single_label_dataset = types.DownloadableDataset(
name='coco_single_label',
download_urls=[
types.DownloadableArtefact(
url='http://images.cocodataset.org/zips/train2017.zip',
checksum='cced6f7f71b7629ddf16f17bbcfab6b2'),
types.DownloadableArtefact(
url='http://images.cocodataset.org/zips/val2017.zip',
checksum='442b8da7639aecaf257c1dceb8ba8c80'),
types.DownloadableArtefact(
url='http://images.cocodataset.org/zips/test2017.zip',
checksum='77ad2c53ac5d0aea611d422c0938fb35'),
types.DownloadableArtefact(
url='http://images.cocodataset.org/annotations/annotations_trainval2017.zip',
checksum='f4bbac642086de4f52a3fdda2de5fa2c'),
types.DownloadableArtefact(
url='http://images.cocodataset.org/annotations/image_info_test2017.zip',
checksum='85da7065e5e600ebfee8af1edb634eb5')
],
handler=lambda ds: coco_handler(ds, is_multi_label=False),
paper_title='Microsoft COCO: Common Objects in Context',
authors='Tsung-Yi Lin, Michael Maire, Serge Belongie, Lubomir Bourdev, Ross Girshick, James Hays, Pietro Perona, Deva Ramanan, C. Lawrence Zitnick, Piotr Dollár',
year='2014',
website_url='https://cocodataset.org/#home',
)
coco_multi_label_dataset = types.DownloadableDataset(
name='coco_multi_label',
download_urls=[
types.DownloadableArtefact(
url='http://images.cocodataset.org/zips/train2017.zip',
checksum='cced6f7f71b7629ddf16f17bbcfab6b2'),
types.DownloadableArtefact(
url='http://images.cocodataset.org/zips/val2017.zip',
checksum='442b8da7639aecaf257c1dceb8ba8c80'),
types.DownloadableArtefact(
url='http://images.cocodataset.org/zips/test2017.zip',
checksum='77ad2c53ac5d0aea611d422c0938fb35'),
types.DownloadableArtefact(
url='http://images.cocodataset.org/annotations/annotations_trainval2017.zip',
checksum='f4bbac642086de4f52a3fdda2de5fa2c'),
types.DownloadableArtefact(
url='http://images.cocodataset.org/annotations/image_info_test2017.zip',
checksum='85da7065e5e600ebfee8af1edb634eb5')
],
handler=lambda ds: coco_handler(ds, is_multi_label=True),
paper_title='Microsoft COCO: Common Objects in Context',
authors='Tsung-Yi Lin, Michael Maire, Serge Belongie, Lubomir Bourdev, Ross Girshick, James Hays, Pietro Perona, Deva Ramanan, C. Lawrence Zitnick, Piotr Dollár',
year='2014',
website_url='https://cocodataset.org/#home',
)
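# Minimal sketch (hypothetical values) of the bbox-cropping rule used in
# _single_label_gen: crops are clamped to the image bounds, and boxes
# narrower or shorter than _MIN_TOLERANCE pixels are skipped upstream.
if __name__ == '__main__':
  demo_image = Image.new('RGB', (640, 480))
  x, y, width, height = (600.0, 400.0, 100.0, 100.0)  # overflows the image
  x_max = min(demo_image.width, x + width)
  y_max = min(demo_image.height, y + height)
  print(demo_image.crop((x, y, x_max, y_max)).size)  # -> (40, 80)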
# Source file: dm_nevis/datasets_storage/handlers/coco.py (repo: dm_nevis-master)
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PACS dataset.
PACS (Photo-Art-Cartoon-Sketch) is a dataset for testing generalization.
The original splits by the authors are not done by domain, so we use the
sketch domain for dev-test / test.
"""
import os
from dm_nevis.datasets_storage.handlers import extraction_utils as eu
from dm_nevis.datasets_storage.handlers import splits
from dm_nevis.datasets_storage.handlers import types
import h5py
import numpy as np
from PIL import Image
from tensorflow.io import gfile
IMAGE_SHAPE = (227, 227)
CATEGORIES = frozenset([
'photo',
'art_painting',
'cartoon',
'sketch',
])
CLASS_NAMES = (
'dog',
'elephant',
'giraffe',
'guitar',
'horse',
'house',
'person',
)
SPLIT_WITH_FRACTIONS_FOR_TEST_AND_DEV_TEST = {'test': 0.5, 'dev-test': 0.5}
def handler(dataset_path: str) -> types.HandlerOutput:
"""A handler to download the PACS dataset."""
metadata = types.DatasetMetaData(
num_classes=len(CLASS_NAMES),
num_channels=3,
image_shape=IMAGE_SHAPE,
additional_metadata={
'class_names': CLASS_NAMES,
'categories': CATEGORIES,
})
def gen(categories):
for filename in gfile.listdir(dataset_path):
# Check whether the filename matches one of the requested categories.
if any(category_name in filename for category_name in categories):
with h5py.File(os.path.join(dataset_path, filename), 'r') as f:
# Get the data
images = list(f['images'])
labels = list(f['labels'])
for image, label in zip(images, labels):
# Convert the image from BGR to RGB and shift labels to start from 0.
yield Image.fromarray(image[..., ::-1].astype('uint8')), label-1
train_make_gen_fn = eu.deduplicate_data_generator(
gen(categories=['photo', 'art_painting', 'cartoon']))
test_make_gen_fn = eu.deduplicate_data_generator(gen(categories=['sketch']))
per_split_gen = splits.random_split_generator_into_splits_with_fractions(
train_make_gen_fn, splits.SPLIT_WITH_FRACTIONS_FOR_TRAIN_AND_DEV_ONLY,
splits.MERGED_TRAIN_AND_DEV)
test_dev_test_gen = splits.random_split_generator_into_splits_with_fractions(
test_make_gen_fn, SPLIT_WITH_FRACTIONS_FOR_TEST_AND_DEV_TEST)
per_split_gen = per_split_gen | test_dev_test_gen
return metadata, per_split_gen
def write_fixture(path: str) -> None:
"""Writes a fixture to the given path."""
# For each category and class, write a single fixture image.
for category in CATEGORIES:
with h5py.File(os.path.join(path, category + '.hdf5'), 'w') as hf:
images = list(
np.random.randint(256, size=(len(CLASS_NAMES),) + IMAGE_SHAPE + (3,)))
labels = [i + 1 for i in list(range(len(CLASS_NAMES)))]
hf.create_dataset('images', data=images, dtype='uint16')
hf.create_dataset('labels', data=labels, dtype='uint16')
pacs_dataset = types.DownloadableDataset(
name='pacs',
download_urls=[
types.DownloadableArtefact(
url='https://drive.google.com/u/0/uc?id=1e2WfiUmpv25FzRHYrA_8rooqEWicwbGA&export=download&confirm=y',
checksum='7cd392ecb9e0ab0f0e8be9d8fc5ed5a2'),
types.DownloadableArtefact(
url='https://drive.google.com/u/0/uc?id=1qvJeF3WgfZgBgNBnzJGOLVOMncLgi5uN&export=download&confirm=y',
checksum='d5df8be042fd2525efeb29cfb2252026'),
types.DownloadableArtefact(
url='https://drive.google.com/u/0/uc?id=10yRj3m8bB_PAiKqOcct1viTGu0DuT5un&export=download&confirm=y',
checksum='a91c0ee93df8278028ff49072317e24a'),
types.DownloadableArtefact(
url='https://drive.google.com/u/0/uc?id=1ID0Y-v0EvKz1VL7XIKfZtb2FOxF89gVQ&export=download&confirm=y',
checksum='e9205c7d19484ea8b5082abe1560dad3'),
types.DownloadableArtefact(
url='https://drive.google.com/u/0/uc?id=1BpaNvaSRXZ09xwnC5TWBv3ktOBj36mp7&export=download&confirm=y',
checksum='988c767ea2e542268be87044e3da60f5'),
types.DownloadableArtefact(
url='https://drive.google.com/u/0/uc?id=16pF2YwohULpkXV3NNRiBvDy4SBWyhxvz&export=download&confirm=y',
checksum='e1c23f2990c290b38a07c970750b6226'),
types.DownloadableArtefact(
url='https://drive.google.com/u/0/uc?id=1gNHdceC8tS1JLcb6sZGaT7w6zwwTkiXp&export=download&confirm=y',
checksum='4578bcf9207ffa2ad9608976e8f4cf37'),
types.DownloadableArtefact(
url='https://drive.google.com/u/0/uc?id=14_xcxAYTsURyhBKS2FBNwqQFNGIkDbP7&export=download&confirm=y',
checksum='d2d58d2df269ffa2f79d68f5942e4109'),
types.DownloadableArtefact(
url='https://drive.google.com/u/0/uc?id=1e8PvYV1Rbc3uDDKt0iADNNT6fXH95FIC&export=download&confirm=y',
checksum='77fb7329500a70150d1b4637652720b9'),
types.DownloadableArtefact(
url='https://drive.google.com/u/0/uc?id=1xj-PJhD4xBtPv6EETGImlA0Pju7KdIH0&export=download&confirm=y',
checksum='52c846632c2b903536c097e6ccd91c39'),
types.DownloadableArtefact(
url='https://drive.google.com/u/0/uc?id=1li1j1-315EmjXbuqRnMIxiH_u7Kpj81b&export=download&confirm=y',
checksum='44293bc45b2a41fba17cf163c8a01c0a'),
types.DownloadableArtefact(
url='https://drive.google.com/u/0/uc?id=1OQnAweOPYbwhNt9uQ07aVV1GNmkTAcD2&export=download&confirm=y',
checksum='9ea4965e0c61ad295437be4b6cf10681')
],
website_url='https://dali-dl.github.io/project_iccv2017.html',
handler=handler,
paper_title='Deeper, Broader and Artier Domain Generalization',
authors='Da Li, Yongxin Yang, Yi-Zhe Song, Timothy M. Hospedales',
year=2017,
fixture_writer=write_fixture)
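# Sketch of the BGR -> RGB conversion used in gen() above, on a synthetic
# array (hypothetical values): reversing the last axis swaps channel order.
if __name__ == '__main__':
  bgr = np.zeros((1, 1, 3), dtype='uint8')
  bgr[..., 0] = 255  # blue channel comes first in BGR
  print(bgr[..., ::-1][0, 0])  # -> [  0   0 255]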
# Source file: dm_nevis/datasets_storage/handlers/pacs.py (repo: dm_nevis-master)
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""SUN-attributes handler."""
import os
import tarfile
from dm_nevis.datasets_storage.handlers import splits
from dm_nevis.datasets_storage.handlers import types
import numpy as np
from PIL import Image
import scipy.io
import tensorflow_datasets as tfds
_IMAGES_FNAME = 'SUNAttributeDB_Images.tar.gz'
_ATTRIBUTE_FNAME = 'SUNAttributeDB.tar.gz'
_LABELS_FNAME = 'SUNAttributeDB/attributeLabels_continuous.mat'
_IMAGE_FILENAMES_FNAME = 'SUNAttributeDB/images.mat'
_NUM_CLASSES = 102
def sun_attributes_handler(dataset_path: str) -> types.HandlerOutput:
"""Handler for SUN-attributes dataset."""
with tarfile.open(os.path.join(dataset_path, _ATTRIBUTE_FNAME), 'r') as tf:
attributes = scipy.io.loadmat(tf.extractfile(_LABELS_FNAME))['labels_cv']
image_filenames = scipy.io.loadmat(
tf.extractfile(_IMAGE_FILENAMES_FNAME))['images']
image_fname_to_attributes = {}
for (image_fname, image_attributes) in zip(image_filenames, attributes):
image_attributes = np.nonzero(image_attributes)[0].tolist()
image_fname_to_attributes[os.path.join(
'images', image_fname[0].item())] = image_attributes
def make_gen():
with tarfile.open(os.path.join(dataset_path, _IMAGES_FNAME), 'r|gz') as tf:
for member in tf:
image_fname = member.name
if image_fname not in image_fname_to_attributes:
continue
attributes = image_fname_to_attributes[image_fname]
image = Image.open(tf.extractfile(member))
yield types.Example(image=image, multi_label=attributes, label=None)
metadata = types.DatasetMetaData(
num_classes=_NUM_CLASSES,
num_channels=3,
image_shape=(), # Ignored for now.
additional_metadata=dict(),
features=tfds.features.FeaturesDict({
'multi_label':
tfds.features.Sequence(
tfds.features.ClassLabel(num_classes=_NUM_CLASSES)),
'png_encoded_image':
tfds.features.Image()
}))
per_split_gen = splits.random_split_generator_into_splits_with_fractions(
make_gen, splits.SPLIT_WITH_FRACTIONS_FOR_ALL_DATA,
splits.MERGED_TRAIN_AND_DEV)
return (metadata, per_split_gen)
sun_attributes_dataset = types.DownloadableDataset(
name='sun_attributes',
download_urls=[
types.DownloadableArtefact(
url='https://cs.brown.edu/~gmpatter/Attributes/SUNAttributeDB_Images.tar.gz',
checksum='5966725c7306df6e05cd3ada7f45a18b'),
types.DownloadableArtefact(
url='https://cs.brown.edu/~gmpatter/Attributes/SUNAttributeDB.tar.gz',
checksum='883293e5b645822f6ae0046c6df54183')
],
handler=sun_attributes_handler,
)
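# Sketch of the attribute binarization above (hypothetical scores): any
# attribute with a non-zero continuous score becomes a multi-label id.
if __name__ == '__main__':
  demo_scores = np.array([0.0, 0.66, 0.0, 1.0])
  print(np.nonzero(demo_scores)[0].tolist())  # -> [1, 3]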
# Source file: dm_nevis/datasets_storage/handlers/sun_attributes.py (repo: dm_nevis-master)
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for dm_nevis.datasets_storage.handlers.pacs."""
from absl.testing import absltest
from absl.testing import parameterized
from dm_nevis.datasets_storage.handlers import pacs
class PacsTest(parameterized.TestCase):
@parameterized.parameters([
dict(categories=pacs.CATEGORIES),
])
def test_handler(self, categories):
fixture_dir = self.create_tempdir().full_path
dataset = pacs.pacs_dataset
dataset.fixture_writer(fixture_dir)
metadata, generator = dataset.handler(fixture_dir)
self.assertLen(metadata.additional_metadata['categories'], len(categories))
samples = list()
for split in ['train', 'dev', 'dev-test', 'test']:
self.assertIn(split, generator)
samples.extend(list(generator[split]))
# Each category and class has a single image in the fixture.
self.assertLen(samples, len(categories) * metadata.num_classes)
if __name__ == '__main__':
absltest.main()
# Source file: dm_nevis/datasets_storage/handlers/pacs_test.py (repo: dm_nevis-master)
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""MPEG7 handler."""
import os
from typing import Optional
import zipfile
from absl import logging
from dm_nevis.datasets_storage.handlers import extraction_utils as utils
from dm_nevis.datasets_storage.handlers import splits
from dm_nevis.datasets_storage.handlers import types
from tensorflow.io import gfile
def _label_from_filename(filename: str) -> Optional[str]:
"""Extracts a text label given a filename for the MPEG7 dataset.
Args:
filename: Name of the file, for example,
'/path/to/dataset/original/lizzard-13.gif'
Returns:
label: A text label, for example, 'lizzard' in the filename above
"""
# There are six extra files when the dataset is downloaded; ignore them.
label = os.path.split(filename)[1].split('-')[0]
if 'confusion' in label or 'shapedata' in label:
logging.info('skipping %s', label)
return None
return label
def _path_to_label_fn(path: str, label_to_id):
label = _label_from_filename(path)
if label:
return label_to_id[label]
else:
return None
# pylint:disable=missing-function-docstring
def mpeg7_handler(dataset_path: str) -> types.HandlerOutput:
files = gfile.listdir(dataset_path)
labels = set()
for file in files:
with zipfile.ZipFile(os.path.join(dataset_path, file), 'r') as zf:
for filename in zf.namelist():
label = _label_from_filename(filename)
if label is not None:
labels.add(label)
labels = sorted(labels)
num_classes = len(labels)
label_to_id = dict(((label, idx) for idx, label in enumerate(labels)))
metadata = types.DatasetMetaData(
num_classes=num_classes,
num_channels=1,
image_shape=(), # Ignored for now.
additional_metadata=dict(label_to_id=label_to_id))
def make_gen_fn():
return utils.generate_images_from_zip_files(
dataset_path,
files,
path_to_label_fn=lambda path: _path_to_label_fn(path, label_to_id))
per_split_gen = splits.random_split_generator_into_splits_with_fractions(
make_gen_fn, splits.SPLIT_WITH_FRACTIONS_FOR_ALL_DATA,
splits.MERGED_TRAIN_AND_DEV)
return metadata, per_split_gen
mpeg7_dataset = types.DownloadableDataset(
name='mpeg7',
download_urls=[
types.DownloadableArtefact(
url='https://dabi.temple.edu/external/shape/MPEG7/MPEG7dataset.zip',
checksum='bedd54856c425dcc6e242515c4f67d75')
],
website_url='https://dabi.temple.edu/external/shape/MPEG7',
paper_title='Learning Context Sensitive Shape Similarity by Graph Transduction',
authors='Xiang Bai and Xingwei Yang and Longin Jan Latecki and Wenyu Liu and Zhuowen Tu',
year='2009',
handler=mpeg7_handler,
)
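# Quick check of the filename-to-label rule above (hypothetical path): the
# class name is everything before the first '-' in the basename.
if __name__ == '__main__':
  print(_label_from_filename('/path/to/original/lizzard-13.gif'))  # -> lizzard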
# Source file: dm_nevis/datasets_storage/handlers/mpeg7.py (repo: dm_nevis-master)
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Pascal VOC 2006 dataset handler."""
import os
import tarfile
from dm_nevis.datasets_storage.handlers import splits
from dm_nevis.datasets_storage.handlers import types
from PIL import Image
from tensorflow.io import gfile
_LABEL_PATH = 'VOCdevkit/VOC2006/ImageSets/'
def pascal_voc2006_handler(dataset_path: str) -> types.HandlerOutput:
"""Imports Pascal VOC 2006 dataset.
Link: http://host.robots.ox.ac.uk/pascal/VOC/databases.html#VOC2006
Args:
dataset_path: Path with downloaded datafiles.
Returns:
Metadata and generator functions.
"""
files = gfile.listdir(dataset_path)
assert len(files) == 2
first_file = files[0]
is_train = first_file.endswith('_trainval.tar')
pattern = '_trainval.txt' if is_train else '_test.txt'
suffix_length = len(pattern)
raw_file_path = dataset_path
# Extract class names and their number.
with tarfile.open(os.path.join(dataset_path, first_file)) as tfile:
class_files = [
tarinfo for tarinfo in tfile.getmembers()
if (tarinfo.name.startswith(_LABEL_PATH) and
tarinfo.name.endswith(pattern))]
classes = [cf.name.split('/')[-1][:-suffix_length] for cf in class_files]
num_classes = len(classes)
label_to_id = {class_name: idx for idx, class_name in enumerate(classes)}
metadata = types.DatasetMetaData(
num_classes=num_classes,
num_channels=3,
image_shape=(), # Ignored for now.
additional_metadata=dict(
label_to_id=label_to_id,
task_type='classification',
image_type='object'))
def get_image_label_pair(file: str):
is_train = file.endswith('_trainval.tar')
pattern = '_trainval.txt' if is_train else '_test.txt'
suffix_length = len(pattern)
with tarfile.open(os.path.join(raw_file_path, file)) as tar:
# Extract list of images and their label.
class_files = [
tarinfo for tarinfo in tar.getmembers()
if (tarinfo.name.startswith(_LABEL_PATH) and
tarinfo.name.endswith(pattern))]
image_dict = dict()
for cf in class_files:
class_name = cf.name.split('/')[-1][:-suffix_length]
f_obj = tar.extractfile(cf)
assert f_obj
lines = f_obj.readlines()
lines = [ll.decode('utf-8') for ll in lines]
curr_image_list = [ll[:-4] for ll in lines if ll.endswith(' 1\n')]
for ci in curr_image_list:
image_dict[ci + '.png'] = label_to_id[class_name]
# Extract actual images.
tarinfos = [tarinfo for tarinfo in tar.getmembers()
if tarinfo.name.split('/')[-1] in image_dict.keys()]
assert tarinfos
for ti in tarinfos:
f_obj = tar.extractfile(ti)
image = Image.open(f_obj)
image.load()
label = image_dict[ti.name.split('/')[-1]]
yield (image, label)
def gen_split(is_test_split: bool):
if is_test_split:
# extract test set
file = [file for file in files if file.endswith('_test.tar')]
assert len(file) == 1
return get_image_label_pair(file[0])
else:
# extract training set
file = [file for file in files if file.endswith('_trainval.tar')]
assert len(file) == 1
return get_image_label_pair(file[0])
make_gen_fn = lambda: gen_split(is_test_split=False)
per_split_gen = splits.random_split_generator_into_splits_with_fractions(
make_gen_fn, splits.SPLIT_WITH_FRACTIONS_FOR_TRAIN,
splits.MERGED_TRAIN_AND_DEV)
per_split_gen['test'] = gen_split(is_test_split=True)
return (metadata, per_split_gen)
pascal_voc2006_dataset = types.DownloadableDataset(
name='pascal_voc2006',
download_urls=[
types.DownloadableArtefact(
url='http://host.robots.ox.ac.uk/pascal/VOC/download/voc2006_trainval.tar',
checksum='af06612e5ad9863bde6fa7aae55f8866'),
types.DownloadableArtefact(
url='http://host.robots.ox.ac.uk/pascal/VOC/download/voc2006_test.tar',
checksum='6bd028d82d057621c4fc69e9c56517ef')
],
handler=pascal_voc2006_handler)
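# Sketch of the ImageSets line parsing used in get_image_label_pair, on
# hypothetical lines: entries flagged ' 1' are positives for the class, and
# the trailing '  1\n' suffix (4 characters) is stripped from the image id.
if __name__ == '__main__':
  demo_lines = ['000012  1\n', '000015 -1\n']
  print([ll[:-4] for ll in demo_lines if ll.endswith(' 1\n')])  # -> ['000012']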
# Source file: dm_nevis/datasets_storage/handlers/pascal_voc2006.py (repo: dm_nevis-master)
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""ISBI-ISIC 2017 melanoma classification challenge dataset handler."""
import os
from dm_nevis.datasets_storage.handlers import extraction_utils
from dm_nevis.datasets_storage.handlers import types
import numpy as np
from tensorflow.io import gfile
TRAIN_ZIP_PATH = "ISIC-2017_Training_Data.zip"
TRAIN_LABEL_PATH = "ISIC-2017_Training_Part3_GroundTruth.csv"
VALIDATION_ZIP_PATH = "ISIC-2017_Validation_Data.zip"
VALIDATION_LABEL_PATH = "ISIC-2017_Validation_Part3_GroundTruth.csv"
TEST_ZIP_PATH = "ISIC-2017_Test_v2_Data.zip"
TEST_LABEL_PATH = "ISIC-2017_Test_v2_Part3_GroundTruth.csv"
# Ratio for splitting the training set into train and dev splits.
TRAIN_RATIO = 0.8
CLASS_NAMES = ["melanoma", "seborrheic keratosis", "benign nevi"]
def melanoma_handler(artifacts_path: str) -> types.HandlerOutput:
"""Imports ISBI-ISIC 2017 melanoma classification challenge dataset.
The dataset home page is at https://challenge.isic-archive.com/data/.
Args:
artifacts_path: Path with downloaded artifacts.
Returns:
Metadata and generator functions.
"""
metadata = types.DatasetMetaData(
num_classes=3,
num_channels=3,
image_shape=(), # Ignored for now.
additional_metadata={
"class_names": CLASS_NAMES,
"train_size": 1600,
"dev_size": 400,
"train_and_dev_size": 2000,
"dev-test_size": 150,
"test_size": 600,
})
def gen(split):
zip_path, label_path = _get_zip_and_label_file_paths(split)
path_label_pairs = _create_path_label_pairs(
artifacts_path, zip_path, label_path)
# Shuffle for random splitting.
if split in ["train", "dev", "train_and_dev"]:
rng = np.random.default_rng(seed=1)
path_label_pairs = rng.permutation(path_label_pairs)
num_trains = int(len(path_label_pairs) * TRAIN_RATIO)
# Split the training set into train and dev subsets.
if split == "train":
path_label_pairs = path_label_pairs[:num_trains]
elif split == "dev":
path_label_pairs = path_label_pairs[num_trains:]
path_to_label = dict(path_label_pairs)
return extraction_utils.generate_images_from_zip_files(
artifacts_path, [zip_path],
path_to_label_fn=lambda path: int(path_to_label[path]),
path_filter=lambda path: path in path_to_label)
return metadata, {
"train": gen("train"),
"train_and_dev": gen("train_and_dev"),
"dev": gen("dev"),
"dev-test": gen("dev-test"),
"test": gen("test"),
}
def _get_zip_and_label_file_paths(split: str):
"""Returns the zip and label file of a split."""
if split in ["train", "dev", "train_and_dev"]:
zip_path = TRAIN_ZIP_PATH
label_path = TRAIN_LABEL_PATH
elif split == "dev-test":
zip_path = VALIDATION_ZIP_PATH
label_path = VALIDATION_LABEL_PATH
elif split == "test":
zip_path = TEST_ZIP_PATH
label_path = TEST_LABEL_PATH
else:
raise ValueError(f"Unsupported split name: {split}.")
return zip_path, label_path
def _create_path_label_pairs(
artifacts_path: str, zip_path: str, label_path: str):
"""Reads the label file and return a list of file path and label pairs."""
zip_path_root = os.path.splitext(zip_path)[0]
path_label_pairs = []
with gfile.GFile(os.path.join(artifacts_path, label_path), "r") as f:
# skip first line
f.readline()
for line in f:
path_label_pairs.append(
_get_image_path_and_label_for_path(line, zip_path_root))
return path_label_pairs
def _get_image_path_and_label_for_path(line: str, zip_path_root: str):
"""Parses a line to get the image path and label."""
# Each line is in the format of "image_id,melanoma,seborrheic_keratosis".
parts = line.split(",")
if len(parts) != 3:
raise ValueError(f"Invalid format in line {line}.")
image_id = parts[0]
path = f"{zip_path_root}/{image_id}.jpg"
melanoma = float(parts[1]) == 1
seborrheic_keratosis = float(parts[2]) == 1
if melanoma + seborrheic_keratosis > 1:
raise ValueError(f"Line {line} contains multiple classes.")
if melanoma:
label = 0
elif seborrheic_keratosis:
label = 1
else:
label = 2
return path, label
melanoma_dataset = types.DownloadableDataset(
name="melanoma",
download_urls=[
types.DownloadableArtefact(
url="https://isic-challenge-data.s3.amazonaws.com/2017/ISIC-2017_Training_Data.zip",
checksum="a14a7e622c67a358797ae59abb8a0b0c"),
types.DownloadableArtefact(
url="https://isic-challenge-data.s3.amazonaws.com/2017/ISIC-2017_Training_Part3_GroundTruth.csv",
checksum="0cb4add57c65c22ca1a1cb469ad1f0c5"),
types.DownloadableArtefact(
url="https://isic-challenge-data.s3.amazonaws.com/2017/ISIC-2017_Validation_Data.zip",
checksum="8d6419d942112f709894c0d82f6c9038"),
types.DownloadableArtefact(
url="https://isic-challenge-data.s3.amazonaws.com/2017/ISIC-2017_Validation_Part3_GroundTruth.csv",
checksum="8d4826a76adcd8fb928ca52a23ebae4c"),
types.DownloadableArtefact(
url="https://isic-challenge-data.s3.amazonaws.com/2017/ISIC-2017_Test_v2_Data.zip",
checksum="5f6a0b5e1f2972bd1f5ea02680489f09"),
types.DownloadableArtefact(
url="https://isic-challenge-data.s3.amazonaws.com/2017/ISIC-2017_Test_v2_Part3_GroundTruth.csv",
checksum="9e957b72a0c4f9e0d924889fd03b36ed")
],
handler=melanoma_handler,
website_url="https://challenge.isic-archive.com/data/",
paper_url="https://arxiv.org/abs/1710.05006",
paper_title=("Skin Lesion Analysis Toward Melanoma Detection: A Challenge "
"at the 2017 International Symposium on Biomedical Imaging "
"(ISBI), Hosted by the International Skin Imaging "
"Collaboration (ISIC)"),
authors=("Noel C. F. Codella, David Gutman, M. Emre Celebi, Brian Helba, "
"Michael A. Marchetti, Stephen W. Dusza, Aadi Kalloo, "
"Konstantinos Liopyris, Nabin Mishra, Harald Kittler, "
"Allan Halpern"),
year=2017,
)
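# Quick check of the ground-truth CSV rule above (hypothetical line):
# melanoma -> 0, seborrheic keratosis -> 1, otherwise benign nevi -> 2.
if __name__ == '__main__':
  print(_get_image_path_and_label_for_path(
      'ISIC_0000000,0.0,1.0', 'ISIC-2017_Training_Data'))
  # -> ('ISIC-2017_Training_Data/ISIC_0000000.jpg', 1)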
# Source file: dm_nevis/datasets_storage/handlers/melanoma.py (repo: dm_nevis-master)
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Data downloads using the Kaggle CLI.
Forked from tensorflow_datasets.core.download.kaggle.py. We fork to remove the
automatic zip extraction which deleted the archive.zip file. We want to do it
ourselves, so we can check the checksum of the zip file beforehand.
We also customize the dataset dir location where the file is downloaded.
"""
# pylint: disable=raise-missing-from
import enum
import subprocess
import textwrap
from typing import List
from absl import logging
class KaggleOrigin(enum.Enum):
DATASETS = 'datasets'
COMPETITIONS = 'competitions'
def _run_command(command_args: List[str]) -> str:
"""Runs kaggle command with subprocess.
It contains the full command a user would use when working with the CLI,
as described here: https://www.kaggle.com/docs/api.
Args:
command_args: Arguments to the kaggle api.
Returns:
output of the command.
Raises:
CalledProcessError: If the command terminates with exit status 1.
"""
command_str = ' '.join(command_args)
competition_or_dataset = command_args[-1]
try:
return subprocess.check_output(command_args, encoding='UTF-8')
except (subprocess.CalledProcessError, FileNotFoundError) as err:
if isinstance(err, subprocess.CalledProcessError) and '404' in err.output:
raise ValueError(
textwrap.dedent("""\
Error for command: {}
Competition {} not found. Please ensure you have spelled the name
correctly.
""").format(command_str, competition_or_dataset))
else:
raise RuntimeError(
textwrap.dedent("""\
Error for command: {}
To download Kaggle data through TFDS, follow the instructions to install
the kaggle API and get API credentials:
https://github.com/Kaggle/kaggle-api#installation
Additionally, you may have to join the competition through the Kaggle
website: https://www.kaggle.com/c/{}
""").format(command_str, competition_or_dataset))
def _download_competition_or_dataset(competition_or_dataset: str,
output_dir: str,
kaggle_origin: KaggleOrigin) -> None:
"""Downloads the data to `archive.zip`."""
_run_command([
'kaggle',
kaggle_origin.value,
'download',
'--path',
output_dir,
competition_or_dataset,
])
def download_kaggle_data(competition_or_dataset: str, download_dir: str,
kaggle_origin: KaggleOrigin) -> None:
"""Downloads the kaggle data to the output_dir as a zip file.
Args:
competition_or_dataset: Name of the kaggle competition/dataset.
download_dir: Path to the downloads dir.
kaggle_origin: Specifies whether it is a dataset or a competition.
"""
logging.info('Downloading %s into %s...', competition_or_dataset,
download_dir)
_download_competition_or_dataset(competition_or_dataset, download_dir,
kaggle_origin)
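# Usage sketch (requires the kaggle CLI and API credentials; the target
# directory below is hypothetical):
if __name__ == '__main__':
  download_kaggle_data('nih-chest-xrays/data', '/tmp/kaggle_downloads',
                       KaggleOrigin.DATASETS)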
# Source file: dm_nevis/datasets_storage/handlers/kaggle_utils.py (repo: dm_nevis-master)
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Scenes8 handler."""
import os
from dm_nevis.datasets_storage.handlers import extraction_utils as utils
from dm_nevis.datasets_storage.handlers import splits
from dm_nevis.datasets_storage.handlers import types
from tensorflow.io import gfile
def _path_to_label_fn(path: str, label_to_id):
label = os.path.split(path)[1].split('_')[0]
return label_to_id[label]
# pylint:disable=missing-function-docstring
def scenes8_handler(dataset_path: str) -> types.HandlerOutput:
files = gfile.listdir(dataset_path)
labels = [
'highway', 'street', 'tallbuilding', 'forest', 'insidecity',
'opencountry', 'mountain', 'coast'
]
label_to_id = dict(
((label, label_id) for label_id, label in enumerate(labels)))
metadata = types.DatasetMetaData(
num_classes=len(labels),
num_channels=3,
image_shape=(), # Ignored for now.
additional_metadata=dict(
label_to_id=label_to_id,
task_type='classification',
image_type='scene',
))
def make_gen_fn():
return utils.generate_images_from_zip_files(
dataset_path,
files,
path_to_label_fn=lambda path: _path_to_label_fn(path, label_to_id))
per_split_gen = splits.random_split_generator_into_splits_with_fractions(
make_gen_fn, splits.SPLIT_WITH_FRACTIONS_FOR_ALL_DATA,
splits.MERGED_TRAIN_AND_DEV)
return metadata, per_split_gen
scenes8_dataset = types.DownloadableDataset(
name='scenes8',
download_urls=[
types.DownloadableArtefact(
url='https://people.csail.mit.edu/torralba/code/spatialenvelope/spatial_envelope_256x256_static_8outdoorcategories.zip',
checksum='c26fe529d49848091a759d7aadd267f5')
],
handler=scenes8_handler)
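# Quick check of the filename-to-label rule above (hypothetical path): the
# class name is the token before the first '_' in the basename.
if __name__ == '__main__':
  demo_label_to_id = {'coast': 7}  # index of 'coast' in the handler's list
  print(_path_to_label_fn('archive/coast_cdmc838.jpg', demo_label_to_id))  # -> 7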
# Source file: dm_nevis/datasets_storage/handlers/scenes8.py (repo: dm_nevis-master)
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Not-MNIST handler."""
import os
from dm_nevis.datasets_storage.handlers import extraction_utils as utils
from dm_nevis.datasets_storage.handlers import splits
from dm_nevis.datasets_storage.handlers import types
from tensorflow.io import gfile
_IGNORED_FILES_REGEX = '|'.join([
utils.DEFAULT_IGNORED_FILES_REGEX,
r'notMNIST_small/notMNIST_small/A/RGVtb2NyYXRpY2FCb2xkT2xkc3R5bGUgQm9sZC50dGY=.png',
r'notMNIST_small/notMNIST_small/F/Q3Jvc3NvdmVyIEJvbGRPYmxpcXVlLnR0Zg==.png'
])
def _path_to_label_fn(path: str, label_to_id):
label = os.path.basename(os.path.dirname(path))
return label_to_id[label]
def not_mnist_handler(dataset_path: str) -> types.HandlerOutput:
"""Not-MNIST dataset handler."""
files = gfile.listdir(dataset_path)
labels = [
'A',
'B',
'C',
'D',
'E',
'F',
'G',
'H',
'I',
'J',
]
label_to_id = dict(
((label, label_id) for label_id, label in enumerate(labels)))
metadata = types.DatasetMetaData(
num_classes=len(labels),
num_channels=1,
image_shape=(), # Ignored for now.
additional_metadata=dict(
label_to_id=label_to_id,
task_type='classification',
image_type='ocr',
))
def make_gen_fn():
return utils.generate_images_from_zip_files(
dataset_path,
files,
path_to_label_fn=lambda path: _path_to_label_fn(path, label_to_id),
ignored_files_regex=_IGNORED_FILES_REGEX,
path_filter=lambda x: x.startswith('notMNIST_small/notMNIST_small'),
convert_mode='L')
# TODO: Implement a more efficient deduplication algorithm.
make_unique_gen_fn = utils.deduplicate_data_generator(make_gen_fn())
per_split_gen = splits.random_split_generator_into_splits_with_fractions(
make_unique_gen_fn, splits.SPLIT_WITH_FRACTIONS_FOR_ALL_DATA,
splits.MERGED_TRAIN_AND_DEV)
return metadata, per_split_gen
not_mnist_dataset = types.DownloadableDataset(
name='not_mnist',
download_urls=[types.KaggleDataset(
dataset_name='jwjohnson314/notmnist',
checksum='e2a47bb2a88c2c6bcae60d9f95223ace')],
website_url='https://www.kaggle.com/jwjohnson314/notmnist',
handler=not_mnist_handler)
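# Quick check of the directory-based label rule above (hypothetical path):
# the class is the parent directory name, here 'B' -> id 1.
if __name__ == '__main__':
  demo_label_to_id = {label: i for i, label in enumerate('ABCDEFGHIJ')}
  print(_path_to_label_fn('notMNIST_small/notMNIST_small/B/x.png',
                          demo_label_to_id))  # -> 1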
# Source file: dm_nevis/datasets_storage/handlers/not_mnist.py (repo: dm_nevis-master)
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Graz 02 dataset handler.""" # NOTYPO
import itertools
import os
import zipfile
from dm_nevis.datasets_storage.handlers import extraction_utils as utils
from dm_nevis.datasets_storage.handlers import splits
from dm_nevis.datasets_storage.handlers import types
ZIP_FNAMES = {'bikes': 'ig02-v1.0-bikes.zip',
'cars': 'ig02-v1.0-cars.zip',
'people': 'ig02-v1.0-people.zip'}
CLASS_NAME_TO_LABEL = {'bikes': 0,
'cars': 1,
'people': 2}
IMAGE_NAME_SUFFIX = '.image.png'
# pylint:disable=missing-function-docstring
def ig02_handler(artifacts_path: str) -> types.HandlerOutput:
metadata = types.DatasetMetaData(
num_classes=3,
num_channels=3,
image_shape=(), # Ignored for now.
additional_metadata={
'class_names': list(CLASS_NAME_TO_LABEL.keys()),
})
def gen(split):
# path is in the format of "{class_name}/{fname}{IMAGE_NAME_SUFFIX}".
if split in ['train', 'test']:
split_fnames = []
for class_name, zip_fname in ZIP_FNAMES.items():
with zipfile.ZipFile(
os.path.join(artifacts_path, zip_fname), 'r') as zf:
fnames_path = f'{class_name}_{split}.txt'
with zf.open(fnames_path, 'r') as f:
for line in f:
fname = line.decode().strip()
split_fnames.append(f'{class_name}/{fname}{IMAGE_NAME_SUFFIX}')
split_fnames = set(split_fnames)
path_filter = lambda path: path in split_fnames
else:
# Include all image files.
path_filter = lambda path: path.endswith(IMAGE_NAME_SUFFIX)
return utils.generate_images_from_zip_files(
artifacts_path, list(ZIP_FNAMES.values()),
path_to_label_fn=_label_from_path,
path_filter=path_filter)
# TODO: Implement a more efficient deduplication algorithm.
make_gen_fn = utils.deduplicate_data_generator(
itertools.chain(gen('train'), gen('test')))
per_split_gen = splits.random_split_generator_into_splits_with_fractions(
make_gen_fn, splits.SPLIT_WITH_FRACTIONS_FOR_ALL_DATA,
splits.MERGED_TRAIN_AND_DEV)
return metadata, per_split_gen
def _label_from_path(path: str) -> types.Label:
# path is in the format of "{class_name}/{fname}.image.png".
return CLASS_NAME_TO_LABEL[os.path.dirname(path)]
ig02_dataset = types.DownloadableDataset(
name='ig02', # NOTYPO
download_urls=[
types.DownloadableArtefact(
url='http://lear.inrialpes.fr/people/marszalek/data/ig02/ig02-v1.0-bikes.zip',
checksum='13266fdf968176fa3aebdd439184254f'),
types.DownloadableArtefact(
url='http://lear.inrialpes.fr/people/marszalek/data/ig02/ig02-v1.0-cars.zip',
checksum='34de933832755ee009c6f0e9d9c6426e'),
types.DownloadableArtefact(
url='http://lear.inrialpes.fr/people/marszalek/data/ig02/ig02-v1.0-people.zip',
checksum='f80d8d21f018197979c72b977986fd2f')
], # NOTYPO
handler=ig02_handler) # NOTYPO
|
dm_nevis-master
|
dm_nevis/datasets_storage/handlers/ig02.py
|
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Pascal VOC 2007 dataset handler."""
import os
import tarfile
from dm_nevis.datasets_storage.handlers import splits
from dm_nevis.datasets_storage.handlers import types
from PIL import Image
from tensorflow.io import gfile
import tensorflow_datasets as tfds
_LABEL_PATH = 'VOCdevkit/VOC2007/ImageSets/Main/'
def pascal_voc2007_handler(dataset_path: str) -> types.HandlerOutput:
"""Imports Pascal VOC 2007 dataset.
Link: http://host.robots.ox.ac.uk/pascal/VOC/voc2007/
Args:
dataset_path: Path with downloaded datafiles.
Returns:
Metadata and generator functions.
"""
files = gfile.listdir(dataset_path)
assert len(files) == 2
first_file = files[0]
is_train = 'trainval' in first_file
pattern = '_trainval.txt' if is_train else '_test.txt'
suffix_length = len(pattern)
raw_file_path = dataset_path
def extract_class_name(path: str, suffix_length: int):
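    # e.g. extract_class_name('VOCdevkit/VOC2007/ImageSets/Main/dog_trainval.txt',
    #                         len('_trainval.txt')) returns 'dog'.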
return path.split('/')[-1][:-suffix_length]
def extract_tarinfos(tarfile_name: str, startstr: str, endstr: str):
with tarfile.open(os.path.join(dataset_path, tarfile_name)) as tfile:
return [
tarinfo for tarinfo in tfile.getmembers()
if (tarinfo.name.startswith(startstr) and
tarinfo.name.endswith(endstr))]
class_files = extract_tarinfos(first_file, _LABEL_PATH, pattern)
classes = [extract_class_name(cf.name, suffix_length) for cf in class_files]
num_classes = len(classes)
assert num_classes == 20
label_to_id = dict()
for cc in range(num_classes):
label_to_id[classes[cc]] = cc
metadata = types.DatasetMetaData(
num_classes=num_classes,
num_channels=3,
image_shape=(), # Ignored for now.
additional_metadata=dict(label_to_id=label_to_id, image_type='object'),
features=tfds.features.FeaturesDict({
'multi_label':
tfds.features.Sequence(
tfds.features.ClassLabel(num_classes=num_classes)),
'png_encoded_image':
tfds.features.Image()
}))
def get_image_label_pair(file_name: str):
is_train = 'trainval' in file_name
pattern = '_trainval.txt' if is_train else '_test.txt'
suffix_length = len(pattern)
class_files = extract_tarinfos(file_name, _LABEL_PATH, pattern)
with tarfile.open(os.path.join(raw_file_path, file_name)) as tar:
image_dict = dict()
for cf in class_files:
class_name = extract_class_name(cf.name, suffix_length)
f_obj = tar.extractfile(cf)
assert f_obj
lines = f_obj.readlines()
lines = [line.decode('utf-8') for line in lines]
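        # Each line pairs an image id with a presence flag, e.g. '000005 -1'
        # (absent), '000003  0' (difficult) or '000007  1' (present); slicing
        # off the last four characters keeps only the ids of positives.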
curr_image_list = [line[:-4] for line in lines if line.endswith(' 1\n')]
for ci in curr_image_list:
curr_key = ci + '.jpg'
if curr_key not in image_dict.keys():
image_dict[curr_key] = []
image_dict[curr_key].append(label_to_id[class_name])
# Extract actual images.
tarinfos = [tarinfo for tarinfo in tar.getmembers()
if tarinfo.name.split('/')[-1] in image_dict.keys()]
assert tarinfos
for ti in tarinfos:
f_obj = tar.extractfile(ti)
image = Image.open(f_obj)
image.load()
attributes = image_dict[ti.name.split('/')[-1]]
yield types.Example(image=image, multi_label=attributes, label=None)
def gen_split(is_test_split: bool):
if is_test_split:
# extract test set
file = [file for file in files if 'test' in file]
assert len(file) == 1
return get_image_label_pair(file[0])
else:
# extract training set
file = [file for file in files if 'trainval' in file]
assert len(file) == 1
return get_image_label_pair(file[0])
make_gen_fn = lambda: gen_split(is_test_split=False)
per_split_gen = splits.random_split_generator_into_splits_with_fractions(
make_gen_fn, splits.SPLIT_WITH_FRACTIONS_FOR_TRAIN,
splits.MERGED_TRAIN_AND_DEV)
per_split_gen['test'] = gen_split(is_test_split=True)
return (metadata, per_split_gen)
pascal_voc2007_dataset = types.DownloadableDataset(
name='pascal_voc2007',
download_urls=[
types.DownloadableArtefact(
url='http://pjreddie.com/media/files/VOCtrainval_06-Nov-2007.tar',
checksum='c52e279531787c972589f7e41ab4ae64'),
types.DownloadableArtefact(
url='http://pjreddie.com/media/files/VOCtest_06-Nov-2007.tar',
checksum='b6e924de25625d8de591ea690078ad9f')
],
handler=pascal_voc2007_handler)
|
dm_nevis-master
|
dm_nevis/datasets_storage/handlers/pascal_voc2007.py
|
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""UIUC Cars dataset handler."""
import os
from typing import Dict
from dm_nevis.datasets_storage.handlers import extraction_utils
from dm_nevis.datasets_storage.handlers import splits
from dm_nevis.datasets_storage.handlers import types
from tensorflow.io import gfile
_IGNORED_FILES_REGEX = r".*\.txt$|TestImages_Scale"
def uiuc_cars_handler(artifacts_path: str) -> types.HandlerOutput:
"""Imports UIUC Cars dataset (classification task).
The dataset home page is at
http://host.robots.ox.ac.uk/pascal/VOC/databases.html#UIUC
The original dataset is a car detection dataset. However, in the paper where
this task was extracted (namely, "A Single Classifier for View-Invariant
Multiple Object Class Recognition" available at
http://www.macs.hw.ac.uk/bmvc2006/papers/081.pdf), they used the original
  training set (which has both positive and negative examples) for
classification. The authors split the original training set into training and
test sets. Here we'll follow the same procedure but also generate a dev and
dev-test split.
Args:
artifacts_path: Path with downloaded artifacts.
Returns:
Metadata and generator functions.
"""
files = gfile.listdir(artifacts_path)
assert len(files) == 1
label_to_class_index = {"neg": 0, # There is no car.
"pos": 1} # There is a car.
metadata = types.DatasetMetaData(
num_classes=2,
num_channels=1,
image_shape=(), # Ignored for now.
additional_metadata=dict(
class_names=["neg", "pos"],
label_to_id=label_to_class_index,
task_type="classification",
image_type="object"))
def path_to_label_fn(path: str,
label_to_id: Dict[str, types.Label]) -> types.Label:
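    # e.g. a path such as 'TrainImages/pos-17.pgm' (naming assumed) maps to 1.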
label = "pos" if "pos" in path else "neg"
return label_to_id[label]
def make_gen_fn():
return extraction_utils.generate_images_from_tarfiles(
os.path.join(artifacts_path, files[0]),
path_to_label_fn=lambda pp: path_to_label_fn(pp, label_to_class_index),
ignored_files_regex=_IGNORED_FILES_REGEX
)
per_split_gen = splits.random_split_generator_into_splits_with_fractions(
make_gen_fn, splits.SPLIT_WITH_FRACTIONS_FOR_ALL_DATA,
splits.MERGED_TRAIN_AND_DEV)
return metadata, per_split_gen
uiuc_cars_dataset = types.DownloadableDataset(
name="uiuc_cars",
download_urls=[
types.DownloadableArtefact(
url="http://host.robots.ox.ac.uk/pascal/VOC/download/uiuc.tar.gz",
checksum="716c6078f57839bb440967fa74116da3")
],
handler=uiuc_cars_handler,
paper_title="Learning a sparse representation for object detection",
authors="S. Agarwal and D. Roth",
year="2002")
|
dm_nevis-master
|
dm_nevis/datasets_storage/handlers/uiuc_cars.py
|
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Mall dataset handler."""
import os
from typing import List
import zipfile
from dm_nevis.datasets_storage.handlers import types
import numpy as np
from PIL import Image
import scipy.io
from tensorflow.io import gfile
_NUM_CLASSES = 10
_PREFIX = 'mall_dataset/'
_LABEL_FILE = 'mall_gt.mat'
_NUM_TRAIN_IMAS = 800
_TOT_IMAS = 2000
_PERC_DEV = 0.15
_PERC_DEV_TEST = 0.15
def mall_handler(dataset_path: str) -> types.HandlerOutput:
"""Imports Mall dataset.
Link: https://personal.ie.cuhk.edu.hk/~ccloy/downloads_mall_dataset.html
Args:
dataset_path: Path with downloaded datafiles.
Returns:
Metadata and generator functions.
"""
dfile = gfile.listdir(dataset_path)
assert len(dfile) == 1
dfile = dfile[0]
# The paper says they use the first 800 images for training,
# and the rest for testing.
rng = np.random.default_rng(seed=1)
original_training = (rng.permutation(_NUM_TRAIN_IMAS) + 1).tolist()
ranges = [0, int((1-_PERC_DEV_TEST-_PERC_DEV)*_NUM_TRAIN_IMAS),
int((1-_PERC_DEV_TEST)*_NUM_TRAIN_IMAS), _NUM_TRAIN_IMAS]
train_ids = original_training[ranges[0]:ranges[1]]
dev_ids = original_training[ranges[1]:ranges[2]]
train_dev_ids = original_training[ranges[0]:ranges[2]]
devtest_ids = original_training[ranges[2]:ranges[3]]
test_ids = list(range(_NUM_TRAIN_IMAS + 1, _TOT_IMAS + 1))
max_size = 100
counts = []
with zipfile.ZipFile(os.path.join(dataset_path, dfile), 'r') as zf:
    # The unzipped archive contains a folder, frames/, with 2000 images.
    # The base directory also holds a MATLAB file with the count value for
    # each image.
    # As with Trancos, we quantize these counts to turn the counting problem
    # into classification: we bucket contiguous count values so that each
    # bucket holds roughly the same number of images.
with zf.open(os.path.join(_PREFIX, _LABEL_FILE)) as fo:
gf = scipy.io.loadmat(fo)
counts += gf['count'][:, 0].tolist()
tot_imas = len(counts)
assert tot_imas == _TOT_IMAS
# build a histogram
histogram = np.zeros(max_size)
for cnt in range(tot_imas):
histogram[counts[cnt] if (counts[cnt] < max_size) else max_size-1] += 1
  # The idea is to divide the counts into buckets of contiguous values,
  # such that the number of examples in each bucket is roughly the same.
  # To do this, we first compute the cumulative sum of the empirical
  # distribution of counts, and then divide the cumulative density
  # distribution into equally sized buckets. This ensures that each
  # bucket (class) contains roughly the same number of samples.
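  # For instance, if most frames contain between 13 and 53 people, the ten
  # buckets might come out as '13-21', '22-25', and so on, each covering
  # roughly 200 of the 2000 images (illustrative numbers only).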
tot_num_samples = histogram.sum()
cumsum = np.cumsum(histogram)
num_examples_per_bucket = tot_num_samples / _NUM_CLASSES
intervals = []
for cnt in range(1, _NUM_CLASSES):
indices = np.where(cumsum < num_examples_per_bucket * cnt)
assert indices[0].shape[0] > 0
intervals.append(indices[0][-1])
intervals.append(max_size)
count_to_label = []
label = 0
prev_cnt = 0
classname_to_label = dict()
for cnt in range(max_size):
if cnt > intervals[label]:
classname = '%d-%d' % (prev_cnt, cnt)
classname_to_label[classname] = label
label += 1
prev_cnt = cnt + 1
count_to_label.append(label)
classname = '%d-%d' % (prev_cnt, max_size)
classname_to_label[classname] = label
assert label == _NUM_CLASSES - 1
metadata = types.DatasetMetaData(
num_classes=_NUM_CLASSES,
num_channels=3,
image_shape=(), # Ignored for now.
additional_metadata=dict(
label_to_id=classname_to_label,
task_type='regression',
image_type='counting'))
def gen_split(imageset: List[int]):
with zipfile.ZipFile(os.path.join(dataset_path, dfile), 'r') as zf:
for f in zf.infolist():
if f.is_dir() or not f.filename.endswith('jpg'):
continue
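        # Frame files are named like 'seq_000123.jpg' (naming assumed), so
        # stripping the 4-char prefix and the '.jpg' suffix leaves the frame id.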
name = int(f.filename.split('/')[-1][4:-4])
if name not in imageset:
continue
image = Image.open(zf.open(f))
image.load()
# image names start from 1, as opposed to 0.
label = count_to_label[counts[name - 1]]
yield (image, label)
return (metadata, {
'train': gen_split(train_ids),
'dev': gen_split(dev_ids),
'train_and_dev': gen_split(train_dev_ids),
'dev-test': gen_split(devtest_ids),
'test': gen_split(test_ids),
})
mall_dataset = types.DownloadableDataset(
name='mall',
download_urls=[
types.DownloadableArtefact(
url='https://personal.ie.cuhk.edu.hk/~ccloy/files/datasets/mall_dataset.zip',
checksum='48a772b5e55e8e9a66a3c8f98598fc3b')
],
handler=mall_handler,
paper_title='Feature Mining for Localised Crowd Counting',
authors='K. Chen, C. C. Loy, S. Gong, and T. Xiang',
year=2012)
|
dm_nevis-master
|
dm_nevis/datasets_storage/handlers/mall.py
|
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Butterflies dataset handler."""
import io
import os
import re
import zipfile
from dm_nevis.datasets_storage.handlers import splits
from dm_nevis.datasets_storage.handlers import types
from PIL import Image
# pylint: disable=line-too-long
def butterflies_handler(dataset_path: str) -> types.HandlerOutput:
"""Imports Butterflies dataset.
Semi-Local Affine Parts for Object Recognition
S. Lazebnik, C. Schmid, J. Ponce
Published in BMVC 7 September 2004
Link:
https://web.archive.org/web/20070829035029/http://www-cvr.ai.uiuc.edu/ponce_grp/data/index.html
Args:
dataset_path: Path with downloaded datafiles.
Returns:
Metadata and generator functions.
"""
datafile = os.path.join(dataset_path, 'butterflies.zip')
with zipfile.ZipFile(os.path.join(dataset_path, datafile), 'r') as zf:
labels = set([os.path.split(member)[0] for member in zf.namelist()])
num_classes = len(labels)
label_str_to_id = {l: i for i, l in enumerate(labels)}
metadata = types.DatasetMetaData(
num_classes=num_classes,
num_channels=3,
image_shape=(), # Ignored for now.
additional_metadata=dict(label_str_to_id=label_str_to_id))
  # The test-split members are copied verbatim from
  # http://www-cvr.ai.uiuc.edu/ponce_grp/data/butterflies/butterflies_f_order.txt
test_images = {
'admiral': {14, 103, 41, 86, 9, 53, 6, 55, 66, 92},
'black_swallowtail': {25, 41, 2, 22, 31, 15, 28, 35, 4, 27},
'machaon': {12, 24, 20, 52, 53, 29, 82, 25, 83, 14},
'monarch_closed': {29, 9, 58, 72, 23, 44, 67, 36, 27, 28},
'monarch_open': {46, 35, 58, 8, 76, 21, 34, 15, 83, 78},
'peacock': {1, 96, 131, 124, 22, 59, 126, 52, 20, 86},
'zebra': {57, 18, 21, 26, 43, 78, 31, 90, 6, 75}
}
assert set(test_images.keys()) == set(labels)
assert all([len(s) == 10 for s in test_images.values()])
def gen_split(select_test_split: bool):
with zipfile.ZipFile(datafile, 'r') as zf:
for member in zf.infolist():
if member.is_dir():
continue
label_str, basename = os.path.split(member.filename)
label_id = label_str_to_id[label_str]
image_idx_match = re.search(r'[a-z]+(\d+)\.jpg', basename)
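        # e.g. a basename like 'admiral014.jpg' (naming assumed) gives
        # image_idx 14.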
if not image_idx_match:
raise ValueError('Could not parse image filename')
else:
image_idx = int(image_idx_match.group(1))
is_test_image = image_idx in test_images[label_str]
if select_test_split != is_test_image:
# Ignore image if not in the requested split
continue
image = Image.open(io.BytesIO(zf.read(member))).convert('RGB')
image.load()
yield (image, label_id)
make_gen_fn = lambda: gen_split(select_test_split=False)
per_split_gen = splits.random_split_generator_into_splits_with_fractions(
make_gen_fn, splits.SPLIT_WITH_FRACTIONS_FOR_TRAIN,
splits.MERGED_TRAIN_AND_DEV)
per_split_gen['test'] = gen_split(select_test_split=True)
return (metadata, per_split_gen)
butterflies_dataset = types.DownloadableDataset(
name='butterflies',
paper_title='Semi-Local Affine Parts for Object Recognition',
paper_url='http://www.bmva.org/bmvc/2004/papers/paper_038.html',
authors='S. Lazebnik, C. Schmid, J. Ponce',
year=2004,
website_url='https://web.archive.org/web/20070829035029/http://www-cvr.ai.uiuc.edu/ponce_grp/data/index.html',
# This dataset cannot be downloaded from Cloudtop instance.
download_urls=[
types.DownloadableArtefact(
url='https://web.archive.org/web/20070829035029/http://www-cvr.ai.uiuc.edu/ponce_grp/data/butterflies/butterflies.zip',
checksum='cc9bfe5e22f9001262b785ff33221581')
],
handler=butterflies_handler)
|
dm_nevis-master
|
dm_nevis/datasets_storage/handlers/butterflies.py
|
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""USPS handler."""
import bz2
import os
from typing import Tuple
from dm_nevis.datasets_storage.handlers import splits
from dm_nevis.datasets_storage.handlers import types
import numpy as np
from PIL import Image
_ORIG_IMG_RANGE = (-1., 1.)
_NEW_IMG_RANGE = (0., 255.)
_TRAIN_FNAME = 'usps.bz2'
_TEST_FNAME = 'usps.t.bz2'
def _rescale_image_back_to_pixel_range(
img: np.ndarray, orig_img_range: Tuple[float, float],
new_img_range: Tuple[float, float]) -> np.ndarray:
"""Rescales image from original image range to a new one."""
assert len(orig_img_range) == 2
assert len(new_img_range) == 2
delta_orig_range = orig_img_range[1] - orig_img_range[0]
delta_new_range = new_img_range[1] - new_img_range[0]
  # Normalize to [0, 1].
  normalized_img = (img - orig_img_range[0]) / delta_orig_range
  # Rescale to the new range, e.g. [0, 255].
  new_range_img = normalized_img * delta_new_range + new_img_range[0]
return new_range_img
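# A quick sanity check of the mapping above (a sketch, not part of the
# pipeline): _rescale_image_back_to_pixel_range(np.array([-1., 0., 1.]),
# (-1., 1.), (0., 255.)) returns array([0., 127.5, 255.]).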
def _parse_label_and_image_from_line(line: str):
"""Parses label and image from a parsed line."""
label, *img_data = line.split(' ')
label = int(label)
# `img_data` contains pairs x:y, where x is a pixel number and y is the value.
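  # A typical line in the LIBSVM multiclass format looks like
  #   '7 1:-1.000000 2:-0.966667 ... 256:-1.000000'
  # i.e. a 1-based label followed by 256 'index:value' pairs; the final token
  # left over from the split is discarded below.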
image_array = np.array([float(el.split(':')[1]) for el in img_data[:-1]])
image_array = _rescale_image_back_to_pixel_range(
image_array, _ORIG_IMG_RANGE, _NEW_IMG_RANGE).astype(np.uint8)
label = label - 1
return image_array, label
def usps_handler(dataset_path: str) -> types.HandlerOutput:
"""USPS handler."""
def make_gen(split):
fname = _TRAIN_FNAME if split == 'train' else _TEST_FNAME
with bz2.BZ2File(os.path.join(dataset_path, fname)) as bzf:
for line in bzf:
if not line:
continue
image_array, label = _parse_label_and_image_from_line(
line.decode('utf-8'))
image = Image.fromarray(image_array.reshape((16, 16)))
yield (image, label)
metadata = types.DatasetMetaData(
num_classes=10,
num_channels=1,
image_shape=(), # Ignored for now.
additional_metadata=dict(
task_type='classification',
image_type='ocr',
))
make_gen_fn = lambda: make_gen('train')
per_split_gen = splits.random_split_generator_into_splits_with_fractions(
make_gen_fn, splits.SPLIT_WITH_FRACTIONS_FOR_TRAIN,
splits.MERGED_TRAIN_AND_DEV)
per_split_gen['test'] = make_gen('test')
return metadata, per_split_gen
usps_dataset = types.DownloadableDataset(
name='usps',
download_urls=[
types.DownloadableArtefact(
url='https://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/multiclass/usps.bz2',
checksum='ec16c51db3855ca6c91edd34d0e9b197'),
types.DownloadableArtefact(
url='https://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/multiclass/usps.t.bz2',
checksum='8ea070ee2aca1ac39742fdd1ef5ed118')
],
website_url='https://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/multiclass.html#usps',
handler=usps_handler)
|
dm_nevis-master
|
dm_nevis/datasets_storage/handlers/usps.py
|
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""IAPRTC-12 dataset handler."""
import collections
import os
import pathlib
import zipfile
from dm_nevis.datasets_storage.handlers import extraction_utils
from dm_nevis.datasets_storage.handlers import splits
from dm_nevis.datasets_storage.handlers import types
import numpy as np
import scipy.io
from tensorflow.io import gfile
import tensorflow_datasets as tfds
_LABEL_MAPPING_PATH = 'saiaprtc12ok/benchmark/wlist.txt'
_TRAINING_LIST_PATH = 'saiaprtc12ok/matlab/matlab/training.mat'
_VALIDATION_LIST_PATH = 'saiaprtc12ok/matlab/matlab/validation.mat'
_TEST_LIST_PATH = 'saiaprtc12ok/matlab/matlab/testing.mat'
_LABEL_PATH = 'saiaprtc12ok/benchmark/saiapr_tc-12/' + '{idx:0=2d}' + '/labels.txt'
_IMAGE_PATH = '/images/'
_DUPLICATE = 'saiaprtc12ok'
_NUM_IMAGE_DIRECTORIES = 41
IGNORED_FILES_REGEX = r'.*\.eps$|.*\.txt$|.*\.mat$|.*\.pdf$'
def iaprtc12_handler(artifacts_path: str) -> types.HandlerOutput:
"""Imports IAPRTC-12 dataset.
The dataset homepage is at:
https://www.kaggle.com/datasets/nastyatima/iapr-tc12
  The paper proposing the original dataset is:
The IAPR Benchmark: A New Evaluation Resource for Visual Information Systems,
Grubinger, Michael, Clough Paul D., Müller Henning, and Deselaers Thomas ,
International Conference on Language Resources and Evaluation, 24/05/2006,
Genoa, Italy, (2006)
In this handler we use the version with additional annotation and
training/valid/test splits provided by H.J. Escalante et al. (see below for a
full reference).
This is a multi-label classification dataset of natural images.
There are 20000 images in total, and 276 possible classes.
In the directory saiaprtc12ok/matlab/matlab there are:
testing.mat training.mat validation.mat
which store the ids of the corresponding splits.
saiaprtc12ok/benchmark/wlist.txt stores the association between label string
and label integer id.
saiaprtc12ok/benchmark/saiapr_tc-12/xx/labels.txt with xx in [00..40] is a
file storing the image id in that folder, region id (not used by us) and the
label id.
saiaprtc12ok/benchmark/saiapr_tc-12/xx/images/yyy.jpg with xx in [00..40] and
yyy the image id are the folders storing the actual images.
Args:
artifacts_path: Path with downloaded artifacts.
Returns:
Metadata and generator functions.
"""
files = gfile.listdir(artifacts_path)
assert len(files) == 1
class_name_to_index = dict()
ids = dict()
with zipfile.ZipFile(os.path.join(artifacts_path, files[0]), 'r') as zf:
with zf.open(_LABEL_MAPPING_PATH) as fid:
for line in fid:
fields = line.decode('UTF-8').split('\t')
fields[1] = fields[1].strip() # remove trailing \n
class_name_to_index[
fields[1] if fields[1] else ' '] = int(fields[0]) - 1
def get_image_ids(path, name):
with zipfile.ZipFile(os.path.join(artifacts_path, files[0]), 'r') as zf:
with zf.open(path) as fo:
mat = scipy.io.loadmat(fo)
return mat[name].squeeze().tolist()
ids['training'] = get_image_ids(_TRAINING_LIST_PATH, 'training')
ids['validation'] = get_image_ids(_VALIDATION_LIST_PATH, 'validation')
ids['all_training'] = ids['training'] + ids['validation']
ids['testing'] = get_image_ids(_TEST_LIST_PATH, 'testing')
def extract_labels(path):
# This is a dictionary where keys are image ids, and values are labels.
# Since it is a multi-label classification problem, there could be a
# variable (>1) number of labels per image.
output = collections.defaultdict(list) # init with empty list
with zipfile.ZipFile(os.path.join(artifacts_path, files[0]), 'r') as zf:
if path in zf.namelist():
with zf.open(path) as fo:
data = np.loadtxt(fo)
          for cnt in range(data.shape[0]):
            # Each row is: image_id, region_id (unused here), 1-based label_id.
            output[int(data[cnt][0])].append(int(data[cnt][2] - 1))
return output
imageid_to_label = dict()
for folder_id in range(0, _NUM_IMAGE_DIRECTORIES):
imageid_to_label.update(extract_labels(_LABEL_PATH.format(idx=folder_id)))
def _labels_from_path_fn(fname):
name = int(pathlib.Path(fname).stem)
assert name in imageid_to_label.keys()
all_labels = list(set(imageid_to_label[name]))
return all_labels
def _path_filter(fname, is_train):
img_ids = ids['all_training'] if is_train else ids['testing']
img_id = int(pathlib.Path(fname).stem)
return ((_DUPLICATE not in fname) and # Remove duplicate folder.
(_IMAGE_PATH in fname) and # Remove segmentations.
(img_id in img_ids)) # Consider images in the desired set.
def gen_split(is_train):
return extraction_utils.generate_images_from_zip_files_with_multilabels(
artifacts_path,
files,
_labels_from_path_fn,
ignored_files_regex=IGNORED_FILES_REGEX,
path_filter=lambda path: _path_filter(path, is_train),
convert_mode='RGB')
num_classes = len(class_name_to_index.keys())
metadata = types.DatasetMetaData(
num_classes=num_classes,
num_channels=3,
image_shape=(), # Ignored for now.
additional_metadata=dict(
label_to_id=class_name_to_index,
task_type='multi-label',
image_type='object'),
features=tfds.features.FeaturesDict({
'multi_label':
tfds.features.Sequence(
tfds.features.ClassLabel(num_classes=num_classes)),
'png_encoded_image':
tfds.features.Image()
}))
# TODO: Make more efficient deduplication algorithm.
make_unique_gen_fn = lambda: gen_split(is_train=True)
# TODO: re-enable de-duplication.
# extraction_utils.deduplicate_data_generator(
# itertools.chain(gen_split(is_train=True), gen_split(is_train=False)))
per_split_gen = splits.random_split_generator_into_splits_with_fractions(
make_unique_gen_fn, splits.SPLIT_WITH_FRACTIONS_FOR_ALL_DATA,
splits.MERGED_TRAIN_AND_DEV)
return metadata, per_split_gen
iaprtc12_dataset = types.DownloadableDataset(
name='iaprtc12',
download_urls=[
types.KaggleDataset(
dataset_name='nastyatima/iapr-tc12',
checksum='ee251615ac2dbb55eea5a7e5e710740a')
],
website_url='https://www.kaggle.com/datasets/nastyatima/iapr-tc12',
paper_url='https://ccc.inaoep.mx/~emorales/Papers/2010/hugo.pdf',
    authors='Hugo Jair Escalante, and Carlos A. Hernández, and Jesus A. Gonzalez, and A. López-López, and Manuel Montes, and Eduardo F. Morales, and L. Enrique Sucar, and Luis Villaseñor and Michael Grubinger',
papers_with_code_url='https://paperswithcode.com/dataset/iapr-tc-12',
handler=iaprtc12_handler)
|
dm_nevis-master
|
dm_nevis/datasets_storage/handlers/iaprtc12.py
|
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Food 101 handler."""
import os
import tarfile
from dm_nevis.datasets_storage.handlers import splits
from dm_nevis.datasets_storage.handlers import types
from PIL import Image
_FOOD_FNAME = 'food-101.tar.gz'
_TRAIN_CLASSES_FNAME = 'food-101/meta/train.txt'
_TEST_CLASSES_FNAME = 'food-101/meta/test.txt'
_NUM_CLASSES = 101
_LABELS = [
'takoyaki', 'bruschetta', 'lobster_bisque', 'bread_pudding', 'scallops',
'pancakes', 'donuts', 'ceviche', 'grilled_salmon', 'ravioli', 'prime_rib',
'waffles', 'eggs_benedict', 'beef_tartare', 'chicken_wings', 'clam_chowder',
'panna_cotta', 'ramen', 'french_fries', 'seaweed_salad', 'lasagna',
'fried_calamari', 'deviled_eggs', 'carrot_cake', 'strawberry_shortcake',
'chocolate_mousse', 'poutine', 'beignets', 'caesar_salad', 'bibimbap',
'garlic_bread', 'cheese_plate', 'shrimp_and_grits', 'caprese_salad',
'beet_salad', 'dumplings', 'macarons', 'churros', 'samosa', 'creme_brulee',
'miso_soup', 'french_onion_soup', 'risotto', 'pulled_pork_sandwich',
'hot_and_sour_soup', 'onion_rings', 'spaghetti_bolognese', 'edamame',
'beef_carpaccio', 'steak', 'grilled_cheese_sandwich', 'peking_duck',
'frozen_yogurt', 'mussels', 'red_velvet_cake', 'oysters', 'greek_salad',
'foie_gras', 'pho', 'spaghetti_carbonara', 'pad_thai', 'huevos_rancheros',
'sashimi', 'sushi', 'gnocchi', 'hummus', 'pork_chop', 'falafel',
'chicken_curry', 'breakfast_burrito', 'club_sandwich', 'cannoli',
'chocolate_cake', 'fried_rice', 'apple_pie', 'guacamole',
'macaroni_and_cheese', 'hot_dog', 'cup_cakes', 'paella', 'ice_cream',
'escargots', 'spring_rolls', 'crab_cakes', 'croque_madame', 'hamburger',
'baby_back_ribs', 'baklava', 'pizza', 'filet_mignon', 'cheesecake',
'lobster_roll_sandwich', 'tiramisu', 'omelette', 'tacos', 'nachos', 'gyoza',
'chicken_quesadilla', 'french_toast', 'tuna_tartare', 'fish_and_chips'
]
def food101_handler(dataset_path: str) -> types.HandlerOutput:
"""Food 101 dataset handler."""
label_to_id = dict(
((label, label_id) for label_id, label in enumerate(_LABELS)))
metadata = types.DatasetMetaData(
num_classes=_NUM_CLASSES,
num_channels=3,
image_shape=(), # Ignored for now.
additional_metadata=dict(
label_to_id=label_to_id,
task_type='classification',
image_type='object',
))
with tarfile.open(os.path.join(dataset_path, _FOOD_FNAME)) as tf:
test_file = tf.extractfile(_TEST_CLASSES_FNAME)
test_fnames = {fname.decode('utf-8').strip() for fname in test_file}
train_file = tf.extractfile(_TRAIN_CLASSES_FNAME)
train_fnames = {fname.decode('utf-8').strip() for fname in train_file}
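  # Entries in meta/{train,test}.txt look like 'apple_pie/1005649', i.e.
  # '{class_name}/{image_id}' with no file extension.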
def make_gen(split_fnames, class_name_to_label):
with tarfile.open(os.path.join(dataset_path, _FOOD_FNAME), 'r|gz') as tf:
for member in tf:
if member.isdir():
continue
path = member.path
class_name = os.path.basename(os.path.dirname(path))
image_fname_with_ext = os.path.basename(path)
image_fname, _ = os.path.splitext(image_fname_with_ext)
if os.path.join(class_name, image_fname) not in split_fnames:
continue
image = Image.open(tf.extractfile(member)).convert('RGB')
label = class_name_to_label[class_name]
image.load()
yield (image, label)
make_gen_fn = lambda: make_gen(train_fnames, label_to_id)
per_split_gen = splits.random_split_generator_into_splits_with_fractions(
make_gen_fn, splits.SPLIT_WITH_FRACTIONS_FOR_TRAIN,
splits.MERGED_TRAIN_AND_DEV)
per_split_gen['test'] = make_gen(test_fnames, label_to_id)
return metadata, per_split_gen
food101_dataset = types.DownloadableDataset(
name='food101',
download_urls=[
types.DownloadableArtefact(
url='http://data.vision.ee.ethz.ch/cvl/food-101.tar.gz',
checksum='85eeb15f3717b99a5da872d97d918f87')
],
website_url='https://data.vision.ee.ethz.ch/cvl/datasets_extra/food-101/',
handler=food101_handler)
|
dm_nevis-master
|
dm_nevis/datasets_storage/handlers/food101.py
|
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""VisTex dataset handler."""
import os
import re
from typing import List
from dm_nevis.datasets_storage.handlers import extraction_utils as utils
from dm_nevis.datasets_storage.handlers import splits
from dm_nevis.datasets_storage.handlers import types
import ncompress
from PIL import Image
from tensorflow.io import gfile
_VALID_IMAGES_REGEX = r'^.+/Images/Reference/(.+)/.+\.ppm'
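# e.g. a path like 'VisionTexture/VisTex/Images/Reference/Bark/Bark.0000.ppm'
# (directory layout assumed) matches with class name 'Bark'.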
def _decompress_archives(filenames: List[str],
foldername: str) -> None:
for filename in filenames:
if filename.endswith('tar.Z'):
path = os.path.join(foldername, filename)
with gfile.GFile(path, 'rb') as f:
buffer = ncompress.decompress(f.read())
with gfile.GFile(path[:-2], 'wb') as out:
out.write(buffer)
os.remove(path)
def _unpack_archives(dataset_path: str, foldername: str) -> None:
filenames = gfile.listdir(os.path.join(dataset_path, foldername))
for filename in filenames:
if '.tar' in filename:
utils.unpack_file(filename, os.path.join(dataset_path, foldername))
def _extract_classname_from_path(path: str) -> str:
match = re.match(_VALID_IMAGES_REGEX, path)
if not match:
raise ValueError(f'Failed to match class for {path}')
return match.groups()[0]
def _get_all_filenames(path: str) -> List[str]:
all_filenames = []
for path, _, files in os.walk(path):
for filename in files:
all_filenames.append(os.path.join(path, filename))
return all_filenames
def _extract_all_classnames(all_filenames: List[str]) -> List[str]:
"""Extracts all the names of classes present in the dataset."""
classnames = []
for filename in all_filenames:
try:
c = _extract_classname_from_path(filename)
if c not in classnames:
classnames.append(c)
except ValueError:
continue
return classnames
def _extract_label_from_path(classname_list: List[str],
                             path: str) -> int:
c = _extract_classname_from_path(path)
label = classname_list.index(c)
return label
def vistex_handler(dataset_path: str) -> types.HandlerOutput:
"""Imports VisTex dataset.
The dataset provides two types of images: reference textures and texture
scenes. We load only the reference textures.
The archive contains a collection of LZW-compressed archives.
The data is organized in 2 subdirectories:
- A subdirectory organised relatively to the images types (reference texture
or texture scene) and classes. It contains symbolic links to the actual
images.
- A subdirectory that contains the actual images arranged according to their
resolution.
It is therefore necessary to unpack all the archives to be able to load the
images we are interested in (i.e. reference textures) and assign the correct
labels to them.
Link:
https://vismod.media.mit.edu/vismod/imagery/VisionTexture/vistex.html
Args:
dataset_path: Path with downloaded datafiles.
Returns:
Metadata and generator functions.
"""
# TODO: Revisit to avoid writing data to disk.
archive_path = gfile.listdir(dataset_path)[0]
utils.unpack_file(archive_path, dataset_path)
unpacked_folder_name_1 = 'VisionTexture'
unpacked_folder_name_2 = 'VisTex'
filenames = gfile.listdir(os.path.join(dataset_path, unpacked_folder_name_1))
_decompress_archives(filenames,
os.path.join(dataset_path, unpacked_folder_name_1))
_unpack_archives(dataset_path, unpacked_folder_name_1)
all_filenames = _get_all_filenames(
os.path.join(dataset_path, unpacked_folder_name_1,
unpacked_folder_name_2))
class_names = _extract_all_classnames(all_filenames)
metadata = types.DatasetMetaData(
num_channels=3,
num_classes=len(class_names),
image_shape=(), # Ignored for now.
preprocessing='random_crop', # select random crops in the images
additional_metadata=dict(
labels=class_names,
task_type='classification',
image_type='texture'
))
def gen():
for filename in all_filenames:
if not re.match(_VALID_IMAGES_REGEX, filename):
continue
image = Image.open(filename)
image.load()
label = _extract_label_from_path(class_names, filename)
yield (image, label)
# TODO: review split function to make sure each class is present
# at least in train and test.
per_split_gen = splits.random_split_generator_into_splits_with_fractions(
gen, splits.SPLIT_WITH_FRACTIONS_FOR_ALL_DATA,
splits.MERGED_TRAIN_AND_DEV)
return (metadata, per_split_gen)
vistex_dataset = types.DownloadableDataset(
name='vistex',
download_urls=[
types.DownloadableArtefact(
url='http://vismod.media.mit.edu/pub/VisTex/VisTex.tar.gz',
checksum='f176ad5c9383141f981e3668b232add7')
],
handler=vistex_handler,
)
|
dm_nevis-master
|
dm_nevis/datasets_storage/handlers/vistex.py
|
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for dm_nevis.datasets_storage.handlers.caltech_categories."""
from absl.testing import absltest
from absl.testing import parameterized
from dm_nevis.datasets_storage.handlers import caltech_categories
class CaltechCategoriesTest(parameterized.TestCase):
def test_caltech_categories_handler(self):
fixture_path = self.create_tempdir().full_path
caltech_categories.write_fixture(fixture_path)
dataset = caltech_categories.caltech_categories_dataset
metadata, generators = dataset.handler(fixture_path)
self.assertEqual(metadata.num_classes, 2)
samples = list()
for split in ['train', 'dev', 'dev-test', 'test']:
self.assertIn(split, generators)
samples.extend(list(generators[split]))
self.assertLen(samples, 5 * len(caltech_categories.AVAILABLE_CATEGORIES))
self.assertTrue(all(img.size == (50, 50) for img, label in samples))
seen_labels = set(label for img, label in samples)
self.assertEqual(seen_labels, {0, 1})
self.assertEqual(metadata.additional_metadata['class_names'],
['car', 'motorcycle'])
def test_caltech_categories_handler_cars_only(self):
fixture_path = self.create_tempdir().full_path
caltech_categories.write_fixture(fixture_path)
dataset = caltech_categories.caltech_categories_dataset
metadata, generators = dataset.handler(
fixture_path, categories=['cars_2001'])
self.assertEqual(metadata.num_classes, 1)
samples = list(generators['train'])
seen_labels = set(label for img, label in samples)
self.assertEqual(seen_labels, {0})
if __name__ == '__main__':
absltest.main()
|
dm_nevis-master
|
dm_nevis/datasets_storage/handlers/caltech_categories_test.py
|
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Labeled Faces in the Wild Aligned Dataset handler."""
import os
from typing import List
import zipfile
from dm_nevis.datasets_storage.handlers import types
from PIL import Image
_FNAME = 'lfwa.zip'
_MIN_NUM_IMGS = 5
_NUM_CLASSES = 423
_NUM_SAMPLES = 5985
_SPLITS = ['train', 'dev', 'dev-test', 'train_and_dev', 'test']
def _get_range(list_len: int, split: str) -> List[int]:
"""Returns range of indexes for a given split."""
if split == 'train':
return list(range(0, list_len - 4))
elif split == 'dev':
return list(range(list_len - 4, list_len - 3))
elif split == 'dev-test':
return list(range(list_len - 3, list_len - 2))
elif split == 'train_and_dev':
return list(range(0, list_len - 3))
else:
assert split == 'test'
return list(range(list_len - 2, list_len))
def lfwa_handler(dataset_path: str) -> types.HandlerOutput:
"""LFW Aligned dataset.
Please refer to LFW for comments on what this dataset is about, the task and
structure of the package. This is a version of LFW where faces have been
aligned, there is no other difference.
Args:
dataset_path: Path with downloaded artifacts.
Returns:
Metadata and generator functions.
"""
data = dict()
with zipfile.ZipFile(os.path.join(dataset_path, _FNAME), 'r') as zf:
all_fnames = [ff.filename for ff in zf.infolist()]
all_fnames = [f for f in all_fnames if os.path.splitext(f)[1] == '.jpg']
for member in all_fnames:
image_fname = os.path.basename(member)
subject_name = os.path.basename(os.path.dirname(member))
if subject_name not in data:
data[subject_name] = [image_fname]
else:
data[subject_name].append(image_fname)
splits_fnames = {
'train': [],
'dev': [],
'train_and_dev': [],
'dev-test': [],
'test': []
}
splits_labels = {
'train': [],
'dev': [],
'train_and_dev': [],
'dev-test': [],
'test': []
}
label_id = 0
label_str_to_int = dict()
tot_num_examples = 0
for subject_name, file_list in data.items():
n = len(file_list)
if n < _MIN_NUM_IMGS:
continue
for split in _SPLITS:
srange = _get_range(n, split)
splits_labels[split] += [label_id] * len(srange)
splits_fnames[split] += [file_list[i] for i in srange]
label_str_to_int[subject_name] = label_id
tot_num_examples += n
label_id += 1
assert label_id == _NUM_CLASSES
assert tot_num_examples == _NUM_SAMPLES
def gen(split):
with zipfile.ZipFile(os.path.join(dataset_path, _FNAME), 'r') as zf:
all_fnames = [ff.filename for ff in zf.infolist()]
all_fnames = [f for f in all_fnames if os.path.splitext(f)[1] == '.jpg']
for member in all_fnames:
image_fname = os.path.basename(member)
if image_fname not in splits_fnames[split]:
continue
index = splits_fnames[split].index(image_fname)
label = splits_labels[split][index]
image = Image.open(zf.open(member)).convert('RGB')
image.load()
yield (image, label)
metadata = types.DatasetMetaData(
num_classes=_NUM_CLASSES,
num_channels=3,
image_shape=(), # Ignored for now.
additional_metadata=dict(
label_to_id=label_str_to_int,
task_type='classification',
image_type='face',
))
per_split_gen = {}
for split in _SPLITS:
per_split_gen[split] = gen(split)
return metadata, per_split_gen
lfwa_dataset = types.DownloadableDataset(
name='lfwa',
# TODO: Fix download link.
download_urls=[
types.DownloadableArtefact(
url='https://drive.google.com/u/0/uc?id=1p1wjaqpTh_5RHfJu4vUh8JJCdKwYMHCp&export=download&confirm=y',
checksum='96313a4780499f939bc4a06d5bebaf7d')
],
website_url='https://talhassner.github.io/home/projects/lfwa/index.html',
handler=lfwa_handler,
paper_title='Effective Face Recognition by Combining Multiple Descriptors and Learned Background Statistics',
authors='Lior Wolf, Tal Hassner, and Yaniv Taigman',
year='2011')
|
dm_nevis-master
|
dm_nevis/datasets_storage/handlers/lfwa.py
|
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""LandSat UCI Repo dataset handler."""
import os
from dm_nevis.datasets_storage.handlers import splits
from dm_nevis.datasets_storage.handlers import types
import numpy as np
from PIL import Image
from tensorflow.io import gfile
_TRAIN_FNAME = "sat.trn"
_TEST_FNAME = "sat.tst"
def landsat_handler(artifacts_path: str) -> types.HandlerOutput:
"""Imports LandSat dataset.
The dataset home page is at
https://archive.ics.uci.edu/ml/datasets/Statlog+(Landsat+Satellite)
  The dataset consists of two ASCII files, one for training and one for
  testing. Each file is a table with as many rows as examples and 37 columns:
  the first 36 columns hold the pixel values of a 3x3 patch of satellite
  image with 4 channels, and the last column is the label.
The task is classification:
1 red soil
2 cotton crop
3 grey soil
4 damp grey soil
5 soil with vegetation stubble
6 mixture class (all types present)
7 very damp grey soil
We are going to drop the last channel and provide as input images the 3x3 (3
channels) patches. There are a total of 4435 patches in the training set.
Args:
artifacts_path: Path with downloaded artifacts.
Returns:
Metadata and generator functions.
"""
files = gfile.listdir(artifacts_path)
assert len(files) == 2
label_to_class_index = {
"red soil": 0,
"cotton crop": 1,
"grey soil": 2,
"damp grey soil": 3,
"soil with vegetation stubble": 4,
"mixture class": 5,
"very damp grey soil": 6}
metadata = types.DatasetMetaData(
num_classes=7,
num_channels=3,
image_shape=(3, 3),
additional_metadata=dict(
label_to_id=label_to_class_index,
task_type="classification",
image_type="object"))
def gen(path):
data = np.loadtxt(path)
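    # data has shape (num_samples, 37): 36 spectral values followed by the
    # 1-based class label.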
labels = data[:, -1]
tot_num_samples = data.shape[0]
side = 3
num_channels = 4
all_patches = data[:, :-1].reshape((tot_num_samples, side, side,
num_channels))
for cnt in range(tot_num_samples):
ima = all_patches[cnt, :, :, :-1]
label = int(labels[cnt] - 1)
image = Image.fromarray(ima.astype("uint8"))
yield (image, label)
make_gen_fn = lambda: gen(os.path.join(artifacts_path, _TRAIN_FNAME))
per_split_gen = splits.random_split_generator_into_splits_with_fractions(
make_gen_fn, splits.SPLIT_WITH_FRACTIONS_FOR_TRAIN,
splits.MERGED_TRAIN_AND_DEV)
per_split_gen["test"] = gen(os.path.join(artifacts_path, _TEST_FNAME))
return metadata, per_split_gen
landsat_dataset = types.DownloadableDataset(
name="landsat",
download_urls=[
types.DownloadableArtefact(
url="https://archive.ics.uci.edu/ml/machine-learning-databases/statlog/satimage/sat.trn",
checksum="2c5ba2900da0183cab2c41fdb279fa5b"),
types.DownloadableArtefact(
url="https://archive.ics.uci.edu/ml/machine-learning-databases/statlog/satimage/sat.tst",
checksum="02c995991fecc864e809b2c4c42cd983")
],
handler=landsat_handler,
paper_title="{UCI} Machine Learning Repository",
authors="Dua, Dheeru and Graff, Casey",
year="2019")
|
dm_nevis-master
|
dm_nevis/datasets_storage/handlers/landsat.py
|
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for dm_nevis.datasets_storage.handlers.extraction_utils."""
from absl.testing import absltest
from absl.testing import parameterized
from dm_nevis.datasets_storage.handlers import extraction_utils
import numpy as np
from PIL import Image
_IMG_VALUES_A = np.array([1 for _ in range(4)], dtype=np.uint8)
_IMG_VALUES_B = np.array([2 for _ in range(4)], dtype=np.uint8)
class ExtractionUtilsTest(parameterized.TestCase):
def test_deduplicate_data_generator(self):
duplicated_image_values = (
(Image.fromarray(_IMG_VALUES_A), 1),
(Image.fromarray(_IMG_VALUES_A), 1),
(Image.fromarray(_IMG_VALUES_A), 1),
(Image.fromarray(_IMG_VALUES_B), 2),
(Image.fromarray(_IMG_VALUES_B), 2),
)
expected_image_values = (
(Image.fromarray(_IMG_VALUES_A), 1),
(Image.fromarray(_IMG_VALUES_B), 2),
)
unique_gen = extraction_utils.deduplicate_data_generator(
duplicated_image_values)()
unique_values = tuple(unique_gen)
self.assertLen(unique_values, 2)
self.assertEqual(unique_values, expected_image_values)
@parameterized.parameters([
(16, 16, 16, 16, 16),
(512, 16, 16, 16, 16),
(256, 869, 1079, 206, 256),
(256, 1079, 869, 256, 206),
])
def test_resize_to_max_size(self, max_size, width, height, expected_width,
expected_height):
image = Image.new('RGB', (width, height))
result_image = extraction_utils.resize_to_max_size(image, max_size)
self.assertEqual(result_image.size, (expected_width, expected_height))
if __name__ == '__main__':
absltest.main()
|
dm_nevis-master
|
dm_nevis/datasets_storage/handlers/extraction_utils_test.py
|
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""MNIST Rotation handler."""
import gzip
import os
from dm_nevis.datasets_storage.handlers import splits
from dm_nevis.datasets_storage.handlers import types
import numpy as np
from PIL import Image
import scipy.ndimage
_TRAIN_IMAGES_FILE = 'train-images-idx3-ubyte.gz'
_TRAIN_LABELS_FILE = 'train-labels-idx1-ubyte.gz'
_TEST_IMAGES_FILE = 't10k-images-idx3-ubyte.gz'
_TEST_LABELS_FILE = 't10k-labels-idx1-ubyte.gz'
_NUM_CLASSES = 10
_SEED = 99_999
# pylint:disable=missing-function-docstring
def mnist_rotation_handler(dataset_path: str) -> types.HandlerOutput:
np.random.seed(_SEED)
metadata = types.DatasetMetaData(
num_classes=_NUM_CLASSES,
num_channels=1,
image_shape=(), # Ignored for now.
additional_metadata=dict(
task_type='classification',
image_type='ocr'))
def gen_fn(images_file, labels_file, rotate=True):
images_path = os.path.join(dataset_path, images_file)
labels_path = os.path.join(dataset_path, labels_file)
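    # IDX files start with a header: 16 bytes for images (magic, count, rows,
    # cols as big-endian int32) and 8 bytes for labels (magic, count); the
    # offsets below skip these headers.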
with gzip.open(images_path, 'rb') as f:
images = np.frombuffer(f.read(), np.uint8, offset=16)
images = images.reshape((-1, 28, 28))
with gzip.open(labels_path, 'rb') as f:
labels = np.frombuffer(f.read(), np.uint8, offset=8)
for np_image, label in zip(images, labels):
if rotate:
np_image = scipy.ndimage.rotate(np_image,
np.random.randint(0, high=360),
reshape=False)
image = Image.fromarray(np_image)
yield (image, label)
gen_tr = lambda: gen_fn(_TRAIN_IMAGES_FILE, _TRAIN_LABELS_FILE)
per_split_gen = splits.random_split_generator_into_splits_with_fractions(
gen_tr, splits.SPLIT_WITH_FRACTIONS_FOR_TRAIN,
splits.MERGED_TRAIN_AND_DEV)
per_split_gen['test'] = gen_fn(
_TEST_IMAGES_FILE, _TEST_LABELS_FILE, rotate=False)
return metadata, per_split_gen
mnist_rotation_dataset = types.DownloadableDataset(
name='mnist_rotation',
download_urls=[
types.DownloadableArtefact(
url='http://yann.lecun.com/exdb/mnist/train-images-idx3-ubyte.gz',
checksum='f68b3c2dcbeaaa9fbdd348bbdeb94873'),
types.DownloadableArtefact(
url='http://yann.lecun.com/exdb/mnist/train-labels-idx1-ubyte.gz',
checksum='d53e105ee54ea40749a09fcbcd1e9432'),
types.DownloadableArtefact(
url='http://yann.lecun.com/exdb/mnist/t10k-images-idx3-ubyte.gz',
checksum='9fb629c4189551a2d022fa330f9573f3'),
types.DownloadableArtefact(
url='http://yann.lecun.com/exdb/mnist/t10k-labels-idx1-ubyte.gz',
checksum='ec29112dd5afa0611ce80d1b7f02629c')
],
paper_title='SO(2)-equivariance in Neural networks using Fourier nonlinearity',
authors='Muthuvel Murugan and K. V. Subrahmanyam',
year='2019',
handler=mnist_rotation_handler,
)
|
dm_nevis-master
|
dm_nevis/datasets_storage/handlers/mnist_rotation.py
|
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tubercolosis handler."""
import os
from dm_nevis.datasets_storage.handlers import extraction_utils as utils
from dm_nevis.datasets_storage.handlers import splits
from dm_nevis.datasets_storage.handlers import types
from tensorflow.io import gfile
_IGNORED_FILES_REGEX = '|'.join([
utils.DEFAULT_IGNORED_FILES_REGEX,
r'metadata.xlsx',
r'README.md.txt',
])
def _path_to_label_fn(path: str, label_to_id):
label = os.path.basename(os.path.dirname(path))
return label_to_id[label]
def tubercolosis_handler(dataset_path: str) -> types.HandlerOutput:
"""Tubercolosis dataset handler."""
files = gfile.listdir(dataset_path)
labels = [
'Normal',
'Tuberculosis',
]
label_to_id = dict(
((label, label_id) for label_id, label in enumerate(labels)))
metadata = types.DatasetMetaData(
num_classes=len(labels),
num_channels=3,
image_shape=(), # Ignored for now.
additional_metadata=dict(
label_to_id=label_to_id,
task_type='classification',
image_type='xray',
))
def make_gen_fn():
return utils.generate_images_from_zip_files(
dataset_path,
files,
path_to_label_fn=lambda path: _path_to_label_fn(path, label_to_id),
ignored_files_regex=_IGNORED_FILES_REGEX,
convert_mode='RGB')
per_split_gen = splits.random_split_generator_into_splits_with_fractions(
make_gen_fn, splits.SPLIT_WITH_FRACTIONS_FOR_ALL_DATA,
splits.MERGED_TRAIN_AND_DEV)
return metadata, per_split_gen
tubercolosis_dataset = types.DownloadableDataset(
name='tubercolosis',
download_urls=[
types.KaggleDataset(
dataset_name='tawsifurrahman/tuberculosis-tb-chest-xray-dataset',
checksum='3abdda8a1100ff97c9dce7cc2413d941')
],
website_url='https://www.kaggle.com/tawsifurrahman/tuberculosis-tb-chest-xray-dataset',
handler=tubercolosis_handler)
|
dm_nevis-master
|
dm_nevis/datasets_storage/handlers/tubercolosis.py
|
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Animal handler."""
import os
import zipfile
from dm_nevis.datasets_storage.handlers import extraction_utils as utils
from dm_nevis.datasets_storage.handlers import splits
from dm_nevis.datasets_storage.handlers import types
from tensorflow.io import gfile
def _path_to_label_fn(path: str, label_to_id):
label = os.path.split(os.path.split(path)[0])[1]
return label_to_id[label]
# pylint:disable=missing-function-docstring
def animal_handler(dataset_path: str) -> types.HandlerOutput:
files = gfile.listdir(dataset_path)
labels = []
for file in files:
with zipfile.ZipFile(os.path.join(dataset_path, file), 'r') as zf:
labels.extend(set([member.split('/')[1] for member in zf.namelist()[1:]]))
num_classes = len(labels)
num_channels = 1
label_to_id = dict(((label, idx) for idx, label in enumerate(labels)))
metadata = types.DatasetMetaData(
num_classes=num_classes,
num_channels=num_channels,
image_shape=(), # Ignored for now.
additional_metadata=dict(label_to_id=label_to_id))
def make_gen_fn():
return utils.generate_images_from_zip_files(
dataset_path,
files,
path_to_label_fn=lambda path: _path_to_label_fn(path, label_to_id))
per_split_gen = splits.random_split_generator_into_splits_with_fractions(
make_gen_fn, splits.SPLIT_WITH_FRACTIONS_FOR_ALL_DATA,
splits.MERGED_TRAIN_AND_DEV)
return (metadata, per_split_gen)
animal_dataset = types.DownloadableDataset(
name='animal',
download_urls=[
types.DownloadableArtefact(
url='http://xiang.bai.googlepages.com/non_rigid_shape_A.zip',
checksum='d88d44dc6d2382de3a5857d86fb5d430'),
types.DownloadableArtefact(
url='http://xiang.bai.googlepages.com/non_rigid_shape_B.zip',
checksum='dae4a05b9797a3109078d1553173b9a8')
],
handler=animal_handler,
)
|
dm_nevis-master
|
dm_nevis/datasets_storage/handlers/animal.py
|
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""COVID-19 Radiography dataset handler."""
import os
from dm_nevis.datasets_storage.handlers import extraction_utils as utils
from dm_nevis.datasets_storage.handlers import splits
from dm_nevis.datasets_storage.handlers import types
from tensorflow.io import gfile
_IGNORED_FILES_REGEX = '|'.join(
    [utils.DEFAULT_IGNORED_FILES_REGEX, r'metadata\.xlsx', r'README\.md\.txt'])
_LABELS = ['COVID', 'Lung_Opacity', 'Normal', 'Viral Pneumonia']
def _path_to_label_fn(path: str, label_to_id):
label = os.path.basename(os.path.dirname(path))
return label_to_id[label]
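# For example, given a (hypothetical) archive layout where images live
# directly under their class directory, a member path such as
# 'COVID/COVID-1.png' has parent directory 'COVID' and resolves to
# label_to_id['COVID'], i.e. class id 0 under the _LABELS ordering above.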
def covid_19_xray_handler(dataset_path: str) -> types.HandlerOutput:
"""Covid-19 radiography dataset handler."""
files = gfile.listdir(dataset_path)
label_to_id = dict(
((label, label_id) for label_id, label in enumerate(_LABELS)))
metadata = types.DatasetMetaData(
num_classes=len(_LABELS),
num_channels=1,
image_shape=(), # Ignored for now.
additional_metadata=dict(
label_to_id=label_to_id,
task_type='classification',
image_type='xray',
))
def make_gen_fn():
return utils.generate_images_from_zip_files(
dataset_path,
files,
path_to_label_fn=lambda path: _path_to_label_fn(path, label_to_id),
ignored_files_regex=_IGNORED_FILES_REGEX,
convert_mode='L')
make_unique_gen_fn = utils.deduplicate_data_generator(make_gen_fn())
per_split_gen = splits.random_split_generator_into_splits_with_fractions(
make_unique_gen_fn, splits.SPLIT_WITH_FRACTIONS_FOR_ALL_DATA,
splits.MERGED_TRAIN_AND_DEV)
return metadata, per_split_gen
covid_19_xray_dataset = types.DownloadableDataset(
name='covid_19_xray',
download_urls=[
types.KaggleDataset(
dataset_name='tawsifurrahman/covid19-radiography-database',
checksum='1888824db56de7f47b886a536961b763')
],
website_url='https://www.kaggle.com/tawsifurrahman/covid19-radiography-database',
handler=covid_19_xray_handler)
|
dm_nevis-master
|
dm_nevis/datasets_storage/handlers/covid_19_xray.py
|
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""AWA2 dataset handler."""
# TODO: Add multi-label support.
import os
import re
from typing import Callable, Dict, List, Tuple
import zipfile
from dm_nevis.datasets_storage.handlers import extraction_utils
from dm_nevis.datasets_storage.handlers import splits
from dm_nevis.datasets_storage.handlers import types
import numpy as np
from PIL import Image
from tensorflow.io import gfile
import tensorflow_datasets as tfds
_CLASS_NAME_REGEX = r'(?P<class_name>[a-z\+]+)'
_CLASS_REGEX = r'(?P<class_id>[\d]+)\t(%s)' % _CLASS_NAME_REGEX
_PREFIX = 'Animals_with_Attributes2'
_NUM_ATTRIBUTES = 85
# Resize images for this dataset to the given max size. The original images can
# have much larger sizes, however this is unnecessarily high for the task, and
# results in slower training due to increased decoding time.
MAXIMUM_IMAGE_SIZE = 256
def _read_classes_with_labels(classes_fname: str,
zf: zipfile.ZipFile) -> Dict[str, int]:
"""Reads class files for a given zipfile object together with labels."""
classes_dict = dict()
with zf.open(os.path.join(_PREFIX, classes_fname)) as f:
line = f.readline()
while line:
result = re.search(_CLASS_REGEX, line.decode('utf-8'))
classes_dict[result['class_name']] = int(result['class_id'])
line = f.readline()
return classes_dict
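# For example, a classes.txt line of the (hypothetical) form '1\tantelope'
# matches _CLASS_REGEX and yields the entry {'antelope': 1}.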
def _read_classes_without_labels(classes_fname: str,
                                 zf: zipfile.ZipFile) -> Tuple[str, ...]:
  """Reads class names for a given zipfile object."""
classes_list = []
with zf.open(os.path.join(_PREFIX, classes_fname)) as f:
line = f.readline()
while line:
result = re.search(_CLASS_NAME_REGEX, line.decode('utf-8'))
classes_list.append(result['class_name'])
line = f.readline()
return tuple(classes_list)
def _read_attributes(attributes_fname: str, zf: zipfile.ZipFile,
get_encoded_attributes_fn: Callable[[str], List[str]]):
"""Reads the attributes for given file and zipfile object."""
attributes_matrix = []
  with zf.open(os.path.join(_PREFIX, attributes_fname)) as f:
line = f.readline()
while line:
encoded_attributes = line.decode('utf-8').split('\n')[0]
encoded_attributes = get_encoded_attributes_fn(encoded_attributes)
attributes_matrix.append([float(elem) for elem in encoded_attributes])
line = f.readline()
return np.array(attributes_matrix)
def awa2_handler(dataset_path: str) -> types.HandlerOutput:
"""Imports AWA2 dataset.
The AWA2 dataset is taken from https://cvml.ist.ac.at/AwA2/AwA2-data.zip.
Args:
dataset_path: The path to the locally downloaded data assuming that it was
downloaded from the above link location.
Returns:
Metadata and iterables over data for each subset split.
"""
files = gfile.listdir(dataset_path)
with zipfile.ZipFile(os.path.join(dataset_path, files[0]), 'r') as zf:
label_to_id = _read_classes_with_labels('classes.txt', zf)
  # Shift labels to be in [0, ..., num_classes - 1]:
label_to_id = dict(
((label, label_id - 1) for (label, label_id) in label_to_id.items()))
train_classes = _read_classes_without_labels('trainclasses.txt', zf)
test_classes = _read_classes_without_labels('testclasses.txt', zf)
predicates_to_id = _read_classes_with_labels('predicates.txt', zf)
  # Shift predicate ids to be in [0, ..., num_predicates - 1]:
predicates_to_id = dict(
((predicate, predicate_id - 1)
for (predicate, predicate_id) in predicates_to_id.items()))
predicate_matrix_binary = _read_attributes(
'predicate-matrix-binary.txt',
zf,
get_encoded_attributes_fn=lambda x: x.split(' ')).astype(int)
predicate_matrix_continuous = _read_attributes(
'predicate-matrix-continuous.txt',
zf,
get_encoded_attributes_fn=lambda x: re.findall(r'\d+\.\d+', x))
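    # The resulting predicate_matrix_binary is a (num_classes x num_attributes)
    # 0/1 matrix (50 x 85 for AWA2): row c lists which attributes are active
    # for class c. gen_split below recovers the multi-label attribute indices
    # for a class via np.nonzero(predicate_matrix_binary[class_id]).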
metadata = types.DatasetMetaData(
num_channels=3,
num_classes=_NUM_ATTRIBUTES,
image_shape=(), # Ignored for now.
additional_metadata=dict(
image_type='object',
task_type='multilabel',
predicates_to_id=predicates_to_id,
predicate_matrix_binary=predicate_matrix_binary,
predicate_matrix_continuous=predicate_matrix_continuous,
label_to_id=label_to_id,
train_classes=train_classes,
test_classes=test_classes),
features=tfds.features.FeaturesDict({
'multi_label':
tfds.features.Sequence(
tfds.features.ClassLabel(num_classes=_NUM_ATTRIBUTES)),
'png_encoded_image':
tfds.features.Image()
}))
def gen_split(classes, label_to_id, predicate_matrix_binary):
classes = set(classes)
with zipfile.ZipFile(os.path.join(dataset_path, files[0]), 'r') as zf:
for member in zf.infolist():
if member.is_dir():
continue
if 'JPEGImages' not in member.filename:
continue
current_class = os.path.split(os.path.split(member.filename)[0])[1]
if current_class not in classes:
continue
class_id = label_to_id[current_class]
attributes = np.nonzero(predicate_matrix_binary[class_id])[0].tolist()
image = Image.open(zf.open(member))
image.load()
        image = image.convert('RGB')  # convert() returns a new image.
image = extraction_utils.resize_to_max_size(image, MAXIMUM_IMAGE_SIZE)
yield types.Example(image=image, label=None, multi_label=attributes)
def make_gen_fn():
return gen_split(train_classes, label_to_id, predicate_matrix_binary)
per_split_gen = splits.random_split_generator_into_splits_with_fractions(
make_gen_fn, splits.SPLIT_WITH_FRACTIONS_FOR_TRAIN,
splits.MERGED_TRAIN_AND_DEV)
per_split_gen['test'] = gen_split(test_classes, label_to_id,
predicate_matrix_binary)
return (metadata, per_split_gen)
awa2_dataset = types.DownloadableDataset(
name='awa2',
download_urls=[
# TODO: Deal with multi labels.
types.DownloadableArtefact(
url='https://cvml.ist.ac.at/AwA2/AwA2-data.zip',
checksum='eaa27cf799d5cf55af372356d7281b5e')
],
handler=awa2_handler)
|
dm_nevis-master
|
dm_nevis/datasets_storage/handlers/awa2.py
|
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Caltech Camera Traps handler."""
import json
import os
import tarfile
from typing import Dict
from dm_nevis.datasets_storage.handlers import splits
from dm_nevis.datasets_storage.handlers import types
from PIL import Image
_IMAGES_FNAME = 'eccv_18_all_images_sm.tar.gz'
_ANNOTATIONS_FNAME = 'eccv_18_annotations.tar.gz'
_ECCV_PATH = 'eccv_18_annotation_files'
_TRAIN_ANNOTATIONS = 'train_annotations.json'
_TRANS_TEST_ANNOTATIONS = 'trans_test_annotations.json'
_TRANS_VAL_ANNOTATIONS = 'trans_val_annotations.json'
_CLASS_NAMES = [
'bobcat',
'opossum',
'empty',
'coyote',
'raccoon',
'bird',
'dog',
'cat',
'squirrel',
'rabbit',
'skunk',
'rodent',
'badger',
'deer',
'car',
'fox',
]
def _read_annotations(ann_fname: str, label_to_id: Dict[str, int],
tf: tarfile.TarFile) -> Dict[str, int]:
"""Creates a dictionary mapping image filename to correspondsing label."""
annotations_data = json.load(
tf.extractfile(os.path.join(_ECCV_PATH, ann_fname)))
categories = annotations_data['categories']
categories = {category['id']: category['name'] for category in categories}
annotations = dict()
for ann in annotations_data['annotations']:
image_id = ann['image_id']
category_id = ann['category_id']
annotations[image_id] = label_to_id[categories[category_id]]
return annotations
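# For intuition, the annotation files are COCO-style JSON; a minimal
# (hypothetical) instance that _read_annotations can consume looks like:
#
#   {"categories": [{"id": 6, "name": "bobcat"}],
#    "annotations": [{"image_id": "59f79901-23d2", "category_id": 6}]}
#
# which maps image id '59f79901-23d2' to label_to_id['bobcat'].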
def caltech_camera_traps_handler(dataset_path: str) -> types.HandlerOutput:
"""Caltech Camera Traps handler."""
label_to_id = {class_name: i for i, class_name in enumerate(_CLASS_NAMES)}
metadata = types.DatasetMetaData(
num_classes=16,
num_channels=3,
image_shape=(), # Ignored for now.
additional_metadata=dict(
label_to_id=label_to_id,
task_type='classification',
image_type='object',
))
with tarfile.open(os.path.join(dataset_path, _ANNOTATIONS_FNAME)) as tf:
train_annotations = _read_annotations(_TRAIN_ANNOTATIONS, label_to_id, tf)
trans_val_ann = _read_annotations(_TRANS_VAL_ANNOTATIONS, label_to_id, tf)
trans_test_ann = _read_annotations(_TRANS_TEST_ANNOTATIONS, label_to_id, tf)
def make_gen_fn(annotations):
with tarfile.open(os.path.join(dataset_path, _IMAGES_FNAME), 'r|gz') as tf:
for member in tf:
if member.isdir():
continue
image_id = os.path.basename(os.path.splitext(member.path)[0])
if image_id not in annotations:
continue
label = annotations[image_id]
image = Image.open(tf.extractfile(member)).convert('RGB')
image.load()
yield (image, label)
make_train_gen_fn = lambda: make_gen_fn(train_annotations)
per_split_gen = splits.random_split_generator_into_splits_with_fractions(
make_train_gen_fn, splits.SPLIT_WITH_FRACTIONS_FOR_TRAIN_AND_DEV_ONLY,
splits.MERGED_TRAIN_AND_DEV)
per_split_gen['dev-test'] = make_gen_fn(trans_val_ann)
per_split_gen['test'] = make_gen_fn(trans_test_ann)
return metadata, per_split_gen
caltech_camera_traps_dataset = types.DownloadableDataset(
name='caltech_camera_traps',
download_urls=[
types.DownloadableArtefact(
url='https://lilablobssc.blob.core.windows.net/caltechcameratraps/eccv_18_all_images_sm.tar.gz',
checksum='8143c17aa2a12872b66f284ff211531f'),
types.DownloadableArtefact(
url='https://lilablobssc.blob.core.windows.net/caltechcameratraps/eccv_18_annotations.tar.gz',
checksum='66a1f481b44aa1edadf75c9cfbd27aba')
],
website_url='https://lila.science/datasets/caltech-camera-traps',
handler=caltech_camera_traps_handler)
|
dm_nevis-master
|
dm_nevis/datasets_storage/handlers/caltech_camera_traps.py
|
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Casia HWDB 1.1 dataset handler."""
import codecs
import io
import os
import struct
import zipfile
from dm_nevis.datasets_storage.handlers import splits
from dm_nevis.datasets_storage.handlers import types
import numpy as np
from PIL import Image
from tensorflow.io import gfile
def casia_hwdb_handler(artifacts_path: str) -> types.HandlerOutput:
"""Imports Casia HWDB 1.1 dataset.
The dataset home page is at
http://www.nlpr.ia.ac.cn/databases/handwriting/Offline_database.html
  This is an OCR dataset for the recognition of handwritten Chinese
  characters.
The dataset comes as a set of 3 zip files each containing several binary files
with the actual images in a binary format (see website for details). Images
have variable spatial resolution, and the background is set to 255.
There are a total of 1,172,907 images and 3926 labels (Chinese and other
characters).
The data is pre-split into training and test sets, as specified by the
filenames of the zip files.
Args:
artifacts_path: Path with downloaded artifacts.
Returns:
Metadata and generator functions.
"""
files = gfile.listdir(artifacts_path)
assert files
  def extract_data(stream, keep_ima):
    # keep_ima is a boolean flag. When True, we also return a list of the
    # images present in the binary stream; when False, we return an empty
    # list instead (saving time and memory during the label-only pass).
images = []
labels = []
# Get total number of bytes, useful to know when to stop reading.
stream.seek(0, 2)
eof = stream.tell()
stream.seek(0, 0)
while stream.tell() < eof:
packed_length = stream.read(4)
length = struct.unpack("<I", packed_length)[0]
raw_label = struct.unpack(">cc", stream.read(2))
try:
label = codecs.decode(raw_label[0] + raw_label[1], encoding="gb2312-80")
except Exception: # pylint: disable=broad-except
# In few cases decoding fails, we then store the raw byte label.
label = raw_label[0] + raw_label[1]
width = struct.unpack("<H", stream.read(2))[0]
height = struct.unpack("<H", stream.read(2))[0]
assert length == width * height + 10
raw_image = struct.unpack("{}B".format(height * width),
stream.read(height * width))
if keep_ima:
image = np.array(raw_image, dtype=np.uint8).reshape(height, width)
image = 255 - image # Set background to 0.
images.append(Image.fromarray(image))
labels.append(label)
return images, labels
# Do a first pass over the data to figure out the labels.
print("Extracting labels")
label_str_to_int = dict()
all_labels = []
for zip_fname in files:
with zipfile.ZipFile(os.path.join(artifacts_path, zip_fname), "r") as zf:
for name in sorted(zf.namelist()):
f = zf.getinfo(name)
if f.is_dir():
continue
_, labels = extract_data(io.BytesIO(zf.read(f)), False)
all_labels += labels
num_examples = len(all_labels)
all_labels = list(set(all_labels)) # Get unique labels.
num_classes = len(all_labels)
for i, label in enumerate(all_labels):
label_str_to_int[label] = i
print("There are a total of " + str(num_examples) + " examples and " +
str(len(all_labels)) + " labels.")
metadata = types.DatasetMetaData(
num_classes=num_classes, # 3926
num_channels=1,
image_shape=(), # Ignored for now.
additional_metadata=dict(
label_to_id=label_str_to_int,
task_type="classification",
image_type="ocr"))
def make_gen(curr_files):
for zip_fname in curr_files:
with zipfile.ZipFile(os.path.join(artifacts_path, zip_fname), "r") as zf:
for name in sorted(zf.namelist()):
f = zf.getinfo(name)
if f.is_dir():
continue
images, labels = extract_data(io.BytesIO(zf.read(f)), True)
for image, label in zip(images, labels):
yield (image, label_str_to_int[label])
training_files = ["Gnt1.1TrainPart1.zip", "Gnt1.1TrainPart2.zip"]
make_gen_fn = lambda: make_gen(training_files)
per_split_gen = splits.random_split_generator_into_splits_with_fractions(
make_gen_fn, splits.SPLIT_WITH_FRACTIONS_FOR_TRAIN,
splits.MERGED_TRAIN_AND_DEV)
per_split_gen["test"] = make_gen(["Gnt1.1Test.zip"])
return metadata, per_split_gen
# pylint: disable=line-too-long
# Unofficial links (but still from the authors) for the three files are:
# - https://mail.cstnet.cn/coremail/downloadFile.jsp?key=1U31SsvkjqFd-VIyT8jLTejXo9vqo90yo9FLUu2kTDawSstETnvC3ZJdantET9GJfgKm3cKq3XKyTgGka1AGF4fdTuxEOnECzVFA3sa9wZEXaWCLTejXo9vqo90yo9FLwujXTWCyfu2LaV7L3ZtmjeIk-s71UUUUjU2l39EtjqanVW8XFWDZFW3Zr4UKw45yVZGmSsFdUZCmaUtsU18USUjgUnkU7DjBUnrU4UjjUnAU4DjKU0xUx7jFU88UcUj2U88U77jWU0fj-7wfvpGQ&code=cv62nuho
# - https://gzc-download.ftn.qq.com/ftn_handler/f2fdd9604fbe398e97ed995547e4c31ef0460bab2a0afae99aa1526566f5251d819ba1210e5df5ed0df45ff8ad04d1a1e0ce689d26c00ec724bc97425a87e46d/?fname=Gnt1.1TrainPart2.zip&xffz=966309492&k=9c9b0132e9731ec9aee84a323734346417d2fc0b35343464464b5c0b530506505a56490b02550149575c5054180d0306054900575700035451055051570c2064240a10031b056016020d0a62544640564d1e0d42213f39deeb7da2617ba5ce637d2a8788a0cd38e4ab&code=cdd2544d
# - https://njc-download.ftn.qq.com/ftn_handler/7eedd59d4d7a6f3ccbef7057e36f92abd09aa682a0f325491ead9454ea469ad0c68165f3683402a77b59e8085a8950a650aca0848436379699601bad022b5d20/?fname=Gnt1.1Test.zip&xffz=479659120&k=cb9c6b61e9234f98f9ef1a61376465354467a37d35646535114c0d000c505d040555195757015c1800525057185c560d044e0202505c065605510d0251556b35730d40501b55315047171a1b5c14719be76aeaae89656afaf0d85f83d48a3a643e51f0&code=4c4a5de5
# In the event that the official server times out, the above links can be used
# as a fall back.
casia_hwdb_dataset = types.DownloadableDataset(
name="casia_hwdb",
download_urls=[
types.DownloadableArtefact(
url="http://www.nlpr.ia.ac.cn/databases/Download/Offline/CharData/Gnt1.1TrainPart1.zip",
checksum="72bac7b6a5ce37f184f277421adfacfd"),
types.DownloadableArtefact(
url="http://www.nlpr.ia.ac.cn/databases/Download/Offline/CharData/Gnt1.1TrainPart2.zip",
checksum="a8b76e4eccfb1fd8d56c448f6a096c27"),
types.DownloadableArtefact(
url="http://www.nlpr.ia.ac.cn/databases/Download/Offline/CharData/Gnt1.1Test.zip",
checksum="e74f9a4863e73d8b80ed402452c97448")
],
handler=casia_hwdb_handler,
paper_title="CASIA Online and Offline Chinese Handwriting Databases",
authors="Cheng-Lin Liu, Fei Yin, Da-Han Wang, Qiu-Feng Wang",
year="2011")
|
dm_nevis-master
|
dm_nevis/datasets_storage/handlers/casia_hwdb.py
|
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""ShanghaiTech dataset handler."""
import os
from typing import List
import zipfile
from dm_nevis.datasets_storage.handlers import types
import numpy as np
from PIL import Image
import scipy.io
from tensorflow.io import gfile
_NUM_CLASSES = 10
_PERC_DEV = 0.15
_PERC_DEV_TEST = 0.15
def shanghai_tech_handler(dataset_path: str) -> types.HandlerOutput:
"""Imports Shanghai Tech dataset.
Link: https://www.kaggle.com/tthien/shanghaitech
The dataset comes in two parts, A and B. Each has its own training and test
split. The task is about counting how many people there are in the images.
  Similarly to other counting datasets, we quantize the counts into buckets
  with roughly the same number of images. We also merge the training and test
  sets of parts A and B into a single training and test set.
The zip files are organized as follows:
1) the folder ShanghaiTech/part_{A|B}/{train|test}_data/ground-truth contains
matlab files storing the coordinates of each head in the image. One can get
the number of people in the image by querying for instance:
m = scipy.io.loadmat('GT_IMG_1.mat')
m['image_info'][0][0][0][0][0].shape[0]
2) the folder ShanghaiTech/part_{A|B}/{train|test}_data/images contains jpeg
  images, mostly RGB, with some grayscale images as well.
Args:
dataset_path: Path with downloaded datafiles.
Returns:
Metadata and generator functions.
"""
dfile = gfile.listdir(dataset_path)
assert len(dfile) == 1
dfile = dfile[0]
max_size = 5000
counts = dict()
histogram = np.zeros(max_size)
training_files = []
test_files = []
train_files = []
dev_files = []
devtest_files = []
train_dev_files = []
def convert_filename(fullname):
# Convert the label filename into image filename.
pieces = fullname.split('/')
filename = pieces[-1]
filename_pieces = filename.split('_')
return os.path.join(pieces[0], pieces[1], pieces[2],
'images',
filename_pieces[1] + '_' + filename_pieces[2][:-3] +
'jpg')
with zipfile.ZipFile(os.path.join(dataset_path, dfile), 'r') as zf:
    # Go over all samples and collect the count information to build a
    # histogram, which is then used to bucket the counts.
for f in zf.infolist():
if (f.is_dir() or 'shanghaitech_h5_empty' in f.filename or
not f.filename.endswith('mat')):
continue
with zf.open(f) as fo:
gf = scipy.io.loadmat(fo)
curr_count = gf['image_info'][0][0][0][0][0].shape[0]
image_name = convert_filename(f.filename)
counts[image_name] = curr_count
histogram[curr_count if (curr_count < max_size) else max_size-1] += 1
if 'train' in image_name:
training_files.append(image_name)
else:
test_files.append(image_name)
  # Quantize the counts into equally sized buckets.
tot_num_samples = histogram.sum()
cumsum = np.cumsum(histogram)
num_examples_per_bucket = tot_num_samples / _NUM_CLASSES
intervals = []
for cnt in range(1, _NUM_CLASSES):
indices = np.where(cumsum < num_examples_per_bucket * cnt)
assert indices[0].shape[0] > 0
intervals.append(indices[0][-1])
intervals.append(max_size)
count_to_label = []
label = 0
prev_cnt = 0
classname_to_label = dict()
  # Compute the count-to-label and classname-to-label mappings.
for cnt in range(max_size):
if cnt > intervals[label]:
classname = '%d-%d' % (prev_cnt, cnt)
classname_to_label[classname] = label
label += 1
prev_cnt = cnt + 1
count_to_label.append(label)
classname = '%d-%d' % (prev_cnt, max_size)
classname_to_label[classname] = label
assert label == _NUM_CLASSES - 1
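  # Worked micro-example (illustrative numbers, not dataset values): with
  # _NUM_CLASSES = 3, max_size = 6 and histogram = [4, 4, 4, 4, 4, 4],
  # cumsum = [4, 8, 12, 16, 20, 24] and num_examples_per_bucket = 8, giving
  # intervals = [0, 2, 6] and count_to_label = [0, 1, 1, 2, 2, 2]: a count of
  # 0 maps to class 0, counts 1-2 to class 1, and counts 3-5 to class 2.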
  # Partition the original training set into train, dev and dev-test.
rng = np.random.default_rng(seed=1)
tot_num_train_samples = len(training_files)
shuffle = rng.permutation(tot_num_train_samples).tolist()
ranges = [0, int((1-_PERC_DEV_TEST-_PERC_DEV)*tot_num_train_samples),
int((1-_PERC_DEV_TEST)*tot_num_train_samples),
tot_num_train_samples]
train_ids = shuffle[ranges[0]:ranges[1]]
dev_ids = shuffle[ranges[1]:ranges[2]]
train_dev_ids = shuffle[ranges[0]:ranges[2]]
devtest_ids = shuffle[ranges[2]:ranges[3]]
train_files += [training_files[cc] for cc in train_ids]
dev_files += [training_files[cc] for cc in dev_ids]
train_dev_files += [training_files[cc] for cc in train_dev_ids]
devtest_files += [training_files[cc] for cc in devtest_ids]
metadata = types.DatasetMetaData(
num_classes=_NUM_CLASSES,
num_channels=3,
image_shape=(), # Ignored for now.
additional_metadata=dict(
label_to_id=classname_to_label,
task_type='regression',
image_type='counting'))
def gen_split(imageset: List[str]):
with zipfile.ZipFile(os.path.join(dataset_path, dfile), 'r') as zf:
all_files = [f.filename for f in zf.infolist()]
for fname in imageset:
assert fname in all_files
image = Image.open(zf.open(fname)).convert('RGB')
image.load()
label = count_to_label[counts[fname]]
yield (image, label)
return (metadata, {
'train': gen_split(train_files),
'dev': gen_split(dev_files),
'train_and_dev': gen_split(train_dev_files),
'dev-test': gen_split(devtest_files),
'test': gen_split(test_files),
})
shanghai_tech_dataset = types.DownloadableDataset(
name='shanghai_tech',
download_urls=[
types.KaggleDataset(
dataset_name='tthien/shanghaitech',
checksum='f547d65447063405ea78ab7fa9ae721b')
],
handler=shanghai_tech_handler,
paper_title='Single-Image Crowd Counting via Multi-Column Convolutional Neural Network',
authors='Yingying Zhang, Desen Zhou, Siqin Chen, Shenghua Gao, Yi Ma',
year='2016')
|
dm_nevis-master
|
dm_nevis/datasets_storage/handlers/shanghai_tech.py
|
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Labeled Faces in the Wild Dataset handler."""
import os
import tarfile
from typing import List
from dm_nevis.datasets_storage.handlers import types
from PIL import Image
_FNAME = 'lfw.tgz'
_MIN_NUM_IMGS = 5
_NUM_CLASSES = 423
_NUM_SAMPLES = 5985
_SPLITS = ['train', 'dev', 'dev-test', 'train_and_dev', 'test']
def _get_range(list_len: int, split: str) -> List[int]:
"""Returns range of indexes for a given split."""
if split == 'train':
return list(range(0, list_len - 4))
elif split == 'dev':
return list(range(list_len - 4, list_len - 3))
elif split == 'dev-test':
return list(range(list_len - 3, list_len - 2))
elif split == 'train_and_dev':
return list(range(0, list_len - 3))
else:
assert split == 'test'
return list(range(list_len - 2, list_len))
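# Worked example: for a subject with list_len = 5 images, the splits resolve
# to train -> [0], dev -> [1], dev-test -> [2], train_and_dev -> [0, 1], and
# test -> [3, 4].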
def lfw_handler(dataset_path: str) -> types.HandlerOutput:
"""LFW dataset.
LFW is originally a face verification dataset: Given a pair of images, predict
whether the faces are from the same subject.
Here we turn the face verification task into face classification: Given a
single image, predict the subject id. This means that test images must contain
  subjects seen at training time. Therefore, we remove subjects that have
  fewer than _MIN_NUM_IMGS examples, since we need to form
  train/dev/dev-test/test splits.
  We take the last two images of each subject for testing, the one before
  those for dev-test, the one before that for dev, and the remaining images
  for training.
  The original dataset has 13233 images from 5749 subjects. Restricting to
  subjects with at least _MIN_NUM_IMGS (5) images reduces the dataset to
  _NUM_SAMPLES (5985) images and _NUM_CLASSES (423) subjects.
Args:
dataset_path: Path with downloaded artifacts.
Returns:
Metadata and generator functions.
"""
data = dict()
with tarfile.open(os.path.join(dataset_path, _FNAME), 'r|gz') as tf:
for member in tf:
if member.isdir():
continue
image_fname = os.path.basename(member.path)
subject_name = os.path.basename(os.path.dirname(member.path))
if subject_name not in data:
data[subject_name] = [image_fname]
else:
data[subject_name].append(image_fname)
splits_fnames = {
'train': [],
'dev': [],
'train_and_dev': [],
'dev-test': [],
'test': []
}
splits_labels = {
'train': [],
'dev': [],
'train_and_dev': [],
'dev-test': [],
'test': []
}
label_id = 0
label_str_to_int = dict()
tot_num_examples = 0
for subject_name, file_list in data.items():
n = len(file_list)
if n < _MIN_NUM_IMGS:
continue
for split in _SPLITS:
srange = _get_range(n, split)
splits_labels[split] += [label_id] * len(srange)
splits_fnames[split] += [file_list[i] for i in srange]
label_str_to_int[subject_name] = label_id
tot_num_examples += n
label_id += 1
assert label_id == _NUM_CLASSES
assert tot_num_examples == _NUM_SAMPLES
def gen(split):
with tarfile.open(os.path.join(dataset_path, _FNAME), 'r|gz') as tf:
for member in tf:
if member.isdir():
continue
image_fname = os.path.basename(member.path)
if image_fname not in splits_fnames[split]:
continue
index = splits_fnames[split].index(image_fname)
label = splits_labels[split][index]
image = Image.open(tf.extractfile(member)).convert('RGB')
image.load()
yield (image, label)
metadata = types.DatasetMetaData(
num_classes=_NUM_CLASSES,
num_channels=3,
image_shape=(), # Ignored for now.
additional_metadata=dict(
label_to_id=label_str_to_int,
task_type='classification',
image_type='face',
))
per_split_gen = {}
for split in _SPLITS:
per_split_gen[split] = gen(split)
return metadata, per_split_gen
lfw_dataset = types.DownloadableDataset(
name='lfw',
download_urls=[
types.DownloadableArtefact(
url='http://vis-www.cs.umass.edu/lfw/lfw.tgz',
checksum='a17d05bd522c52d84eca14327a23d494')
],
website_url='http://vis-www.cs.umass.edu/lfw/',
handler=lfw_handler,
paper_title='Labeled Faces in the Wild: A Database for Studying Face Recognition in Unconstrained Environments',
authors='Gary B. Huang, Manu Ramesh, Tamara Berg, and Erik Learned-Miller',
year='2007')
|
dm_nevis-master
|
dm_nevis/datasets_storage/handlers/lfw.py
|
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""All handlers."""
from typing import Iterable, List
from dm_nevis.datasets_storage.handlers import aberdeen
from dm_nevis.datasets_storage.handlers import aloi
from dm_nevis.datasets_storage.handlers import alot
from dm_nevis.datasets_storage.handlers import animal
from dm_nevis.datasets_storage.handlers import animal_web
from dm_nevis.datasets_storage.handlers import awa2
from dm_nevis.datasets_storage.handlers import belgium_tsc
from dm_nevis.datasets_storage.handlers import biwi
from dm_nevis.datasets_storage.handlers import brodatz
from dm_nevis.datasets_storage.handlers import butterflies
from dm_nevis.datasets_storage.handlers import caltech256
from dm_nevis.datasets_storage.handlers import caltech_camera_traps
from dm_nevis.datasets_storage.handlers import caltech_categories
from dm_nevis.datasets_storage.handlers import casia_hwdb
from dm_nevis.datasets_storage.handlers import chars74k
from dm_nevis.datasets_storage.handlers import cmu_amp_expression
from dm_nevis.datasets_storage.handlers import coco
from dm_nevis.datasets_storage.handlers import coil20
from dm_nevis.datasets_storage.handlers import covid_19_xray
from dm_nevis.datasets_storage.handlers import cvc_muscima
from dm_nevis.datasets_storage.handlers import ddsm
from dm_nevis.datasets_storage.handlers import extended_yaleb
from dm_nevis.datasets_storage.handlers import fgvc_aircraft
from dm_nevis.datasets_storage.handlers import flickr_material_database as fmd
from dm_nevis.datasets_storage.handlers import food101
from dm_nevis.datasets_storage.handlers import food101n
from dm_nevis.datasets_storage.handlers import german_tsr
from dm_nevis.datasets_storage.handlers import iaprtc12
from dm_nevis.datasets_storage.handlers import ig02
from dm_nevis.datasets_storage.handlers import interact
from dm_nevis.datasets_storage.handlers import kth_tips
from dm_nevis.datasets_storage.handlers import landsat
from dm_nevis.datasets_storage.handlers import lfw
from dm_nevis.datasets_storage.handlers import lfwa
from dm_nevis.datasets_storage.handlers import magellan_venus_volcanoes
from dm_nevis.datasets_storage.handlers import mall
from dm_nevis.datasets_storage.handlers import melanoma
from dm_nevis.datasets_storage.handlers import mit_scenes
from dm_nevis.datasets_storage.handlers import mnist_m
from dm_nevis.datasets_storage.handlers import mnist_rotation
from dm_nevis.datasets_storage.handlers import mpeg7
from dm_nevis.datasets_storage.handlers import nih_chest_xray
from dm_nevis.datasets_storage.handlers import not_mnist
from dm_nevis.datasets_storage.handlers import office31
from dm_nevis.datasets_storage.handlers import office_caltech_10
from dm_nevis.datasets_storage.handlers import olivetti_face
from dm_nevis.datasets_storage.handlers import oxford_flowers_17
from dm_nevis.datasets_storage.handlers import pacs
from dm_nevis.datasets_storage.handlers import pascal_voc2005
from dm_nevis.datasets_storage.handlers import pascal_voc2006
from dm_nevis.datasets_storage.handlers import pascal_voc2007
from dm_nevis.datasets_storage.handlers import path_mnist
from dm_nevis.datasets_storage.handlers import pneumonia_chest_xray
from dm_nevis.datasets_storage.handlers import ppmi
from dm_nevis.datasets_storage.handlers import scenes15
from dm_nevis.datasets_storage.handlers import scenes8
from dm_nevis.datasets_storage.handlers import semeion
from dm_nevis.datasets_storage.handlers import shanghai_tech
from dm_nevis.datasets_storage.handlers import silhouettes
from dm_nevis.datasets_storage.handlers import sketch
from dm_nevis.datasets_storage.handlers import stanford_cars
from dm_nevis.datasets_storage.handlers import sun_attributes
from dm_nevis.datasets_storage.handlers import synthetic_covid19_xray
from dm_nevis.datasets_storage.handlers import tid
from dm_nevis.datasets_storage.handlers import tiny_imagenet
from dm_nevis.datasets_storage.handlers import trancos
from dm_nevis.datasets_storage.handlers import tubercolosis
from dm_nevis.datasets_storage.handlers import types
from dm_nevis.datasets_storage.handlers import uiuc_cars
from dm_nevis.datasets_storage.handlers import uiuc_texture
from dm_nevis.datasets_storage.handlers import umd
from dm_nevis.datasets_storage.handlers import umist
from dm_nevis.datasets_storage.handlers import usps
from dm_nevis.datasets_storage.handlers import vistex
from dm_nevis.datasets_storage.handlers import voc_actions
from dm_nevis.datasets_storage.handlers import wiki_paintings
_DATASETS_TO_HANDLERS = {
'sun_attributes':
sun_attributes.sun_attributes_dataset,
'animal_web':
animal_web.animal_web_dataset,
'aberdeen':
aberdeen.aberdeen_dataset,
'animal':
animal.animal_dataset,
'aloi':
aloi.aloi_dataset,
'aloi_grey':
aloi.aloi_grey_dataset,
'alot':
alot.alot_dataset, # NOTYPO
'alot_grey':
alot.alot_grey_dataset, # NOTYPO
'awa2':
awa2.awa2_dataset,
'belgium_tsc':
belgium_tsc.belgium_tsc_dataset,
'biwi':
biwi.biwi_dataset,
'brodatz':
brodatz.brodatz_dataset,
'butterflies':
butterflies.butterflies_dataset,
'chars74k':
chars74k.chars74k_dataset,
'caltech256':
caltech256.caltech256_dataset,
'caltech_categories':
caltech_categories.caltech_categories_dataset,
'casia_hwdb':
casia_hwdb.casia_hwdb_dataset,
'cmu_amp_expression':
cmu_amp_expression.cmu_amp_expression_dataset,
'coco_single_label':
coco.coco_single_label_dataset,
'coco_multi_label':
coco.coco_multi_label_dataset,
'coil20':
coil20.coil_20_dataset,
'coil20_unproc':
coil20.coil_20_unproc_dataset,
'extended_yaleb':
extended_yaleb.extended_yaleb_dataset,
'fgvc_aircraft_family':
fgvc_aircraft.fgvc_aircraft_family_dataset,
'fgvc_aircraft_manufacturer':
fgvc_aircraft.fgvc_aircraft_manufacturer_dataset,
'fgvc_aircraft_variant':
fgvc_aircraft.fgvc_aircraft_variant_dataset,
'covid_19_xray':
covid_19_xray.covid_19_xray_dataset,
'ddsm':
ddsm.ddsm_dataset,
'flickr_material_database':
fmd.flickr_material_database_dataset,
'german_tsr':
german_tsr.german_tsr_dataset,
'iaprtc12':
iaprtc12.iaprtc12_dataset,
'ig02':
ig02.ig02_dataset,
'interact':
interact.interact_dataset,
'kth_tips':
kth_tips.kth_tips_dataset,
'kth_tips_grey':
kth_tips.kth_tips_grey_dataset,
'kth_tips_2a':
kth_tips.kth_tips_2a_dataset,
'kth_tips_2b':
kth_tips.kth_tips_2b_dataset,
'landsat':
landsat.landsat_dataset,
'lfw':
lfw.lfw_dataset,
'lfwa':
lfwa.lfwa_dataset,
'magellan_venus_volcanoes':
magellan_venus_volcanoes.magellan_venus_volcanoes_dataset,
'mall':
mall.mall_dataset,
'melanoma':
melanoma.melanoma_dataset,
'mit_scenes':
mit_scenes.mit_scenes_dataset,
'mnist_m':
mnist_m.mnist_m_dataset,
'nih_chest_xray':
nih_chest_xray.nih_chest_xray_dataset,
'office31':
office31.office31_dataset,
'office_caltech_10':
office_caltech_10.office_caltech_10_dataset,
'pacs':
pacs.pacs_dataset,
'pascal_voc2005':
pascal_voc2005.pascal_voc2005_dataset,
'pascal_voc2006':
pascal_voc2006.pascal_voc2006_dataset,
'pascal_voc2007':
pascal_voc2007.pascal_voc2007_dataset,
'ppmi':
ppmi.ppmi_dataset,
'scenes8':
scenes8.scenes8_dataset,
'scenes15':
scenes15.scenes15_dataset,
'shanghai_tech':
shanghai_tech.shanghai_tech_dataset,
'silhouettes_16':
silhouettes.silhouettes_16_dataset,
'silhouettes_28':
silhouettes.silhouettes_28_dataset,
'sketch':
sketch.sketch_dataset,
'not_mnist':
not_mnist.not_mnist_dataset,
'oxford_flowers_17':
oxford_flowers_17.oxford_flowers_17_dataset,
'trancos':
trancos.trancos_dataset,
'synthetic_covid19_xray':
synthetic_covid19_xray.synthetic_covid19_xray_dataset,
'stanford_cars':
stanford_cars.stanford_cars_dataset,
'tid2008':
tid.tid2008_dataset,
'tid2013':
tid.tid2013_dataset,
'olivetti_face':
olivetti_face.olivetti_face_dataset,
'path_mnist':
path_mnist.path_mnist_dataset,
'pneumonia_chest_xray':
pneumonia_chest_xray.pneumonia_chest_xray_dataset,
'tubercolosis':
tubercolosis.tubercolosis_dataset,
'uiuc_cars':
uiuc_cars.uiuc_cars_dataset,
'uiuc_texture':
uiuc_texture.uiuc_texture_dataset,
'umist':
umist.umist_dataset,
'usps':
usps.usps_dataset,
'semeion':
semeion.semeion_dataset,
'food101':
food101.food101_dataset,
'food101n':
food101n.food101n_dataset,
'caltech_camera_traps':
caltech_camera_traps.caltech_camera_traps_dataset,
'cvc_muscima':
cvc_muscima.cvc_muscima_dataset,
'mpeg7':
mpeg7.mpeg7_dataset,
'tiny_imagenet':
tiny_imagenet.tiny_imagenet_dataset,
'mnist_rotation':
mnist_rotation.mnist_rotation_dataset,
'umd':
umd.umd_dataset,
'vistex':
vistex.vistex_dataset,
'voc_actions':
voc_actions.voc_actions_dataset,
'wiki_paintings_artist':
wiki_paintings.wiki_paintings_dataset_artist,
'wiki_paintings_genre':
wiki_paintings.wiki_paintings_dataset_genre,
'wiki_paintings_style':
wiki_paintings.wiki_paintings_dataset_style,
}
def get_links_for_dataset(dataset: str) -> List[types.Artefact]:
return _DATASETS_TO_HANDLERS[dataset].download_urls
def get_handler_for_dataset(dataset: str) -> types.Handler:
return _DATASETS_TO_HANDLERS[dataset].handler
def is_dataset_available(dataset_name: str) -> bool:
return dataset_name in _DATASETS_TO_HANDLERS
def dataset_names() -> Iterable[str]:
return _DATASETS_TO_HANDLERS.keys()
def get_dataset(dataset_name: str) -> types.DownloadableDataset:
return _DATASETS_TO_HANDLERS[dataset_name]
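# Example usage (hypothetical call site): look up a dataset by name and run
# its handler over a directory holding the downloaded artifacts.
#
#   dataset = get_dataset('mnist_m')
#   metadata, per_split_gen = dataset.handler('/path/to/downloaded/artifacts')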
|
dm_nevis-master
|
dm_nevis/datasets_storage/handlers/__init__.py
|
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Silhouettes handler."""
import functools
import os
from dm_nevis.datasets_storage.handlers import splits
from dm_nevis.datasets_storage.handlers import types
from PIL import Image
import scipy.io
# pylint:disable=missing-function-docstring
def silhouettes_handler(dataset_path: str,
size: int = 16) -> types.HandlerOutput:
silhouettes_fname = 'caltech101_silhouettes_%d.mat' % size
silhouettes_split_fname = 'caltech101_silhouettes_%d_split1.mat' % size
silhouettes = scipy.io.loadmat(os.path.join(dataset_path, silhouettes_fname))
silhouettes_split = scipy.io.loadmat(
os.path.join(dataset_path, silhouettes_split_fname))
class_to_label = dict()
for class_id, class_name in enumerate(silhouettes['classnames'].flatten()):
class_to_label[class_name.item()] = class_id
num_classes = len(class_to_label)
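  # The .mat files expose flat arrays keyed by split: the split file provides
  # '<split>_data' and '<split>_labels' arrays for 'train', 'val' and 'test',
  # while the unsplit file stores all images in 'X' and 1-based labels in 'Y',
  # as consumed by split_gen below.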
def split_gen(data, split, size):
if split != 'all':
images, labels = data['%s_data' % split], data['%s_labels' % split]
labels = labels[:, 0] - 1
else:
images, labels = data['X'], data['Y']
labels = labels[0] - 1
# Original images are in [0, 1]
images *= 255
for i in range(len(images)):
image = Image.fromarray(images[i].reshape((size, size)))
label = labels[i]
yield image, label
make_gen_fn = lambda: split_gen(silhouettes_split, 'train', size)
per_split_gen = splits.random_split_generator_into_splits_with_fractions(
make_gen_fn, splits.SPLIT_WITH_FRACTIONS_FOR_TRAIN_AND_DEV_ONLY,
splits.MERGED_TRAIN_AND_DEV)
per_split_gen['dev-test'] = split_gen(silhouettes_split, 'val', size)
per_split_gen['test'] = split_gen(silhouettes_split, 'test', size)
metadata = types.DatasetMetaData(
num_channels=1,
num_classes=num_classes,
image_shape=(), # Ignored for now.
additional_metadata=dict(class_to_label=class_to_label))
return metadata, per_split_gen
silhouettes_16_dataset = types.DownloadableDataset(
name='silhouettes_16',
download_urls=[
types.DownloadableArtefact(
url='https://people.cs.umass.edu/~marlin/data/caltech101_silhouettes_16.mat',
checksum='c79e99a89e9306069ac91b462be1504a'),
types.DownloadableArtefact(
url='https://people.cs.umass.edu/~marlin/data/caltech101_silhouettes_16_split1.mat',
checksum='3baf9e2c023aa4a187e1d1b92b5a734a')
],
handler=functools.partial(silhouettes_handler, size=16))
silhouettes_28_dataset = types.DownloadableDataset(
name='silhouettes_28',
download_urls=[
types.DownloadableArtefact(
url='https://people.cs.umass.edu/~marlin/data/caltech101_silhouettes_28.mat',
checksum='1432d2809e8bf111f1104a234731ddb1'),
types.DownloadableArtefact(
url='https://people.cs.umass.edu/~marlin/data/caltech101_silhouettes_28_split1.mat',
checksum='4483e9c14b188fd09937f9ea6f9ea777')
],
handler=functools.partial(silhouettes_handler, size=28))
|
dm_nevis-master
|
dm_nevis/datasets_storage/handlers/silhouettes.py
|
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Types required for handlers."""
import dataclasses
from typing import Any, Callable, Dict, Iterable, List, NamedTuple, Optional, Tuple, Union
import numpy as np
from PIL import Image as pil_image
import tensorflow as tf
import tensorflow_datasets as tfds
ImageLike = Union[pil_image.Image, tf.Tensor]
ScalarLike = Union[np.ndarray, tf.Tensor, int, float]
class Example(NamedTuple):
image: Optional[ImageLike]
label: Optional[ScalarLike]
multi_label: Optional[List[ScalarLike]]
@dataclasses.dataclass
class DatasetMetaData:
num_classes: int
num_channels: int
  image_shape: Tuple[int, ...]
additional_metadata: Dict[str, Any]
features: Optional[tfds.features.FeaturesDict] = None
preprocessing: str = ""
Image = pil_image.Image
Label = int
# We use a Union for backwards compatibility with plain (image, label) tuples.
DataGenerator = Iterable[Union[Tuple[Image, Label], Example]]
HandlerOutput = Tuple[DatasetMetaData, Dict[str, DataGenerator]]
Handler = Callable[[str], HandlerOutput]
FixtureWriterFn = Callable[[str], None]
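# A minimal handler sketch (illustrative only; the name, sizes and generator
# contents are placeholders) showing how these aliases compose:
#
#   def my_handler(dataset_path: str) -> HandlerOutput:
#     metadata = DatasetMetaData(
#         num_classes=1, num_channels=3, image_shape=(),
#         additional_metadata={})
#     def gen():
#       yield Example(image=pil_image.new('RGB', (8, 8)), label=0,
#                     multi_label=None)
#     return metadata, {'train': gen()}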
@dataclasses.dataclass(frozen=True)
class KaggleCompetition:
"""Static metadata for kaggle competitions."""
competition_name: str
checksum: Optional[str] = None
@dataclasses.dataclass(frozen=True)
class KaggleDataset:
"""Static metadata for kaggle Dataset.
Dataset name is of the format `user/dataset`.
"""
dataset_name: str
checksum: Optional[str] = None
@dataclasses.dataclass(frozen=True)
class DownloadableArtefact:
"""Static metadata for any artefact that can be downloaded."""
url: str
checksum: Optional[str] = None
Artefact = Union[DownloadableArtefact, KaggleCompetition, KaggleDataset]
@dataclasses.dataclass(frozen=True)
class DownloadableDataset:
"""Static metadata for downloadabe datasets.
Attributes:
name: The name of the dataset.
handler: A callable that generates iterators over the dataset features,
      given a path containing the downloaded dataset artifacts.
download_urls: URLs for all of the dataset artifacts or kaggle
competition/dataset.
manual_download: If the data artifacts must be manually downloaded.
website_url: The (optional) dataset homepage.
    paper_url: The (optional) URL for the dataset's paper.
paper_title: The (optional) title of the paper where the dataset is defined.
authors: The (optional) authors of the dataset's paper.
year: The (optional) year the dataset's paper was published.
papers_with_code_url: If the dataset has a page on papers with code, it may
be stored here.
fixture_writer: An (optional) function to write a dataset fixture for the
dataset. This is a callable that writes fixture versions of the dataset
artifact to a given path, and is intended for use in tests.
"""
name: str
handler: Handler
download_urls: List[Artefact]
manual_download: bool = False
website_url: Optional[str] = None
paper_url: Optional[str] = None
paper_title: Optional[str] = None
authors: Optional[str] = None
year: Optional[int] = None
papers_with_code_url: Optional[str] = None
fixture_writer: Optional[FixtureWriterFn] = None
|
dm_nevis-master
|
dm_nevis/datasets_storage/handlers/types.py
|
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Trancos dataset handler."""
import os
import tarfile
from typing import List
from dm_nevis.datasets_storage.handlers import splits
from dm_nevis.datasets_storage.handlers import types
import numpy as np
from PIL import Image
from tensorflow.io import gfile
_NUM_CLASSES = 10
_IMAGE_PATH = 'TRANCOS_v3/images/'
_SETS_PATH = 'TRANCOS_v3/image_sets/'
def trancos_handler(dataset_path: str) -> types.HandlerOutput:
"""Imports Trancos dataset.
Link: https://gram.web.uah.es/data/datasets/trancos/index.html
Args:
dataset_path: Path with downloaded datafiles.
Returns:
Metadata and generator functions.
"""
dfile = gfile.listdir(dataset_path)
assert len(dfile) == 1
dfile = dfile[0]
def extract_tarinfos(tfile, startstr, endstr):
return [
tarinfo for tarinfo in tfile.getmembers()
if (tarinfo.name.startswith(startstr) and
tarinfo.name.endswith(endstr))]
def extract_class_name(path: str, suffix_length: int) -> str:
return path.split('/')[-1][:-suffix_length]
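  # For example, a (hypothetical) label member named
  # 'TRANCOS_v3/images/image-1-000001.txt' yields 'image-1-000001.' for
  # suffix_length=3, to which 'jpg' is appended below to recover the image
  # filename 'image-1-000001.jpg'.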
  # Each image has an associated txt file where each line contains the x,y
  # coordinates of a vehicle. In principle, the number of vehicles is the
  # label we want to predict. In practice, we declare the label to be the
  # quantized value of the vehicle count in the image, turning counting into
  # standard classification.
with tarfile.open(os.path.join(dataset_path, dfile), 'r:gz') as tfile:
label_tarinfos = extract_tarinfos(tfile, _IMAGE_PATH, '.txt')
imageid_to_count = dict()
max_size = 200
histogram = np.zeros(max_size)
for ltar in label_tarinfos:
f_obj = tfile.extractfile(ltar)
assert f_obj
count = len(f_obj.readlines())
image_name = extract_class_name(ltar.name, 3) + 'jpg'
imageid_to_count[image_name] = count
      # Clip counts at the histogram size (note >= so that count == max_size
      # cannot index out of bounds below).
      if count >= max_size:
        print('Warning: count above threshold')
        count = max_size - 1
histogram[count] += 1
# The idea is to divide the counts into buckets of contiguous values,
# such that the number of examples in each bucket is roughly the same.
# In order to do this, we first compute the cumulative sum of the empirical
# distribution of counts, and then divide the cumulative density
# distribution into equally sized buckets. This will make sure that each
  # bucket (class) contains roughly the same number of samples.
tot_num_samples = histogram.sum()
cumsum = np.cumsum(histogram)
num_examples_per_bucket = tot_num_samples / _NUM_CLASSES
intervals = []
for cnt in range(1, _NUM_CLASSES):
indices = np.where(cumsum < num_examples_per_bucket * cnt)
assert indices[0].shape[0] > 0
intervals.append(indices[0][-1])
intervals.append(max_size)
count_to_label = []
label = 0
prev_cnt = 0
classname_to_label = dict()
for cnt in range(max_size):
if cnt > intervals[label]:
classname = '%d-%d' % (prev_cnt, cnt)
classname_to_label[classname] = label
label += 1
prev_cnt = cnt + 1
count_to_label.append(label)
classname = '%d-%d' % (prev_cnt, max_size)
classname_to_label[classname] = label
assert label == _NUM_CLASSES - 1
# Extract list of training, valid, test files.
def extract_image_list(sets_path, filename):
with tarfile.open(os.path.join(dataset_path, dfile), 'r:gz') as tfile:
tarinfo = extract_tarinfos(tfile, sets_path, filename)
assert len(tarinfo) == 1
f_obj = tfile.extractfile(tarinfo[0])
assert f_obj
lines = f_obj.readlines()
lines = [line.decode('utf-8')[:-1] for line in lines]
return lines
train_set = extract_image_list(_SETS_PATH, 'training.txt')
valid_set = extract_image_list(_SETS_PATH, 'validation.txt')
test_set = extract_image_list(_SETS_PATH, 'test.txt')
metadata = types.DatasetMetaData(
num_classes=_NUM_CLASSES,
num_channels=3,
image_shape=(), # Ignored for now.
additional_metadata=dict(
label_to_id=classname_to_label,
task_type='regression',
image_type='counting'))
def gen_split(imageset: List[str]):
with tarfile.open(os.path.join(dataset_path, dfile), 'r:gz') as tfile:
tarinfos = [
tarinfo for tarinfo in tfile.getmembers()
if tarinfo.name.split('/')[-1] in imageset]
assert tarinfos
for ti in tarinfos:
f_obj = tfile.extractfile(ti)
image = Image.open(f_obj)
image.load()
imageid = ti.name.split('/')[-1]
count = imageid_to_count[imageid]
label = count_to_label[count]
yield (image, label)
make_gen_fn = lambda: gen_split(train_set)
per_split_gen = splits.random_split_generator_into_splits_with_fractions(
make_gen_fn, splits.SPLIT_WITH_FRACTIONS_FOR_TRAIN_AND_DEV_ONLY,
splits.MERGED_TRAIN_AND_DEV)
per_split_gen['dev-test'] = gen_split(valid_set)
per_split_gen['test'] = gen_split(test_set)
return metadata, per_split_gen
trancos_dataset = types.DownloadableDataset(
name='trancos',
download_urls=[
types.DownloadableArtefact(
url='https://universidaddealcala-my.sharepoint.com/:u:/g/personal/gram_uah_es/Eank6osXQgxEqa-1bb0nVsoBc3xO4XDwENc_g0nc6t58BA?&Download=1',
checksum='e9b4d5a62ab1fe5f542ec8326f2d4fda')
],
handler=trancos_handler)
|
dm_nevis-master
|
dm_nevis/datasets_storage/handlers/trancos.py
|
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for dm_nevis.datasets_storage.handlers.flickr_material_database."""
from absl.testing import absltest
from absl.testing import parameterized
from dm_nevis.datasets_storage.handlers import flickr_material_database
class FlickrMaterialDatabaseTest(parameterized.TestCase):
@parameterized.parameters([
dict(path='image/wood/wood_object_021_new.jpg', label=9),
dict(path='image/water/water_moderate_023_new.jpg', label=8),
])
def test_path_to_label(self, path, label):
result = flickr_material_database._path_to_label(path)
self.assertEqual(result, label)
def test_handler(self):
fixture_path = self.create_tempdir().full_path
flickr_material_database.write_fixture(fixture_path)
dataset = flickr_material_database.flickr_material_database_dataset
metadata, generators = dataset.handler(fixture_path)
self.assertEqual(metadata.num_classes, 10)
samples = list()
for split in ['train', 'dev', 'dev-test', 'test']:
self.assertIn(split, generators)
samples.extend(list(generators[split]))
self.assertLen(samples, 11)
self.assertTrue(
all(img.size == metadata.image_shape for img, label in samples))
if __name__ == '__main__':
absltest.main()
|
dm_nevis-master
|
dm_nevis/datasets_storage/handlers/flickr_material_database_test.py
|
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Belgium TSC dataset handler."""
import os
from typing import Sequence
import zipfile
from dm_nevis.datasets_storage.handlers import extraction_utils
from dm_nevis.datasets_storage.handlers import splits
from dm_nevis.datasets_storage.handlers import types
from PIL import Image
from tensorflow.io import gfile
TRAIN_ZIP_PATH = "BelgiumTSC_Training.zip"
TEST_ZIP_PATH = "BelgiumTSC_Testing.zip"
CLASS_NAMES_PATH = "reducedSetTS.txt"
IGNORED_FILES_REGEX = r".*\.csv$|.*\.txt$"
def belgium_tsc_handler(artifacts_path: str) -> types.HandlerOutput:
"""Imports Belgium Traffic Sign Classification dataset.
The dataset home page is at http://people.ee.ethz.ch/~timofter/traffic_signs/.
Args:
artifacts_path: Path with downloaded artifacts.
Returns:
Metadata and generator functions.
"""
with gfile.GFile(os.path.join(artifacts_path, CLASS_NAMES_PATH), "r") as f:
class_names = f.readlines()[1:]
assert len(class_names) == 62
metadata = types.DatasetMetaData(
num_classes=62,
num_channels=3,
image_shape=(), # Ignored for now.
additional_metadata={
"class_names": class_names,
})
def gen(path):
return extraction_utils.generate_images_from_zip_files(
artifacts_path, [path],
_label_from_path,
ignored_files_regex=IGNORED_FILES_REGEX)
make_gen_fn = lambda: gen(TRAIN_ZIP_PATH)
per_split_gen = splits.random_split_generator_into_splits_with_fractions(
make_gen_fn, splits.SPLIT_WITH_FRACTIONS_FOR_TRAIN,
splits.MERGED_TRAIN_AND_DEV)
per_split_gen["test"] = gen(TEST_ZIP_PATH)
return metadata, per_split_gen
def write_fixture(path: str) -> None:
"""Writes a fixture TSC dataset to the given path."""
with zipfile.ZipFile(os.path.join(path, TRAIN_ZIP_PATH), "w") as zf:
_write_fixture_images(zf, "Training",
[0, 1, 2, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17])
with zipfile.ZipFile(os.path.join(path, TEST_ZIP_PATH), "w") as zf:
_write_fixture_images(zf, "Test", [3, 4, 5, 6])
fake_class_names = ["ABC"] * 62
with gfile.GFile(os.path.join(path, CLASS_NAMES_PATH), "w") as f:
f.write("\n".join(["header", *fake_class_names]))
def _write_fixture_images(zf: zipfile.ZipFile, split_name: str,
class_indices: Sequence[int]) -> None:
for class_index in class_indices:
path = os.path.join(split_name, f"{class_index:05}", "01957_00002.ppm")
with zf.open(path, "w") as f:
image = Image.new("RGBA", size=(50, 50), color=(155, 0, 0))
image.save(f, "ppm")
def _label_from_path(path: str) -> types.Label:
return int(os.path.basename(os.path.dirname(path)))
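# For example, _label_from_path('Training/00012/01957_00002.ppm') returns 12,
# since the class id is encoded as the zero-padded parent directory name (cf.
# _write_fixture_images above).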
belgium_tsc_dataset = types.DownloadableDataset(
name="belgium_tsc",
download_urls=[
types.DownloadableArtefact(
url="https://btsd.ethz.ch/shareddata/BelgiumTS/reducedSetTS.txt",
checksum="e6052a024e24060e5cec84fbda34fb5e"),
types.DownloadableArtefact(
url="http://www.vision.ee.ethz.ch/~timofter/BelgiumTSC/BelgiumTSC_Training.zip",
checksum="c727ca9d00e3964ca676286a1808ccee"),
types.DownloadableArtefact(
url="http://www.vision.ee.ethz.ch/~timofter/BelgiumTSC/BelgiumTSC_Testing.zip",
checksum="d208de4566388791c0028da8d6a545cc"),
],
handler=belgium_tsc_handler,
fixture_writer=write_fixture)
|
dm_nevis-master
|
dm_nevis/datasets_storage/handlers/belgium_tsc.py
|
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""VOC actions handler."""
import os
import tarfile
from typing import Dict, List
from absl import logging
from dm_nevis.datasets_storage.handlers import splits
from dm_nevis.datasets_storage.handlers import types
import numpy as np
from PIL import Image
import tensorflow_datasets as tfds
import xmltodict
_TRAIN_DATA_FNAME = 'VOCtrainval_11-May-2012.tar'
_TEST_DATA_FNAME = 'VOC2012test.tar'
_DIR_PREFIX = 'VOCdevkit/VOC2012/ImageSets/Action'
_ANNOTATIONS_PREFIX = 'VOCdevkit/VOC2012/Annotations'
_IMAGES_PREFIX = 'VOCdevkit/VOC2012/JPEGImages'
_ACTIONS = [
'jumping',
'phoning',
'playinginstrument',
'reading',
'ridingbike',
'ridinghorse',
'running',
'takingphoto',
'usingcomputer',
'walking',
'other',
]
_NUM_CLASSES = 11
def actions_to_ids(actions: Dict[str, str],
label_to_id: Dict[str, int]) -> List[int]:
action_ids = np.zeros((_NUM_CLASSES,))
for action, active in actions.items():
if int(active) == 1:
action_ids[label_to_id[action]] = 1
return np.nonzero(action_ids)[0].tolist()
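# Illustrative example (added comment, not in the original file): with
# label_to_id built from the _ACTIONS ordering above,
#   actions_to_ids({'jumping': '1', 'walking': '1', 'other': '0'}, label_to_id)
# returns [0, 9], i.e. the ids of the active actions in sorted order.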
def _extract_image_fnames_for_subset(subset: str, tf: tarfile.TarFile,
actions: List[str]):
"""Extract image filenames for given subset and a set of actions."""
result = set()
for action in actions:
if action == 'other':
continue
actions_file = tf.extractfile(
os.path.join(_DIR_PREFIX, f'{action}_{subset}.txt'))
if actions_file is not None:
for line in actions_file:
        (image_fname, _,
         _) = line.decode('utf-8').strip().replace('  ', ' ').split(' ')
result.add(image_fname)
return result
def voc_actions_handler(dataset_path: str) -> types.HandlerOutput:
"""VOC actions dataset handler."""
label_to_id = dict(
((label, label_id) for label_id, label in enumerate(_ACTIONS)))
with tarfile.open(os.path.join(dataset_path, _TRAIN_DATA_FNAME), 'r') as tf:
image_fnames = _extract_image_fnames_for_subset('trainval', tf, _ACTIONS)
def make_gen(data_fname, image_fnames):
with tarfile.open(os.path.join(dataset_path, data_fname), 'r') as tf:
for image_fname in image_fnames:
annotations_fname = os.path.join(_ANNOTATIONS_PREFIX,
f'{image_fname}.xml')
try:
annotations_file = tf.extractfile(annotations_fname)
except KeyError:
logging.warning('Skipping: %s', image_fname)
continue
annotations = xmltodict.parse(annotations_file)
if annotations is None:
continue
image = Image.open(
tf.extractfile(os.path.join(_IMAGES_PREFIX, f'{image_fname}.jpg')))
objects = annotations['annotation']['object']
if not isinstance(objects, list):
objects = [objects]
for image_object in objects:
bndbox = image_object['bndbox']
xmin = int(float(bndbox['xmin']))
xmax = int(float(bndbox['xmax']))
ymin = int(float(bndbox['ymin']))
ymax = int(float(bndbox['ymax']))
action_image = image.crop((xmin, ymin, xmax, ymax))
action_annotations = actions_to_ids(image_object['actions'],
label_to_id)
yield types.Example(
image=action_image, multi_label=action_annotations, label=None)
metadata = types.DatasetMetaData(
num_classes=_NUM_CLASSES,
num_channels=3,
image_shape=(), # Ignored for now.
additional_metadata=dict(
label_to_id=label_to_id,
task_type='multi-label',
image_type='object',
),
features=tfds.features.FeaturesDict({
'multi_label':
tfds.features.Sequence(
tfds.features.ClassLabel(num_classes=_NUM_CLASSES)),
'png_encoded_image':
tfds.features.Image()
}))
make_gen_fn = lambda: make_gen(_TRAIN_DATA_FNAME, image_fnames)
per_split_gen = splits.random_split_generator_into_splits_with_fractions(
make_gen_fn, splits.SPLIT_WITH_FRACTIONS_FOR_ALL_DATA,
splits.MERGED_TRAIN_AND_DEV)
return metadata, per_split_gen
# pylint: disable=line-too-long
voc_actions_dataset = types.DownloadableDataset(
name='voc_actions',
download_urls=[
types.DownloadableArtefact(
url='http://pjreddie.com/media/files/VOCtrainval_11-May-2012.tar',
checksum='6cd6e144f989b92b3379bac3b3de84fd'),
types.DownloadableArtefact(
url='http://pjreddie.com/media/files/VOC2012test.tar',
checksum='9065beb292b6c291fad82b2725749fda'
) # This requires authorisation
],
website_url='http://host.robots.ox.ac.uk/pascal/VOC/voc2012',
handler=voc_actions_handler)
|
dm_nevis-master
|
dm_nevis/datasets_storage/handlers/voc_actions.py
|
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""MIT-scenes handler.
See https://paperswithcode.com/dataset/mit-indoors-scenes for more information.
"""
import io
import os
import zipfile
from dm_nevis.datasets_storage.handlers import splits
from dm_nevis.datasets_storage.handlers import types
from PIL import Image
_ARCHIVE_FNAME = 'indoor-scenes-cvpr-2019.zip'
_TRAIN_IMAGES_FNAME = 'TrainImages.txt'
_TEST_IMAGES_FNAME = 'TestImages.txt'
def _path_to_label_fn(path: str, label_to_id):
label = os.path.split(path)[1].split('_')[0]
return label_to_id[label]
def mit_scenes_handler(dataset_path: str) -> types.HandlerOutput:
"""MIT indoor scenes dataset."""
with zipfile.ZipFile(os.path.join(dataset_path, _ARCHIVE_FNAME), 'r') as zf:
train_images_names = zf.read(_TRAIN_IMAGES_FNAME).decode('utf-8').split(
'\n')
test_images_names = zf.read(_TEST_IMAGES_FNAME).decode('utf-8').split('\n')
labels = [
'office', 'lobby', 'stairscase', 'winecellar', 'church_inside',
'studiomusic', 'shoeshop', 'bowling', 'poolinside', 'nursery',
'meeting_room', 'videostore', 'bathroom', 'library', 'locker_room',
'movietheater', 'children_room', 'concert_hall', 'clothingstore',
'pantry', 'subway', 'prisoncell', 'inside_bus', 'garage', 'warehouse',
'bookstore', 'auditorium', 'laboratorywet', 'tv_studio', 'buffet',
'waitingroom', 'laundromat', 'bedroom', 'greenhouse', 'cloister',
'elevator', 'dining_room', 'hairsalon', 'livingroom', 'deli',
'restaurant_kitchen', 'dentaloffice', 'trainstation', 'casino', 'bar',
'jewelleryshop', 'kitchen', 'museum', 'grocerystore', 'operating_room',
'airport_inside', 'gameroom', 'fastfood_restaurant', 'classroom',
'bakery', 'closet', 'artstudio', 'hospitalroom', 'gym', 'florist',
'inside_subway', 'toystore', 'kindergarden', 'restaurant', 'mall',
'corridor', 'computerroom'
]
label_to_id = dict(
((label, label_id) for label_id, label in enumerate(labels)))
metadata = types.DatasetMetaData(
num_classes=len(labels),
num_channels=3,
image_shape=(), # Ignored for now.
additional_metadata=dict(
label_to_id=label_to_id,
task_type='classification',
image_type='scene',
))
def gen(image_names, label_to_id, base_dir='indoorCVPR_09/Images'):
with zipfile.ZipFile(os.path.join(dataset_path, _ARCHIVE_FNAME), 'r') as zf:
for image_name in image_names:
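        # Listing lines look like '<class_dir>/<image file>' (inferred from
        # the split below), so the leading directory gives the class.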
label = label_to_id[image_name.split('/')[0]]
image_path = os.path.join(base_dir, image_name)
image = Image.open(io.BytesIO(zf.read(image_path))).convert('RGB')
image.load()
yield (image, label)
make_gen_fn = lambda: gen(train_images_names, label_to_id)
per_split_gen = splits.random_split_generator_into_splits_with_fractions(
make_gen_fn, splits.SPLIT_WITH_FRACTIONS_FOR_TRAIN,
splits.MERGED_TRAIN_AND_DEV)
per_split_gen['test'] = gen(test_images_names, label_to_id)
return metadata, per_split_gen
mit_scenes_dataset = types.DownloadableDataset(
name='mit_scenes',
download_urls=[
types.KaggleDataset(
dataset_name='itsahmad/indoor-scenes-cvpr-2019',
checksum='b5a8ee875edc974ab49f4cad3b8607da')
],
website_url='https://www.kaggle.com/itsahmad/indoor-scenes-cvpr-2019',
handler=mit_scenes_handler)
|
dm_nevis-master
|
dm_nevis/datasets_storage/handlers/mit_scenes.py
|
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Path MNIST handler.
Colorectal Histology MNIST dataset.
"""
import os
import zipfile
from dm_nevis.datasets_storage.handlers import splits
from dm_nevis.datasets_storage.handlers import types
import numpy as np
import pandas as pd
from PIL import Image
_HMNIST_FNAME = 'hmnist_28_28_RGB.csv'
_ARCHIVE_FNAME = 'colorectal-histology-mnist.zip'
def _path_to_label_fn(path: str, label_to_id):
label = os.path.split(path)[1].split('_')[0]
return label_to_id[label]
def path_mnist_handler(dataset_path: str) -> types.HandlerOutput:
"""Colorectal Histology MNIST handler."""
datafile = os.path.join(dataset_path, _ARCHIVE_FNAME)
  # Note: datafile already includes dataset_path; do not join it again.
  with zipfile.ZipFile(datafile, 'r') as zf:
data = pd.read_csv(zf.open(_HMNIST_FNAME))
def gen():
for _, row in data.iterrows():
img = np.array([row[f'pixel{i:04d}'] for i in range(28 * 28 * 3)
]).reshape((28, 28, 3))
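      # CSV labels appear to be 1-based (1..8); shift to 0-based.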
label = row['label'] - 1
img = Image.fromarray(img.astype('uint8'))
yield img, label
metadata = types.DatasetMetaData(
num_classes=8,
num_channels=3,
image_shape=(), # Ignored for now.
additional_metadata=dict(
task_type='classification',
image_type='medical',
))
per_split_gen = splits.random_split_generator_into_splits_with_fractions(
gen, splits.SPLIT_WITH_FRACTIONS_FOR_ALL_DATA,
splits.MERGED_TRAIN_AND_DEV)
return metadata, per_split_gen
path_mnist_dataset = types.DownloadableDataset(
name='path_mnist',
download_urls=[
types.KaggleDataset(
dataset_name='kmader/colorectal-histology-mnist',
checksum='e03501016bd54719567dfb954fe982fe')
],
website_url='https://www.kaggle.com/kmader/colorectal-histology-mnist',
handler=path_mnist_handler)
|
dm_nevis-master
|
dm_nevis/datasets_storage/handlers/path_mnist.py
|
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for dm_nevis.datasets_storage.handlers.belgium_tsc."""
from absl.testing import absltest
from absl.testing import parameterized
from dm_nevis.datasets_storage.handlers import belgium_tsc
class BelgiumTscTest(parameterized.TestCase):
@parameterized.parameters([
dict(path='Training/00061/01957_00002.ppm', expected=61),
dict(path='Test/00009/01957_00002.ppm', expected=9),
])
def test_label_from_path(self, path, expected):
result = belgium_tsc._label_from_path(path)
self.assertEqual(result, expected)
def test_belgium_tsc_handler(self):
artifact_dir = self.create_tempdir().full_path
belgium_tsc.write_fixture(artifact_dir)
metadata, generators = belgium_tsc.belgium_tsc_handler(artifact_dir)
self.assertLen(metadata.additional_metadata['class_names'], 62)
self.assertEqual(metadata.num_classes, 62)
for split in ['train', 'dev', 'dev-test', 'test']:
self.assertIn(split, generators)
test_examples = list(generators['test'])
self.assertLen(test_examples, 4)
samples = list()
for split in ['train', 'dev', 'dev-test']:
samples.extend(generators[split])
self.assertLen(samples, 14)
if __name__ == '__main__':
absltest.main()
|
dm_nevis-master
|
dm_nevis/datasets_storage/handlers/belgium_tsc_test.py
|
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Office-Caltech-10 dataset handler.
Note: downloads for this dataset have been observed to be unreliable.
"""
import re
from dm_nevis.datasets_storage.handlers import extraction_utils
from dm_nevis.datasets_storage.handlers import splits
from dm_nevis.datasets_storage.handlers import types
CALTECH_LABELS_TO_ID = {
"backpack": 0,
"touring-bike": 1,
"calculator": 2,
"head-phones": 3,
"computer-keyboard": 4,
"laptop-101": 5,
"computer-monitor": 6,
"computer-mouse": 7,
"coffee-mug": 8,
"video-projector": 9,
}
_OBJECT_CATEGORIES_PATH = "256_ObjectCategories.tar"
# Filenames in the archive look like this:
# 256_ObjectCategories/003.backpack/003_0001.jpg
#
# where 003 is the label-number 1..257 (one-based).
_CALTECH_FILE_PATH_REGEX = re.compile(
r"256_ObjectCategories/(\d\d\d)\.(.+)/(\d\d\d)_(\d\d\d\d)\.jpg")
OFFICE_LABELS_TO_ID = {
"back_pack": 0,
"bike": 1,
"calculator": 2,
"headphones": 3,
"keyboard": 4,
"laptop_computer": 5,
"monitor": 6,
"mouse": 7,
"mug": 8,
"projector": 9
}
_ARCHIVE_FILENAME = "domain_adaptation_images.tar.gz"
# Filenames in the archive look like this:
# domain_adaptation_images/amazon/images/bike/frame_0001.jpg
# domain_adaptation_images/dslr/images/bike/frame_0001.jpg
# domain_adaptation_images/webcam/images/bike/frame_0003.jpg
#
# where the directory under images/ gives the class name.
_OFFICE_FILE_PATH_REGEX = re.compile(
r"(\w+)/images/(.+)/frame_(\d\d\d\d)\.jpg")
SPLIT_WITH_FRACTIONS_FOR_TEST_AND_DEV_TEST = {"test": 0.5, "dev-test": 0.5}
def office_caltech_10_handler(download_path: str) -> types.HandlerOutput:
"""Imports images from Office31 and Caltech256 and select the overlap classes.
Args:
download_path: Directory containing the downloaded raw data.
Returns:
HandlerOutput
"""
def office_path_to_label_fn(fname):
fname_match = _OFFICE_FILE_PATH_REGEX.match(fname)
if not fname_match:
return None
else:
label_str = fname_match.group(2)
if label_str not in OFFICE_LABELS_TO_ID:
return None
label_id = OFFICE_LABELS_TO_ID[label_str]
return label_id
def caltech_path_to_label_fn(fname):
fname_match = _CALTECH_FILE_PATH_REGEX.match(fname)
if not fname_match:
return None
else:
label_str = fname_match.group(2)
if label_str not in CALTECH_LABELS_TO_ID:
return None
label_id = CALTECH_LABELS_TO_ID[label_str]
return label_id
def office_gen_split():
yield from extraction_utils.generate_images_from_tarfiles(
_ARCHIVE_FILENAME,
working_directory=download_path,
path_to_label_fn=office_path_to_label_fn)
def caltech_gen_split():
yield from extraction_utils.generate_images_from_tarfiles(
_OBJECT_CATEGORIES_PATH,
working_directory=download_path,
path_to_label_fn=caltech_path_to_label_fn,
convert_mode="RGB")
metadata = types.DatasetMetaData(
num_classes=len(CALTECH_LABELS_TO_ID),
num_channels=3,
image_shape=(), # Ignored for now.
additional_metadata=dict(
label_str_to_id=CALTECH_LABELS_TO_ID,
task_type="classification",
image_type="object"))
per_split_gen = splits.random_split_generator_into_splits_with_fractions(
office_gen_split,
splits.SPLIT_WITH_FRACTIONS_FOR_TRAIN_AND_DEV_ONLY,
splits.MERGED_TRAIN_AND_DEV)
test_dev_test_gen = splits.random_split_generator_into_splits_with_fractions(
caltech_gen_split,
SPLIT_WITH_FRACTIONS_FOR_TEST_AND_DEV_TEST)
per_split_gen = per_split_gen | test_dev_test_gen
return (metadata, per_split_gen)
office_caltech_10_dataset = types.DownloadableDataset(
name="office_caltech_10",
download_urls=[
types.DownloadableArtefact(
url="https://drive.google.com/u/0/uc?id=1r6o0pSROcV1_VwT4oSjA2FBUSCWGuxLK&export=download&confirm=y",
checksum="67b4f42ca05d46448c6bb8ecd2220f6d"),
types.DownloadableArtefact(
url="https://drive.google.com/u/0/uc?id=0B4IapRTv9pJ1WGZVd1VDMmhwdlE&export=download&confirm=y",
checksum="1b536d114869a5a8aa4580b89e9758fb")
],
website_url=[
"http://www.vision.caltech.edu/Image_Datasets/Caltech256/",
"https://faculty.cc.gatech.edu/~judy/domainadapt/"
],
paper_title="Geodesic Flow Kernel for Unsupervised Domain Adaptation",
authors="B Gong, Y Shi, F Sha, and K Grauman",
papers_with_code_url="https://paperswithcode.com/dataset/office-caltech-10",
handler=office_caltech_10_handler)
|
dm_nevis-master
|
dm_nevis/datasets_storage/handlers/office_caltech_10.py
|
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Olivetti handler."""
from dm_nevis.datasets_storage.handlers import extraction_utils as utils
from dm_nevis.datasets_storage.handlers import splits
from dm_nevis.datasets_storage.handlers import types
ARCHIVE_NAME = 'the-orl-database-for-training-and-testing.zip'
def _path_to_label_fn(path: str) -> int:
label = int(path.split('/')[-1].split('.')[0].split('_')[1]) - 1
return label
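# Illustrative example (assuming filenames of the form '<sample>_<subject>.jpg',
# as implied by the parsing above): _path_to_label_fn('faces/2_11.jpg') -> 10.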
def olivetti_face_handler(dataset_path: str) -> types.HandlerOutput:
"""Olivetti dataset handler."""
num_classes = 41
metadata = types.DatasetMetaData(
num_classes=num_classes,
num_channels=1,
image_shape=(), # Ignored for now.
additional_metadata=dict(
task_type='classification',
image_type='face',
))
def make_gen_fn():
return utils.generate_images_from_zip_files(
dataset_path, [ARCHIVE_NAME],
path_to_label_fn=_path_to_label_fn,
convert_mode='L')
per_split_gen = splits.random_split_generator_into_splits_with_fractions(
make_gen_fn, splits.SPLIT_WITH_FRACTIONS_FOR_ALL_DATA,
splits.MERGED_TRAIN_AND_DEV)
return metadata, per_split_gen
olivetti_face_dataset = types.DownloadableDataset(
name='olivetti_face',
download_urls=[
types.KaggleDataset(
dataset_name='tavarez/the-orl-database-for-training-and-testing',
checksum='09871495160825a485b0f2595ba2bb34')
],
website_url='https://www.kaggle.com/tavarez/the-orl-database-for-training-and-testing',
handler=olivetti_face_handler)
|
dm_nevis-master
|
dm_nevis/datasets_storage/handlers/olivetti_face.py
|
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Synthetic COVID-19 X-Ray dataset."""
import os
from typing import Dict
from dm_nevis.datasets_storage.handlers import extraction_utils as utils
from dm_nevis.datasets_storage.handlers import splits
from dm_nevis.datasets_storage.handlers import types
from tensorflow.io import gfile
def _path_to_label_fn(path: str, label_to_id: Dict[str, int]):
dirname = os.path.dirname(path)
if dirname == 'G1':
return label_to_id['Normal']
if dirname == 'G2':
return label_to_id['Pneumonia']
raise ValueError('Unknown label.')
# pylint:disable=missing-function-docstring
def synthetic_covid19_xray_handler(dataset_path: str) -> types.HandlerOutput:
files = gfile.listdir(dataset_path)
labels = ['Normal', 'Pneumonia']
label_to_id = dict(
((label, label_id) for label_id, label in enumerate(labels)))
metadata = types.DatasetMetaData(
num_classes=len(labels),
num_channels=3,
image_shape=(), # Ignored for now.
additional_metadata=dict(
label_to_id=label_to_id,
task_type='classification',
image_type='xray',
))
def make_gen_fn():
return utils.generate_images_from_zip_files(
dataset_path,
files,
path_to_label_fn=lambda path: _path_to_label_fn(path, label_to_id))
per_split_gen = splits.random_split_generator_into_splits_with_fractions(
make_gen_fn, splits.SPLIT_WITH_FRACTIONS_FOR_ALL_DATA,
splits.MERGED_TRAIN_AND_DEV)
return metadata, per_split_gen
synthetic_covid19_xray_dataset = types.DownloadableDataset(
name='synthetic_covid19_xray',
download_urls=[
types.DownloadableArtefact(
url='https://github.com/hasibzunair/synthetic-covid-cxr-dataset/releases/download/v0.1/G_NC.zip',
checksum='bd82149d00283fca892bc41e997a3070'),
types.DownloadableArtefact(
url='https://github.com/hasibzunair/synthetic-covid-cxr-dataset/releases/download/v0.1/G_PC.zip',
checksum='acf56668f97be81fd8c05c4308f80c61')
],
handler=synthetic_covid19_xray_handler)
|
dm_nevis-master
|
dm_nevis/datasets_storage/handlers/synthetic_covid19_xray.py
|
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Magellan Venus Volcanoes dataset handler."""
import os
import re
import tarfile
from dm_nevis.datasets_storage.handlers import splits
from dm_nevis.datasets_storage.handlers import types
import numpy as np
from PIL import Image
from tensorflow.io import gfile
_IGNORED_FILES_REGEX = r".*\.spr$|Chips|FOA|GroundTruths|Programs|Tables|README"
def magellan_venus_volcanoes_handler(
artifacts_path: str) -> types.HandlerOutput:
"""Imports Magellan Venus Volcanoes dataset (counting task).
The dataset home page is at
http://archive.ics.uci.edu/ml/datasets/volcanoes+on+venus+-+jartool+experiment
The dataset contains 134 gray-scale images of size 1024x1024.
  The creators provide coordinates of the volcanoes together with a label that
  expresses the confidence in the presence of each volcano.
  Some stats: 16 images have no volcano, 53 have at least 1 volcano with the
  highest confidence level (definitely), 85 images have at least a volcano with
  intermediate confidence level (probably), 104 images have at least a volcano
  with low confidence level (possibly), and 118 images have at least a volcano
  with the lowest confidence level (only a pit is visible).
  Note that the number of volcanoes per image varies between 0 and 62.
  For this task, we turn the problem into binary classification. We classify
  whether the image definitely contains a volcano or not.
The package is structured as follows:
package/GroundTruths stores the label files. The label file of the first image
is img1.lxyr and it lists in each row all the detected volcanoes in the
format: <confidence_level> <other_info>
We assign a label equal to 1 if there is at least one row starting with 1.
The images are in the folder: package/Images/
  For instance, the first image is: img1.sdt
  It can be opened in python via:
  (np.fromfile(open('img1.sdt'), np.ubyte)).reshape((1024, 1024))
  with values in [0, 255].
Args:
artifacts_path: Path with downloaded artifacts.
Returns:
Metadata and generator functions.
"""
files = gfile.listdir(artifacts_path)
assert len(files) == 1
label_to_class_index = {"neg": 0, # There is no volcanoe.
"pos": 1} # There is at least a volcanoe.
all_labels = dict()
# extract the label information
with tarfile.open(os.path.join(artifacts_path, files[0]), "r:gz") as tar:
for labelfile in tar.getmembers():
if "GroundTruths" in labelfile.name and labelfile.name.endswith("lxyr"):
f_obj = tar.extractfile(labelfile)
assert f_obj
def _get_label_from_lines(f_obj):
for line in f_obj:
label, *_ = line.decode("utf-8").split(" ")
if label == "1":
return 1
return 0
label = _get_label_from_lines(f_obj)
fullname = labelfile.name.split("/")[-1]
name, _ = os.path.splitext(fullname)
all_labels[name + ".sdt"] = label # image filename -> label
metadata = types.DatasetMetaData(
num_classes=2,
num_channels=1,
image_shape=(),
additional_metadata=dict(
label_to_id=label_to_class_index,
task_type="classification",
image_type="object"))
def path_to_label_fn(path: str) -> types.Label:
fields = path.split("/")
assert len(fields) == 3
return all_labels[fields[-1]]
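  # For example (illustrative): 'package/Images/img1.sdt' is mapped to
  # all_labels['img1.sdt'].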
def make_gen_fn():
with tarfile.open(os.path.join(artifacts_path, files[0]), "r:gz") as tf:
for member in tf:
if member.isdir() or re.search(_IGNORED_FILES_REGEX, member.name):
continue
label = path_to_label_fn(member.path)
f_obj = tf.extractfile(member.name)
assert f_obj
np_image = (np.frombuffer(f_obj.read(), dtype="ubyte")
).reshape((1024, 1024))
image = Image.fromarray(np_image)
yield (image, label)
per_split_gen = splits.random_split_generator_into_splits_with_fractions(
make_gen_fn, splits.SPLIT_WITH_FRACTIONS_FOR_ALL_DATA,
splits.MERGED_TRAIN_AND_DEV)
return metadata, per_split_gen
magellan_venus_volcanoes_dataset = types.DownloadableDataset(
name="magellan_venus_volcanoes",
download_urls=[
types.DownloadableArtefact(
url="http://archive.ics.uci.edu/ml/machine-learning-databases/volcanoes-mld/volcanoes.tar.gz",
checksum="55143a4ec42b626126c9b4ed618f59f8")
],
handler=magellan_venus_volcanoes_handler,
paper_title="Learning to Recognize Volcanoes on Venus",
authors="M.C. Burl, L. Asker, P. Smyth, U. Fayyad, P. Perona, L. Crumpler and J. Aubele",
year="1998")
|
dm_nevis-master
|
dm_nevis/datasets_storage/handlers/magellan_venus_volcanoes.py
|
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Handler for Brodatz dataset."""
import functools
import os
from typing import List
from dm_nevis.datasets_storage.handlers import types
import numpy as np
from PIL import Image
from tensorflow.io import gfile
def _get_class_names(filenames: List[str]) -> List[str]:
return sorted([os.path.splitext(f)[0] for f in filenames])
def brodatz_handler(dataset_path: str,
rng_seed: int = 0) -> types.HandlerOutput:
"""Imports Brodatz texture dataset.
  We import the 111 original texture images. Each of these images represents
  one class. Following the procedure in "A Training-free Classification
  Framework for Textures, Writers, and Materials" (R. Timofte and L. Van Gool,
  2012), we extract from each image 9 non-overlapping regions.
In order to ensure all the classes are presented in all the splits, we
randomly select out of these 9 samples:
- 5 samples for train
- 1 sample for dev
- 1 sample for dev-test
- 2 samples for test
These sets are non-overlapping by construction.
Link: https://www.ux.uis.no/~tranden/brodatz.html
Args:
dataset_path: Path with downloaded datafiles.
rng_seed: Seed for random number generator.
Returns:
Metadata and generator functions.
"""
filenames = gfile.listdir(dataset_path)
class_names = _get_class_names(filenames)
metadata = types.DatasetMetaData(
num_channels=1,
num_classes=len(filenames),
image_shape=(), # Ignored for now.
preprocessing='random_crop', # select random crops in the images
additional_metadata=dict(
labels=class_names,
task_type='classification',
image_type='texture'
))
def gen(rng_seed=0):
rng = np.random.default_rng(rng_seed)
for idx, f in enumerate(filenames):
splits_idx = rng.permutation(range(9))
im = Image.open(os.path.join(dataset_path, f))
im.load()
w, h = im.size
k = -1
for i in range(0, w, int(w/3)):
for j in range(0, h, int(h/3)):
# TODO: Write a function for computing the box coordinates
# and test it.
box = (i, j, i + int(w/3), j + int(h/3))
if i + int(w/3) < w and j + int(h/3) < h:
k += 1
image = im.crop(box)
if k in splits_idx[:5]:
yield image, idx, 'train'
if k == splits_idx[5]:
yield image, idx, 'dev'
if k == splits_idx[6]:
yield image, idx, 'dev-test'
if k in splits_idx[7:]:
yield image, idx, 'test'
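  # Illustrative sketch for the TODO above (not part of the original file):
  # a standalone, testable crop-box computation, assuming the same int(w/3)
  # tiling as `gen`:
  #
  #   def crop_boxes(w: int, h: int) -> List[Tuple[int, int, int, int]]:
  #     bw, bh = w // 3, h // 3
  #     return [(c * bw, r * bh, (c + 1) * bw, (r + 1) * bh)
  #             for c in range(3) for r in range(3)]
  #
  # Unlike the loop in `gen`, this always yields the full 3x3 grid, including
  # when the image side is exactly divisible by 3.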
def select_subset(gen, subsets):
for image, label, split in gen:
if split in subsets:
yield image, label
  per_split_gen = dict()
  per_split_gen['train'] = select_subset(gen(rng_seed), ['train'])
  per_split_gen['dev'] = select_subset(gen(rng_seed), ['dev'])
  per_split_gen['train_and_dev'] = select_subset(gen(rng_seed),
                                                 ['train', 'dev'])
  per_split_gen['dev-test'] = select_subset(gen(rng_seed), ['dev-test'])
  per_split_gen['test'] = select_subset(gen(rng_seed), ['test'])
return (metadata, per_split_gen)
brodatz_dataset = types.DownloadableDataset(
name='brodatz',
download_urls=[
types.DownloadableArtefact(
url='http://www.ux.uis.no/~tranden/brodatz/D1.gif',
checksum='d5b7a11b6c2e21d4869626e2e43a6c76'),
types.DownloadableArtefact(
url='http://www.ux.uis.no/~tranden/brodatz/D2.gif',
checksum='1de256b931c57e40c7bc9c3f64c6a77a'),
types.DownloadableArtefact(
url='http://www.ux.uis.no/~tranden/brodatz/D3.gif',
checksum='ec3927c4f532f88f069700f8d6adfddd'),
types.DownloadableArtefact(
url='http://www.ux.uis.no/~tranden/brodatz/D4.gif',
checksum='1ef331b11c9a3b85c34f4be1852b69e5'),
types.DownloadableArtefact(
url='http://www.ux.uis.no/~tranden/brodatz/D5.gif',
checksum='8c6b91aee71dfcd66aee551e6a0609e0'),
types.DownloadableArtefact(
url='http://www.ux.uis.no/~tranden/brodatz/D6.gif',
checksum='e6bb6971f81d319f623615d5694b3209'),
types.DownloadableArtefact(
url='http://www.ux.uis.no/~tranden/brodatz/D7.gif',
checksum='c2fcd13fc32c2b631343445c0e230020'),
types.DownloadableArtefact(
url='http://www.ux.uis.no/~tranden/brodatz/D8.gif',
checksum='801dade42334cac045e04a235f2986da'),
types.DownloadableArtefact(
url='http://www.ux.uis.no/~tranden/brodatz/D9.gif',
checksum='dc65d59935048475ad4867d84ebbfa54'),
types.DownloadableArtefact(
url='http://www.ux.uis.no/~tranden/brodatz/D10.gif',
checksum='75778b4707eb295101464113d78bec6e'),
types.DownloadableArtefact(
url='http://www.ux.uis.no/~tranden/brodatz/D11.gif',
checksum='f659294380d33fa8752ac6b070d9c55b'),
types.DownloadableArtefact(
url='http://www.ux.uis.no/~tranden/brodatz/D12.gif',
checksum='58a68e7fcdb1b0c32b6bb085ed3fe464'),
types.DownloadableArtefact(
url='http://www.ux.uis.no/~tranden/brodatz/D13.gif',
checksum='74762bcca81edf3683d91871d4863898'),
types.DownloadableArtefact(
url='http://www.ux.uis.no/~tranden/brodatz/D15.gif',
checksum='468611c6987f098b984bee1ef5feece5'),
types.DownloadableArtefact(
url='http://www.ux.uis.no/~tranden/brodatz/D16.gif',
checksum='bd81a2680d168ed1bd156b7d840a7e0e'),
types.DownloadableArtefact(
url='http://www.ux.uis.no/~tranden/brodatz/D17.gif',
checksum='4fa54c530e545ea9f6f3a9572e2197c7'),
types.DownloadableArtefact(
url='http://www.ux.uis.no/~tranden/brodatz/D18.gif',
checksum='1649654b700b0ec8dea92d937db90e07'),
types.DownloadableArtefact(
url='http://www.ux.uis.no/~tranden/brodatz/D19.gif',
checksum='c49ee81d5ac0241cc42765fbb9367140'),
types.DownloadableArtefact(
url='http://www.ux.uis.no/~tranden/brodatz/D20.gif',
checksum='166d44aa57308f1044d5d6009d85964e'),
types.DownloadableArtefact(
url='http://www.ux.uis.no/~tranden/brodatz/D21.gif',
checksum='070b869c8f38f6005595c062c09dd29e'),
types.DownloadableArtefact(
url='http://www.ux.uis.no/~tranden/brodatz/D22.gif',
checksum='d5fc9a65b2a66afa641375e005b1c3a8'),
types.DownloadableArtefact(
url='http://www.ux.uis.no/~tranden/brodatz/D23.gif',
checksum='68f3c724340a17cc9b71ccbbef2c625a'),
types.DownloadableArtefact(
url='http://www.ux.uis.no/~tranden/brodatz/D24.gif',
checksum='553f108617063d3bae59dbc0842d40a6'),
types.DownloadableArtefact(
url='http://www.ux.uis.no/~tranden/brodatz/D25.gif',
checksum='983966e908c0bb871d0c7eeb87d842eb'),
types.DownloadableArtefact(
url='http://www.ux.uis.no/~tranden/brodatz/D26.gif',
checksum='25cb81107a1344bb0df5bb700ea0d545'),
types.DownloadableArtefact(
url='http://www.ux.uis.no/~tranden/brodatz/D27.gif',
checksum='b4ad552c8153121f54311e0bf4d71742'),
types.DownloadableArtefact(
url='http://www.ux.uis.no/~tranden/brodatz/D28.gif',
checksum='bcb1f90c91e63232fc482e861ad2a5ef'),
types.DownloadableArtefact(
url='http://www.ux.uis.no/~tranden/brodatz/D29.gif',
checksum='97c091cf6bd85df9953fbacf4c26e653'),
types.DownloadableArtefact(
url='http://www.ux.uis.no/~tranden/brodatz/D30.gif',
checksum='f640ea4d19451070ab7521d01fe0443c'),
types.DownloadableArtefact(
url='http://www.ux.uis.no/~tranden/brodatz/D31.gif',
checksum='f2b021904c5657adff2f0ccd3c174da2'),
types.DownloadableArtefact(
url='http://www.ux.uis.no/~tranden/brodatz/D32.gif',
checksum='2c106006421fd382c8bb7d0dde4a7757'),
types.DownloadableArtefact(
url='http://www.ux.uis.no/~tranden/brodatz/D33.gif',
checksum='b87d02748fc35987ad31848eaa018309'),
types.DownloadableArtefact(
url='http://www.ux.uis.no/~tranden/brodatz/D34.gif',
checksum='bfa73bb2478c5197a4841b26bbee319a'),
types.DownloadableArtefact(
url='http://www.ux.uis.no/~tranden/brodatz/D35.gif',
checksum='708271b6fb9eff6ddb04ecd8144df2a1'),
types.DownloadableArtefact(
url='http://www.ux.uis.no/~tranden/brodatz/D36.gif',
checksum='7e61234cc1f705872c7157197950f473'),
types.DownloadableArtefact(
url='http://www.ux.uis.no/~tranden/brodatz/D37.gif',
checksum='efa7f55b325e0ec0adddfe2f13feb59f'),
types.DownloadableArtefact(
url='http://www.ux.uis.no/~tranden/brodatz/D38.gif',
checksum='41a7f446618790981a7126ec4f11a3dc'),
types.DownloadableArtefact(
url='http://www.ux.uis.no/~tranden/brodatz/D39.gif',
checksum='bf7c79d4bebfa5e39833e3d19722f906'),
types.DownloadableArtefact(
url='http://www.ux.uis.no/~tranden/brodatz/D40.gif',
checksum='00916ab1171c486b2aaa97dff111b767'),
types.DownloadableArtefact(
url='http://www.ux.uis.no/~tranden/brodatz/D41.gif',
checksum='09df782a9078662fad344fc00ebf15ef'),
types.DownloadableArtefact(
url='http://www.ux.uis.no/~tranden/brodatz/D42.gif',
checksum='5c0c9878f3404e9f827e5b36e9e4bd78'),
types.DownloadableArtefact(
url='http://www.ux.uis.no/~tranden/brodatz/D43.gif',
checksum='a3bb8e0a94e7bdf50bb202db4bbfd7cd'),
types.DownloadableArtefact(
url='http://www.ux.uis.no/~tranden/brodatz/D44.gif',
checksum='fe9671ef6e3a847cacc014b4e34aed3a'),
types.DownloadableArtefact(
url='http://www.ux.uis.no/~tranden/brodatz/D45.gif',
checksum='983b7bc79ce0510cce14ec1b1e16fa11'),
types.DownloadableArtefact(
url='http://www.ux.uis.no/~tranden/brodatz/D46.gif',
checksum='bd9ff64e7e4d49f213d8ee68e4c96a73'),
types.DownloadableArtefact(
url='http://www.ux.uis.no/~tranden/brodatz/D47.gif',
checksum='8153b39e1b9d535d7b5617f8af824770'),
types.DownloadableArtefact(
url='http://www.ux.uis.no/~tranden/brodatz/D48.gif',
checksum='91fc6fc1df6984f2ee4caa0878f79e61'),
types.DownloadableArtefact(
url='http://www.ux.uis.no/~tranden/brodatz/D49.gif',
checksum='9bf59efe485d20cefe403e22981bdf5f'),
types.DownloadableArtefact(
url='http://www.ux.uis.no/~tranden/brodatz/D50.gif',
checksum='3f51cb54e0812916aab4dd7a3ff1d53f'),
types.DownloadableArtefact(
url='http://www.ux.uis.no/~tranden/brodatz/D51.gif',
checksum='845012e87e736e7c086d860c3438395e'),
types.DownloadableArtefact(
url='http://www.ux.uis.no/~tranden/brodatz/D52.gif',
checksum='b3e50f0ec2fd1a6fedb01633e45e723c'),
types.DownloadableArtefact(
url='http://www.ux.uis.no/~tranden/brodatz/D53.gif',
checksum='fea16eb7f88bef7b8f59cb773c109a1e'),
types.DownloadableArtefact(
url='http://www.ux.uis.no/~tranden/brodatz/D54.gif',
checksum='7ce97a6514196ec55b5641ca6da128e4'),
types.DownloadableArtefact(
url='http://www.ux.uis.no/~tranden/brodatz/D55.gif',
checksum='83a53d7f3ed452d84bd24a5feb16ca82'),
types.DownloadableArtefact(
url='http://www.ux.uis.no/~tranden/brodatz/D56.gif',
checksum='e05b6a4d4118a69e8c1dc21f367a2837'),
types.DownloadableArtefact(
url='http://www.ux.uis.no/~tranden/brodatz/D57.gif',
checksum='ecf251b805b618c92f66eeaa42f3a555'),
types.DownloadableArtefact(
url='http://www.ux.uis.no/~tranden/brodatz/D58.gif',
checksum='ed0ed6bf2f9d76f7763dfd2a59ade2d7'),
types.DownloadableArtefact(
url='http://www.ux.uis.no/~tranden/brodatz/D59.gif',
checksum='ceabd5df9baeb447be7206e8f40b59c9'),
types.DownloadableArtefact(
url='http://www.ux.uis.no/~tranden/brodatz/D60.gif',
checksum='18a2dea676bc12c9dfce54859a194132'),
types.DownloadableArtefact(
url='http://www.ux.uis.no/~tranden/brodatz/D61.gif',
checksum='1f85e3af72d92fd24b8518f70f235150'),
types.DownloadableArtefact(
url='http://www.ux.uis.no/~tranden/brodatz/D62.gif',
checksum='6f79c57fa556d4b449583787e2dcad28'),
types.DownloadableArtefact(
url='http://www.ux.uis.no/~tranden/brodatz/D63.gif',
checksum='eb7ee131280bffc2b2416e719c848402'),
types.DownloadableArtefact(
url='http://www.ux.uis.no/~tranden/brodatz/D64.gif',
checksum='80e74de3008d706d95097ea1e9f0c47c'),
types.DownloadableArtefact(
url='http://www.ux.uis.no/~tranden/brodatz/D65.gif',
checksum='5a3d6019a57aab602339b7ce28b185da'),
types.DownloadableArtefact(
url='http://www.ux.uis.no/~tranden/brodatz/D66.gif',
checksum='ad7eb2e9504c2f6aa420e902bf281e8b'),
types.DownloadableArtefact(
url='http://www.ux.uis.no/~tranden/brodatz/D67.gif',
checksum='04b67bcc065507358a1cd139f6330386'),
types.DownloadableArtefact(
url='http://www.ux.uis.no/~tranden/brodatz/D68.gif',
checksum='2de16cc286ca056234c9d961db6faf29'),
types.DownloadableArtefact(
url='http://www.ux.uis.no/~tranden/brodatz/D69.gif',
checksum='847e8b08e204e51b9f6b37b27eb665e2'),
types.DownloadableArtefact(
url='http://www.ux.uis.no/~tranden/brodatz/D70.gif',
checksum='e71ea4d910079d8076282a808447b092'),
types.DownloadableArtefact(
url='http://www.ux.uis.no/~tranden/brodatz/D71.gif',
checksum='748bfdec8178eb759283b20f8f18c7b7'),
types.DownloadableArtefact(
url='http://www.ux.uis.no/~tranden/brodatz/D72.gif',
checksum='aab06dd699291cb98bc9bf3858c5c8e2'),
types.DownloadableArtefact(
url='http://www.ux.uis.no/~tranden/brodatz/D73.gif',
checksum='0fb12645d29c83e397bad8a4c2a02641'),
types.DownloadableArtefact(
url='http://www.ux.uis.no/~tranden/brodatz/D74.gif',
checksum='84bc434804e1af9e0ebebe35e268fe63'),
types.DownloadableArtefact(
url='http://www.ux.uis.no/~tranden/brodatz/D75.gif',
checksum='1925755a7bbb8c63eb84b1f764633040'),
types.DownloadableArtefact(
url='http://www.ux.uis.no/~tranden/brodatz/D76.gif',
checksum='f1ad969319f6fc6bd7282c3a665580f1'),
types.DownloadableArtefact(
url='http://www.ux.uis.no/~tranden/brodatz/D77.gif',
checksum='4a0bcc6bdb82e5a2021d1fd674143309'),
types.DownloadableArtefact(
url='http://www.ux.uis.no/~tranden/brodatz/D78.gif',
checksum='84891367765a6645da6bf344960d3367'),
types.DownloadableArtefact(
url='http://www.ux.uis.no/~tranden/brodatz/D79.gif',
checksum='23fe0d9309572a9398ab76585dfed68c'),
types.DownloadableArtefact(
url='http://www.ux.uis.no/~tranden/brodatz/D80.gif',
checksum='cda3fa8f9beb4ebd7b1214ae52d63007'),
types.DownloadableArtefact(
url='http://www.ux.uis.no/~tranden/brodatz/D81.gif',
checksum='03f9a63e224777b8fc6a831e294eb191'),
types.DownloadableArtefact(
url='http://www.ux.uis.no/~tranden/brodatz/D82.gif',
checksum='71ff43df59c976685594c2699a7ca285'),
types.DownloadableArtefact(
url='http://www.ux.uis.no/~tranden/brodatz/D83.gif',
checksum='72b08177d945df0f19fd7dee6d7d3199'),
types.DownloadableArtefact(
url='http://www.ux.uis.no/~tranden/brodatz/D84.gif',
checksum='71cfd495fe5697ba2c97584362c763d7'),
types.DownloadableArtefact(
url='http://www.ux.uis.no/~tranden/brodatz/D85.gif',
checksum='ec10e406c98376ef453e8ff84cd17ab7'),
types.DownloadableArtefact(
url='http://www.ux.uis.no/~tranden/brodatz/D86.gif',
checksum='c8ae9a9b08c34f10c98e10b8fbe3faa4'),
types.DownloadableArtefact(
url='http://www.ux.uis.no/~tranden/brodatz/D87.gif',
checksum='efd18bd1b96786cd0c1154d3b6607112'),
types.DownloadableArtefact(
url='http://www.ux.uis.no/~tranden/brodatz/D88.gif',
checksum='7388e9b96303c330363a127b5f86de9a'),
types.DownloadableArtefact(
url='http://www.ux.uis.no/~tranden/brodatz/D89.gif',
checksum='3fbfb4fcacd97bd8ff4d16c865e4d1c5'),
types.DownloadableArtefact(
url='http://www.ux.uis.no/~tranden/brodatz/D90.gif',
checksum='9799b578783825b275a016d3f73f5ee9'),
types.DownloadableArtefact(
url='http://www.ux.uis.no/~tranden/brodatz/D91.gif',
checksum='5ce405b9a67949c358b8425ad0eb043d'),
types.DownloadableArtefact(
url='http://www.ux.uis.no/~tranden/brodatz/D92.gif',
checksum='0e1ad08968c216ec63989cea2ed97591'),
types.DownloadableArtefact(
url='http://www.ux.uis.no/~tranden/brodatz/D93.gif',
checksum='51651d6a16ffac5eada215a9828b47dd'),
types.DownloadableArtefact(
url='http://www.ux.uis.no/~tranden/brodatz/D94.gif',
checksum='7e665d45c5d145b9677501699ccc6ef9'),
types.DownloadableArtefact(
url='http://www.ux.uis.no/~tranden/brodatz/D95.gif',
checksum='78d6a78e47f05bb0ae28a926310a3869'),
types.DownloadableArtefact(
url='http://www.ux.uis.no/~tranden/brodatz/D96.gif',
checksum='40633ecff095460e126aa30e55e2b914'),
types.DownloadableArtefact(
url='http://www.ux.uis.no/~tranden/brodatz/D97.gif',
checksum='4dbd7162f540bf106e8287b585798341'),
types.DownloadableArtefact(
url='http://www.ux.uis.no/~tranden/brodatz/D98.gif',
checksum='5f11c141eb653f7401f9dd28e88cb73c'),
types.DownloadableArtefact(
url='http://www.ux.uis.no/~tranden/brodatz/D99.gif',
checksum='ff60fd7aa813f72b8ef0cac840db6761'),
types.DownloadableArtefact(
url='http://www.ux.uis.no/~tranden/brodatz/D100.gif',
checksum='8e5e62d263ce3bad21d8c01ac7c0faa5'),
types.DownloadableArtefact(
url='http://www.ux.uis.no/~tranden/brodatz/D101.gif',
checksum='c8b21ce148aafb82635cb18966b0eac4'),
types.DownloadableArtefact(
url='http://www.ux.uis.no/~tranden/brodatz/D102.gif',
checksum='fa6ac2cf66fe51318209ac74d9a08dee'),
types.DownloadableArtefact(
url='http://www.ux.uis.no/~tranden/brodatz/D103.gif',
checksum='5976a960557eca00559042b0041921dd'),
types.DownloadableArtefact(
url='http://www.ux.uis.no/~tranden/brodatz/D104.gif',
checksum='f0565aeebc36cad137af950125001082'),
types.DownloadableArtefact(
url='http://www.ux.uis.no/~tranden/brodatz/D105.gif',
checksum='6f27031fed8269dd0fb9d36572eb84de'),
types.DownloadableArtefact(
url='http://www.ux.uis.no/~tranden/brodatz/D106.gif',
checksum='b338b00b68eec8d35b14b82d5eef2ba8'),
types.DownloadableArtefact(
url='http://www.ux.uis.no/~tranden/brodatz/D107.gif',
checksum='3d5c5fe771dab76f041cf58b4b7f95e8'),
types.DownloadableArtefact(
url='http://www.ux.uis.no/~tranden/brodatz/D108.gif',
checksum='98e8c0881b909259cc812d0ee1a7f700'),
types.DownloadableArtefact(
url='http://www.ux.uis.no/~tranden/brodatz/D109.gif',
checksum='3b0b3b050c09f5505f6c5079a702d87e'),
types.DownloadableArtefact(
url='http://www.ux.uis.no/~tranden/brodatz/D110.gif',
checksum='4df933730394c919130a051ef1b5cd53'),
types.DownloadableArtefact(
url='http://www.ux.uis.no/~tranden/brodatz/D111.gif',
checksum='459692fc135ea149fe40470d75c2f8ca'),
types.DownloadableArtefact(
url='http://www.ux.uis.no/~tranden/brodatz/D112.gif',
checksum='6c4cedeb6915d76742fb224a44293dd6')
],
handler=functools.partial(brodatz_handler, rng_seed=0))
|
dm_nevis-master
|
dm_nevis/datasets_storage/handlers/brodatz.py
|
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""CVC-MUSCIMA handler."""
import io
import os
from typing import Dict
import zipfile
from dm_nevis.datasets_storage.handlers import splits
from dm_nevis.datasets_storage.handlers import types
from PIL import Image
_LABELS = [
'w-23', 'w-14', 'w-18', 'w-11', 'w-40', 'w-42', 'w-28', 'w-03', 'w-05',
'w-34', 'w-36', 'w-37', 'w-12', 'w-31', 'w-06', 'w-22', 'w-25', 'w-21',
'w-38', 'w-48', 'w-32', 'w-07', 'w-39', 'w-15', 'w-10', 'w-17', 'w-45',
'w-50', 'w-02', 'w-08', 'w-01', 'w-20', 'w-35', 'w-29', 'w-46', 'w-47',
'w-13', 'w-30', 'w-33', 'w-09', 'w-16', 'w-49', 'w-43', 'w-44', 'w-24',
'w-19', 'w-04', 'w-26', 'w-41', 'w-27'
]
def _path_to_label_fn(path: str, label_to_id: Dict[str, int]) -> int:
label = os.path.basename(os.path.dirname(path))
return label_to_id[label]
_IMAGE_PREFIX = 'CVCMUSCIMA_WI/PNG_GT_Gray'
_PARTITIONS_FNAME = 'Partitions_Set.zip'
_IMAGES_FNAME = 'CVCMUSCIMA_WI.zip'
_TEST_SPLIT_FNAME = 'Partitions_Set/set_2_Independent/set_2_testing_01.txt'
_TRAIN_SPLIT_FNAME = 'Partitions_Set/set_2_Independent/set_2_training_01.txt'
def cvc_muscima_handler(dataset_path: str) -> types.HandlerOutput:
"""Handler for CVC-MUSCIMA dataset."""
with zipfile.ZipFile(os.path.join(dataset_path, _PARTITIONS_FNAME),
'r') as zf:
with zf.open(_TRAIN_SPLIT_FNAME) as f:
train_fnames = {line.decode('utf-8').strip() for line in f}
with zf.open(_TEST_SPLIT_FNAME) as f:
test_fnames = {line.decode('utf-8').strip() for line in f}
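  # Each partition file lists one image per line as '<writer>/<image filename>'
  # (inferred from the membership test in make_gen below).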
label_to_id = dict(
((label, label_id) for label_id, label in enumerate(_LABELS)))
metadata = types.DatasetMetaData(
num_classes=50,
num_channels=1,
image_shape=(), # Ignored for now.
additional_metadata=dict(
label_to_id=label_to_id,
task_type='classification',
image_type='ocr',
))
def make_gen(fnames, label_to_id):
with zipfile.ZipFile(os.path.join(dataset_path, _IMAGES_FNAME), 'r') as zf:
for member in zf.infolist():
if member.is_dir():
continue
fname = member.filename
image_fname = os.path.basename(fname)
if not fname.startswith(_IMAGE_PREFIX):
continue
label = os.path.basename(os.path.dirname(fname))
if os.path.join(label, image_fname) not in fnames:
continue
label = label_to_id[label]
image = Image.open(io.BytesIO(zf.read(member)))
image.load()
yield (image, label)
make_gen_fn = lambda: make_gen(train_fnames, label_to_id)
per_split_gen = splits.random_split_generator_into_splits_with_fractions(
make_gen_fn, splits.SPLIT_WITH_FRACTIONS_FOR_TRAIN,
splits.MERGED_TRAIN_AND_DEV)
per_split_gen['test'] = make_gen(test_fnames, label_to_id)
return metadata, per_split_gen
cvc_muscima_dataset = types.DownloadableDataset(
name='cvc_muscima',
download_urls=[
types.DownloadableArtefact(
url='http://wwwo.cvc.uab.es/cvcmuscima/CVCMUSCIMA_WI.zip',
checksum='33d7464a3dc376a9456bbfe7aad8c18f'),
types.DownloadableArtefact(
url='http://wwwo.cvc.uab.es/cvcmuscima/Partitions_Set.zip',
checksum='dd22cff47fd50ca01ee077b757a978cd')
],
website_url='http://wwwo.cvc.uab.es/cvcmuscima/index_database.html',
handler=cvc_muscima_handler)
|
dm_nevis-master
|
dm_nevis/datasets_storage/handlers/cvc_muscima.py
|
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Semeion handler."""
import os
from typing import Tuple
from dm_nevis.datasets_storage.handlers import splits
from dm_nevis.datasets_storage.handlers import types
import numpy as np
from PIL import Image
from tensorflow.io import gfile
_SEMION_FNAME = 'semeion.data'
_NUM_ATTRIBUTES = 256
_IMG_SHAPE = (16, 16)
_NUM_CLASSES = 10
def semeion_handler(dataset_path: str) -> types.HandlerOutput:
"""Semeion dataset handler."""
metadata = types.DatasetMetaData(
num_classes=10,
num_channels=1,
image_shape=(), # Ignored for now.
additional_metadata=dict(
task_type='classification',
image_type='ocr',
))
def make_gen_fn():
with gfile.GFile(os.path.join(dataset_path, _SEMION_FNAME), 'r') as f:
for line in f:
image, label = _parse_image_and_label(line)
yield (image, label)
per_split_gen = splits.random_split_generator_into_splits_with_fractions(
make_gen_fn, splits.SPLIT_WITH_FRACTIONS_FOR_ALL_DATA,
splits.MERGED_TRAIN_AND_DEV)
return metadata, per_split_gen
def _parse_image_and_label(line: str) -> Tuple[Image.Image, int]:
"""Parses an image and a label from the line."""
  # The data in the line corresponds to 256 binary (float) pixel values,
  # followed by a one-hot encoding of the label represented as a binary
  # vector of size 10.
unparsed_line = line.strip().split(' ')
image_data = []
for i in range(_NUM_ATTRIBUTES):
image_data.append(int(float(unparsed_line[i])))
image_array = np.reshape(np.array(image_data), _IMG_SHAPE).astype(np.uint8)
# Original array is in range [0,1]
image_array *= 255
image = Image.fromarray(image_array)
labels = []
for i in range(_NUM_CLASSES):
labels.append(int(unparsed_line[i + _NUM_ATTRIBUTES]))
label = np.argmax(labels)
return (image, label)
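# Illustrative example (not part of the original file): a line of 256 pixel
# values followed by a one-hot label vector, e.g.
#   line = ' '.join(['1.0000'] * 256 + ['0', '0', '0', '1'] + ['0'] * 6)
#   image, label = _parse_image_and_label(line)  # label == 3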
semeion_dataset = types.DownloadableDataset(
name='semeion',
download_urls=[
types.DownloadableArtefact(
url='https://archive.ics.uci.edu/ml/machine-learning-databases/semeion/semeion.data',
checksum='cb545d371d2ce14ec121470795a77432')
],
website_url='https://archive.ics.uci.edu/ml/datasets/semeion+handwritten+digit',
handler=semeion_handler)
|
dm_nevis-master
|
dm_nevis/datasets_storage/handlers/semeion.py
|
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PPMI handler."""
import os
from typing import Dict
import zipfile
from dm_nevis.datasets_storage.handlers import extraction_utils as utils
from dm_nevis.datasets_storage.handlers import splits
from dm_nevis.datasets_storage.handlers import types
from tensorflow.io import gfile
_ZIP_FILENAME = 'norm_ppmi_12class.zip'
_IGNORED_FILES_REGEX_TEST = r'^README$|train'
_IGNORED_FILES_REGEX_TRAIN = r'^README$|test'
def _label_from_fname(filename: str, label_to_id: Dict[str, int]) -> int:
"""Extracts a label given a filename for the PPMI dataset."""
label_str = _get_prefix(filename)
label = label_to_id[label_str]
return label
def _get_prefix(path: str) -> str:
pieces = path.split(os.sep)
if len(pieces) >= 3:
return os.path.join(pieces[0], pieces[1], pieces[2])
else:
return ''
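# For example (illustrative): _get_prefix('a/b/c/d.jpg') returns 'a/b/c', the
# first three path components; paths with fewer components yield ''.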
def ppmi_handler(dataset_path: str) -> types.HandlerOutput:
"""Imports PPMI dataset.
The dataset home page is at
https://ai.stanford.edu/~bangpeng/ppmi.html#:~:text=People%20Playing%20Musical%20Instrument&text=The%20PPMI%20dataset%20contains%20images,saxophone%2C%20trumpet%2C%20and%20violin.
  The dataset comes as a single zip file containing two directories, one with
  images of people playing an instrument and the other with people holding an
  instrument. Images can be found in paths like:
  {play|with}_instrument/{violin|bassoon|...}/{train|test}/filename.jpg
  In total there are 24 classes.
Args:
dataset_path: Path with downloaded artifacts.
Returns:
Metadata and generator functions.
"""
ds_file = gfile.listdir(dataset_path)
assert len(ds_file) == 1
assert ds_file[0] == _ZIP_FILENAME
label_to_id = {}
num_classes = 0
with zipfile.ZipFile(os.path.join(dataset_path, ds_file[0]), 'r') as zf:
dirs = list(set([os.path.dirname(x) for x in zf.namelist()]))
labels = []
for x in dirs:
prefix = _get_prefix(x)
if prefix:
labels.append(prefix)
labels = list(set(labels))
num_classes = len(labels)
for i, label in enumerate(labels):
label_to_id[label] = i
metadata = types.DatasetMetaData(
num_classes=num_classes,
num_channels=3,
image_shape=(), # Ignored for now.
additional_metadata=dict(
label_to_id=label_to_id,
task_type='classification',
image_type='object'))
def make_gen(ignore_files_regex):
label_fn = lambda x: _label_from_fname(filename=x, label_to_id=label_to_id)
return utils.generate_images_from_zip_files(
dataset_path,
ds_file,
path_to_label_fn=label_fn,
ignored_files_regex=ignore_files_regex)
make_gen_fn = lambda: make_gen(_IGNORED_FILES_REGEX_TRAIN)
per_split_gen = splits.random_split_generator_into_splits_with_fractions(
make_gen_fn, splits.SPLIT_WITH_FRACTIONS_FOR_TRAIN,
splits.MERGED_TRAIN_AND_DEV)
per_split_gen['test'] = make_gen(_IGNORED_FILES_REGEX_TEST)
return metadata, per_split_gen
ppmi_dataset = types.DownloadableDataset(
name='ppmi',
download_urls=[
types.DownloadableArtefact(
url='http://vision.stanford.edu/Datasets/norm_ppmi_12class.zip',
checksum='88118d8c6b50d72f0bb37a89269185ab')
],
website_url='https://ai.stanford.edu/~bangpeng/ppmi.html#:~:text=People%20Playing%20Musical%20Instrument&text=The%20PPMI%20dataset%20contains%20images,saxophone%2C%20trumpet%2C%20and%20violin.',
paper_title='Grouplet: A Structured Image Representation for Recognizing Human and Object Interactions.',
authors='Bangpeng Yao and Li Fei-Fei.',
year='2010',
handler=ppmi_handler,
)
|
dm_nevis-master
|
dm_nevis/datasets_storage/handlers/ppmi.py
|
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""KTH-TIPS datasets handler."""
import functools
import os
import tarfile
from typing import Sequence
from dm_nevis.datasets_storage.handlers import extraction_utils as utils
from dm_nevis.datasets_storage.handlers import splits
from dm_nevis.datasets_storage.handlers import types
from tensorflow.io import gfile
def _get_class_name_list(fname: str) -> Sequence[str]:
class_name_list = []
with tarfile.open(fname, 'r') as tf:
for member in tf.getmembers():
if member.isdir() and '/' in member.path and 'sample' not in member.path:
class_name_list.append(os.path.split(member.path)[-1])
class_name_list.sort()
return class_name_list
def _get_class_name_for_image(path: str) -> str:
if 'sample' in path:
# KTH-TIPS2-a/wool/sample_a/22a-scale_10_im_10_col.png -> wool
return os.path.split(os.path.split(os.path.dirname(path))[0])[-1]
else:
# KTH_TIPS/linen/44-scale_3_im_1_col.png -> linen
return os.path.split(os.path.dirname(path))[-1]
def _path_to_label_fn(class_name_list):
  def _path_to_label(path):
    if '.png' in path:
      class_name = _get_class_name_for_image(path)
      return class_name_list.index(class_name)
    else:
      return None
  return _path_to_label
def kth_tips_handler(dataset_path: str,
is_grey: bool) -> types.HandlerOutput:
"""Imports KTH-TIPS datasets.
Link: https://www.csc.kth.se/cvap/databases/kth-tips/index.html
  This handler is valid for 4 datasets: KTH-TIPS (Colored and Grey),
  KTH-TIPS-2a and KTH-TIPS-2b.
Args:
dataset_path: Path with downloaded datafiles.
is_grey: True if images are grey, False otherwise.
Returns:
Metadata and generator functions.
"""
fname = os.path.join(dataset_path, gfile.listdir(dataset_path)[0])
class_name_list = _get_class_name_list(fname)
metadata = types.DatasetMetaData(
num_channels=1 if is_grey else 3,
num_classes=len(class_name_list),
image_shape=(), # Ignored for now.
preprocessing='random_crop',
additional_metadata=dict(
labels=class_name_list,
task_type='classification',
image_type='texture'
))
def gen():
return utils.generate_images_from_tarfiles(
fname,
working_directory=dataset_path,
path_to_label_fn=_path_to_label_fn(class_name_list))
per_split_gen = splits.random_split_generator_into_splits_with_fractions(
gen, splits.SPLIT_WITH_FRACTIONS_FOR_ALL_DATA,
splits.MERGED_TRAIN_AND_DEV)
return (metadata, per_split_gen)
# These 4 datasets contain different data from different artifacts but share
# the same logic
kth_tips_grey_dataset = types.DownloadableDataset(
name='kth_tips_grey',
download_urls=[
types.DownloadableArtefact(
url='https://www.csc.kth.se/cvap/databases/kth-tips/kth_tips_grey_200x200.tar',
checksum='3aab2bffd539865b237cb3a63dffb14a')
],
handler=functools.partial(kth_tips_handler, is_grey=True))
kth_tips_dataset = types.DownloadableDataset(
name='kth_tips',
download_urls=[
types.DownloadableArtefact(
url='https://www.csc.kth.se/cvap/databases/kth-tips/kth_tips_col_200x200.tar',
checksum='4f92fe540feb4f3c66938291e4516f6c')
],
handler=functools.partial(kth_tips_handler, is_grey=False))
kth_tips_2a_dataset = types.DownloadableDataset(
name='kth_tips_2a',
download_urls=[
types.DownloadableArtefact(
url='https://www.csc.kth.se/cvap/databases/kth-tips/kth-tips2-a_col_200x200.tar',
checksum='911eb17220748fa36e6524aea71db7d7')
],
handler=functools.partial(kth_tips_handler, is_grey=False))
kth_tips_2b_dataset = types.DownloadableDataset(
name='kth_tips_2b',
download_urls=[
types.DownloadableArtefact(
url='https://www.csc.kth.se/cvap/databases/kth-tips/kth-tips2-b_col_200x200.tar',
checksum='00470a104a57f5a5be22cc8a0f234c4e')
],
handler=functools.partial(kth_tips_handler, is_grey=False))
|
dm_nevis-master
|
dm_nevis/datasets_storage/handlers/kth_tips.py
|
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Food 101 N handler."""
import io
import os
import zipfile
from dm_nevis.datasets_storage.handlers import splits
from dm_nevis.datasets_storage.handlers import types
from PIL import Image
_FOOD_FNAME = 'Food-101N_release.zip'
_TRAIN_CLASSES_FNAME = 'Food-101N_release/meta/verified_train.tsv'
_TEST_CLASSES_FNAME = 'Food-101N_release/meta/verified_val.tsv'
_NUM_CLASSES = 101
_IMAGE_PREFIX = 'Food-101N_release/images'
_LABELS = [
'takoyaki', 'bruschetta', 'lobster_bisque', 'bread_pudding', 'scallops',
'pancakes', 'donuts', 'ceviche', 'grilled_salmon', 'ravioli', 'prime_rib',
'waffles', 'eggs_benedict', 'beef_tartare', 'chicken_wings', 'clam_chowder',
'panna_cotta', 'ramen', 'french_fries', 'seaweed_salad', 'lasagna',
'fried_calamari', 'deviled_eggs', 'carrot_cake', 'strawberry_shortcake',
'chocolate_mousse', 'poutine', 'beignets', 'caesar_salad', 'bibimbap',
'garlic_bread', 'cheese_plate', 'shrimp_and_grits', 'caprese_salad',
'beet_salad', 'dumplings', 'macarons', 'churros', 'samosa', 'creme_brulee',
'miso_soup', 'french_onion_soup', 'risotto', 'pulled_pork_sandwich',
'hot_and_sour_soup', 'onion_rings', 'spaghetti_bolognese', 'edamame',
'beef_carpaccio', 'steak', 'grilled_cheese_sandwich', 'peking_duck',
'frozen_yogurt', 'mussels', 'red_velvet_cake', 'oysters', 'greek_salad',
'foie_gras', 'pho', 'spaghetti_carbonara', 'pad_thai', 'huevos_rancheros',
'sashimi', 'sushi', 'gnocchi', 'hummus', 'pork_chop', 'falafel',
'chicken_curry', 'breakfast_burrito', 'club_sandwich', 'cannoli',
'chocolate_cake', 'fried_rice', 'apple_pie', 'guacamole',
'macaroni_and_cheese', 'hot_dog', 'cup_cakes', 'paella', 'ice_cream',
'escargots', 'spring_rolls', 'crab_cakes', 'croque_madame', 'hamburger',
'baby_back_ribs', 'baklava', 'pizza', 'filet_mignon', 'cheesecake',
'lobster_roll_sandwich', 'tiramisu', 'omelette', 'tacos', 'nachos', 'gyoza',
'chicken_quesadilla', 'french_toast', 'tuna_tartare', 'fish_and_chips'
]
def food101n_handler(dataset_path: str) -> types.HandlerOutput:
"""Food 101 N dataset handler."""
label_to_id = dict(
((label, label_id) for label_id, label in enumerate(_LABELS)))
metadata = types.DatasetMetaData(
num_classes=_NUM_CLASSES,
num_channels=3,
image_shape=(), # Ignored for now.
additional_metadata=dict(
label_to_id=label_to_id,
task_type='classification',
image_type='object',
))
with zipfile.ZipFile(os.path.join(dataset_path, _FOOD_FNAME)) as zf:
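    # Each verified_*.tsv row is expected to look like 'apple_pie/12345.jpg\t1'
    # (class-prefixed image path, then a human-verification flag); only the
    # path column is kept here.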
train_lines = zf.read(_TRAIN_CLASSES_FNAME).decode('utf-8').split(
'\n')[1:-1]
train_files = {line.strip().split('\t')[0] for line in train_lines}
test_lines = zf.read(_TEST_CLASSES_FNAME).decode('utf-8').split('\n')[1:-1]
test_files = {line.strip().split('\t')[0] for line in test_lines}
def make_gen(split_fnames, class_name_to_label):
with zipfile.ZipFile(os.path.join(dataset_path, _FOOD_FNAME)) as zf:
for fname in zf.namelist():
if not fname.startswith(_IMAGE_PREFIX):
continue
label_name = os.path.basename(os.path.dirname(fname))
image_fname = os.path.join(label_name, os.path.basename(fname))
if image_fname not in split_fnames:
continue
image = Image.open(io.BytesIO(zf.read(fname))).convert('RGB')
label = class_name_to_label[label_name]
yield types.Example(image=image, label=label, multi_label=None)
make_gen_fn = lambda: make_gen(train_files, label_to_id)
per_split_gen = splits.random_split_generator_into_splits_with_fractions(
make_gen_fn, splits.SPLIT_WITH_FRACTIONS_FOR_TRAIN,
splits.MERGED_TRAIN_AND_DEV)
per_split_gen['test'] = make_gen(test_files, label_to_id)
return metadata, per_split_gen
food101n_dataset = types.DownloadableDataset(
name='food101n',
download_urls=[
types.DownloadableArtefact(
url='https://drive.google.com/u/0/uc?id=1s9NyXpi9bO-UZUvk65WVvEUExNkZ1v9V&export=download&confirm=y',
checksum='596b41b48de43342ef1efbb2fd508e06')
],
website_url='https://kuanghuei.github.io/Food-101N/',
handler=food101n_handler)
|
dm_nevis-master
|
dm_nevis/datasets_storage/handlers/food101n.py
|
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""FGVC Aircraft dataset handler."""
import enum
import functools
import os
import re
import tarfile
from dm_nevis.datasets_storage.handlers import extraction_utils as utils
from dm_nevis.datasets_storage.handlers import splits
from dm_nevis.datasets_storage.handlers import types
from tensorflow.io import gfile
# TODO: revisit to have one single dataset instead of 3.
_FAMILY_CLASS_NAMES = 'fgvc-aircraft-2013b/data/families.txt'
_MANUFACTURER_CLASS_NAMES = 'fgvc-aircraft-2013b/data/manufacturers.txt'
_VARIANT_CLASS_NAMES = 'fgvc-aircraft-2013b/data/variants.txt'
_IDX_CLASS_NAME_REGEX = r'^(\d+) (.+)$'
class LabelCategory(enum.Enum):
FAMILY = 0
MANUFACTURER = 1
VARIANT = 2
def _get_split_filename(label_category: LabelCategory, split: str) -> str:
  return 'images_' + label_category.name.lower() + '_' + split
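# For example, _get_split_filename(LabelCategory.VARIANT, 'trainval') returns
# 'images_variant_trainval', matching the annotation files in the archive.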
def _get_labels_filename(label_category: LabelCategory) -> str:
  if label_category == LabelCategory.FAMILY:
    return _FAMILY_CLASS_NAMES
  if label_category == LabelCategory.MANUFACTURER:
    return _MANUFACTURER_CLASS_NAMES
  if label_category == LabelCategory.VARIANT:
    return _VARIANT_CLASS_NAMES
  raise ValueError(f'Unexpected label category: {label_category}')
def _get_class_name_list(fname: str, label_category: LabelCategory):
  """Get the label list for the label type."""
  labels_fname = _get_labels_filename(label_category)
lines = []
with tarfile.open(fname, 'r') as tfile:
for member in tfile.getmembers():
if member.isdir():
continue
if labels_fname in member.path:
f_obj = tfile.extractfile(member)
if f_obj:
lines = f_obj.readlines()
return [l.decode('utf-8').strip() for l in lines]
def _extract_idx_class_name_from_line(line: str):
  # `1345202 Cessna Citation` -> ('1345202', 'Cessna Citation')
match = re.match(_IDX_CLASS_NAME_REGEX, line)
if not match:
raise ValueError(f'Failed to match index and class for {line}')
return match.groups()
def _get_idx_class_names_for_split(
    fname: str,
    label_category: LabelCategory,
    split: str):
  """Get the image filenames and corresponding labels in the split."""
  split_fname = _get_split_filename(label_category, split)
idx_class_names = {'idx': [], 'class_names': []}
with tarfile.open(fname, 'r') as tfile:
for member in tfile.getmembers():
if member.isdir():
continue
if split_fname in member.path:
f_obj = tfile.extractfile(member)
if f_obj:
lines = f_obj.readlines()
for l in lines:
i, class_name = _extract_idx_class_name_from_line(l.decode('utf-8'))
idx_class_names['idx'].append(int(i))
idx_class_names['class_names'].append(class_name)
return idx_class_names
def _extract_im_index_from_path(path: str) -> int:
# 'fgvc-aircraft-2013b/data/images/1236289.jpg' -> 1236289
return int(os.path.splitext(os.path.basename(path))[0])
def _fn_extract_label_from_path(class_name_list, idx_class_names):
"""Returns a function to get an integer label from path."""
def _extract_label_from_path(path):
label = None
if 'images/' in path:
im_index = _extract_im_index_from_path(path)
try:
i = idx_class_names['idx'].index(im_index)
class_name = idx_class_names['class_names'][i]
label = class_name_list.index(class_name)
except ValueError:
pass
return label
return _extract_label_from_path
def fgvc_aircraft_handler(dataset_path: str,
                          label_category: LabelCategory) -> types.HandlerOutput:
"""Imports FGVC Aircraft dataset.
Link: https://paperswithcode.com/dataset/fgvc-aircraft-1
The dataset comes with three label types (from finer to coarser):
- Variant, e.g. Boeing 737-700. A variant collapses all the models that are
visually indistinguishable into one class.
- Family, e.g. Boeing 737.
- Manufacturer, e.g. Boeing.
For each type, 4 splits are provided, train, val, trainval and test. We
keep only test and trainval.
Args:
dataset_path: Path with downloaded datafiles.
    label_category: One of LabelCategory.FAMILY, LabelCategory.MANUFACTURER,
      or LabelCategory.VARIANT.
Returns:
Metadata and generator functions.
"""
label_valid_types = frozenset({LabelCategory.FAMILY,
LabelCategory.MANUFACTURER,
LabelCategory.VARIANT})
  assert label_category in label_valid_types, 'Unexpected label category'
fname = os.path.join(dataset_path, gfile.listdir(dataset_path)[0])
  class_name_list = _get_class_name_list(fname, label_category)
metadata = types.DatasetMetaData(
num_channels=3,
num_classes=len(class_name_list),
image_shape=(), # Ignored for now.
additional_metadata=dict(
labels=class_name_list,
task_type='classification',
image_type='object'
))
def gen_data_for_splits(fname, split):
split_idx_class_names = _get_idx_class_names_for_split(
        fname, label_category, split)
return utils.generate_images_from_tarfiles(
fname,
working_directory=dataset_path,
path_to_label_fn=_fn_extract_label_from_path(
class_name_list, split_idx_class_names))
make_gen_fn = lambda: gen_data_for_splits(fname, 'trainval')
per_split_gen = splits.random_split_generator_into_splits_with_fractions(
make_gen_fn, splits.SPLIT_WITH_FRACTIONS_FOR_TRAIN,
splits.MERGED_TRAIN_AND_DEV)
per_split_gen['test'] = gen_data_for_splits(fname, 'test')
return (metadata, per_split_gen)
# TODO: redundant DL
fgvc_aircraft_family_dataset = types.DownloadableDataset(
name='fgvc_aircraft_family',
download_urls=[types.DownloadableArtefact(
url='https://www.robots.ox.ac.uk/~vgg/data/fgvc-aircraft/archives/fgvc-aircraft-2013b.tar.gz',
checksum='d4acdd33327262359767eeaa97a4f732')],
handler=functools.partial(
        fgvc_aircraft_handler, label_category=LabelCategory.FAMILY))
fgvc_aircraft_manufacturer_dataset = types.DownloadableDataset(
name='fgvc_aircraft_manufacturer',
download_urls=[types.DownloadableArtefact(
url='https://www.robots.ox.ac.uk/~vgg/data/fgvc-aircraft/archives/fgvc-aircraft-2013b.tar.gz',
checksum='d4acdd33327262359767eeaa97a4f732')],
handler=functools.partial(
        fgvc_aircraft_handler, label_category=LabelCategory.MANUFACTURER))
fgvc_aircraft_variant_dataset = types.DownloadableDataset(
name='fgvc_aircraft_variant',
download_urls=[types.DownloadableArtefact(
url='https://www.robots.ox.ac.uk/~vgg/data/fgvc-aircraft/archives/fgvc-aircraft-2013b.tar.gz',
checksum='d4acdd33327262359767eeaa97a4f732')],
handler=functools.partial(
        fgvc_aircraft_handler, label_category=LabelCategory.VARIANT))
|
dm_nevis-master
|
dm_nevis/datasets_storage/handlers/fgvc_aircraft.py
|
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Extended Yale-B dataset handler."""
import os
from typing import List
import zipfile
from dm_nevis.datasets_storage.handlers import extraction_utils as utils
from dm_nevis.datasets_storage.handlers import types
_POSES = {
'train': ['P00', 'P02', 'P03', 'P04', 'P07'],
'dev': ['P05'],
'train_and_dev': ['P00', 'P02', 'P03', 'P04', 'P07', 'P05'],
'dev-test': ['P01'],
'test': ['P06', 'P08']
}
_IGNORED_FILES_REGEX = r'info$|Ambient\.pgm$'
_SPLITS = ['train', 'dev', 'dev-test', 'train_and_dev', 'test']
_NUM_CLASSES = 28
_FILES_ID_RANGE = (11, 40)
_MISSING_ID = 14
def _get_all_class_names(directories: List[str]) -> List[str]:
names = set()
for fname in directories:
fname = fname.split('/')[-2]
names.add(fname)
return sorted(names)
def extended_yaleb_handler(dataset_path: str) -> types.HandlerOutput:
"""Imports Extended Yale-B dataset.
This is a face identification dataset. There are 28 subjects and images are
  taken under varying viewing angles and illumination conditions.
We are going to split the dataset based on pose information. The meaning of
the pose id is explained here:
http://vision.ucsd.edu/~leekc/ExtYaleDatabase/Yale%20Face%20Database.htm
Essentially, we are taking the more frontal poses for training and using the
more extreme poses for validation and testing.
The task is to identify which one of the training subjects is present in the
input image.
  There is one zip folder per subject. Inside each of these folders there are
  several pgm grayscale images, but also other files (an ambient image without
  the subject, and *info files listing the folder contents).
  An example filename is yaleB39_P03A+070E-35.pgm, in the format:
  yaleB<subject_id=39>_P<pose_id=03>A<azimuth_value>E<elevation_value>.pgm
Link:
http://vision.ucsd.edu/~leekc/ExtYaleDatabase/ExtYaleB.html
Args:
dataset_path: Path with downloaded datafiles.
Returns:
Metadata and generator functions.
"""
  zip_fname = 'extended-yale-dataset-b.zip'
  zip_name = os.path.join(dataset_path, zip_fname)
with zipfile.ZipFile(zip_name, 'r') as z:
directories = z.namelist()
class_names = _get_all_class_names(directories)
assert len(class_names) == _NUM_CLASSES, (len(class_names), _NUM_CLASSES)
label_str_to_int = {}
for int_id, subject_id in enumerate(class_names):
label_str_to_int[subject_id] = int_id
metadata = types.DatasetMetaData(
num_channels=1,
num_classes=len(class_names),
image_shape=(), # Ignored for now.
additional_metadata=dict(
label_to_id=label_str_to_int,
labels=class_names,
task_type='classification',
image_type='face'))
def path_to_label(path: str) -> int:
fname, extension = os.path.splitext(os.path.basename(path))
assert extension == '.pgm'
subject_id, _ = fname.split('_')
class_id = label_str_to_int[subject_id]
return class_id
def gen(split):
def path_filter_fn(path: str) -> bool:
image_id, extension = os.path.splitext(os.path.basename(path))
if extension != '.pgm':
return False
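      # e.g. 'yaleB39_P03A+070E-35' -> characters 8:11 give the pose id 'P03'.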
pose = image_id[8:11]
assert pose[0] == 'P'
return pose in _POSES[split]
return utils.generate_images_from_zip_files(
dataset_path=dataset_path,
        zip_file_names=[zip_fname],
path_to_label_fn=path_to_label,
ignored_files_regex=_IGNORED_FILES_REGEX,
path_filter=path_filter_fn)
per_split_gen = {}
for split in _SPLITS:
per_split_gen[split] = gen(split)
return metadata, per_split_gen
extended_yaleb_dataset = types.DownloadableDataset(
name='extended_yaleb',
download_urls=[
types.KaggleDataset(
dataset_name='souvadrahati/extended-yale-dataset-b',
checksum='ef37284be91fe0c81dcd96baa948a2db')
],
handler=extended_yaleb_handler,
paper_title='Acquiring Linear Subspaces for Face Recognition under Variable Lighting',
authors='Kuang-Chih Lee, Jeffrey Ho, and David Kriegman',
year='2005',
website_url='http://vision.ucsd.edu/~leekc/ExtYaleDatabase/ExtYaleB.html')
|
dm_nevis-master
|
dm_nevis/datasets_storage/handlers/extended_yaleb.py
|
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""UMIST handler."""
import os
from typing import Dict
from dm_nevis.datasets_storage.handlers import extraction_utils as eu
from dm_nevis.datasets_storage.handlers import splits
from dm_nevis.datasets_storage.handlers import types
from tensorflow.io import gfile
def _path_to_label_fn(path: str, label_to_id: Dict[str, int]) -> int:
label = os.path.dirname(path)
return label_to_id[label]
def umist_handler(dataset_path: str) -> types.HandlerOutput:
"""Handler for UMIST dataset."""
files = gfile.listdir(dataset_path)
labels = [
'1s', '1r', '1n', '1i', '1d', '1e', '1q', '1c', '1k', '1l', '1h', '1o',
'1a', '1b', '1t', '1m', '1g', '1f', '1p', '1j'
]
label_to_id = dict(
((label, label_id) for label_id, label in enumerate(labels)))
metadata = types.DatasetMetaData(
num_classes=20,
num_channels=1,
image_shape=(), # Ignored for now.
additional_metadata=dict(
label_to_id=label_to_id,
task_type='classification',
image_type='face',
))
def make_gen_fn():
return eu.generate_images_from_tarfiles(
*files,
path_to_label_fn=lambda path: _path_to_label_fn(path, label_to_id),
working_directory=dataset_path)
per_split_gen = splits.random_split_generator_into_splits_with_fractions(
make_gen_fn, splits.SPLIT_WITH_FRACTIONS_FOR_ALL_DATA,
splits.MERGED_TRAIN_AND_DEV)
return metadata, per_split_gen
umist_dataset = types.DownloadableDataset(
name='umist',
download_urls=[
types.DownloadableArtefact(
url='http://eprints.lincoln.ac.uk/id/eprint/16081/1/face.tar.gz',
checksum='11011ab5dded043f5e4331711c2407c8')
],
website_url='http://eprints.lincoln.ac.uk/id/eprint/16081',
handler=umist_handler)
|
dm_nevis-master
|
dm_nevis/datasets_storage/handlers/umist.py
|
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Functions to manage splits.
Example of using the split function:
```
per_split_gen = random_split_generator_into_splits_with_fractions(make_gen_fn,
SPLIT_WITH_FRACTIONS_FOR_ALL_DATA)
```
"""
import functools
from typing import Any, Callable, Dict, Optional
from dm_nevis.datasets_storage.handlers import types
import numpy as np
SPLIT_WITH_FRACTIONS_FOR_ALL_DATA = {
'train': 0.56,
'dev': 0.12,
'dev-test': 0.12,
'test': 0.2
}
SPLIT_WITH_FRACTIONS_FOR_TRAIN = {'train': 0.7, 'dev': 0.15, 'dev-test': 0.15}
SPLIT_WITH_FRACTIONS_FOR_TRAIN_AND_DEV_ONLY = {'train': 0.8, 'dev': 0.2}
MERGED_TRAIN_AND_DEV = {'train_and_dev': ('train', 'dev')}
_DEFAULT_SPLIT_SEED = 0
# TODO: Make it produce more balanced random subsets.
# TODO: Add a parameter with the total number of examples, leading to
# better fractioning.
# TODO: Refactor it into the splits.py file.
def random_split_generator_into_splits_with_fractions(
make_gen_fn: Callable[[], types.DataGenerator],
splits_with_fractions: Dict[str, float],
merged_splits_to_split_names: Optional[Dict[str, Any]] = None,
split_seed: int = _DEFAULT_SPLIT_SEED,
) -> Dict[str, types.DataGenerator]:
"""Randomly splits generator into disjoint subsets with specified fractions.
The function goes sequentially through the elements of the original generator
and randomly (based on categorical distribution with given fractions) assigns
each element to a split. In order to create disjoint subsets, this function
keeps an internal hash_map, which maps an id of the element from the original
generator into assigned split.
Args:
make_gen_fn: Callable which creates a generator.
    splits_with_fractions: Dictionary mapping split_name to its resulting
      fraction.
    merged_splits_to_split_names: Optional dictionary mapping new merged split
      names to tuples of original split names.
split_seed: Seed used for random number generator in order to assign the
fractions.
Returns:
Dictionary mapping split_name to a corresponding split generator.
"""
fractions = [
fraction for _, fraction in sorted(splits_with_fractions.items())
]
assert np.isclose(np.sum(fractions), 1.0)
random_state = np.random.RandomState(seed=split_seed)
assign_fn = lambda x: np.argmax(random_state.multinomial(1, fractions))
internal_hash_map = dict()
def _hashed_select_from_assign_fn(x, expected_values_list, assign_fn,
internal_hash_map):
if x not in internal_hash_map:
internal_hash_map[x] = assign_fn(x)
return internal_hash_map[x] in expected_values_list
def _select_subsplit(gen, select_fn):
for idx, elem in enumerate(gen):
if select_fn(idx):
yield elem
per_split_gen = dict()
split_name_to_split_id = dict()
for split_id, (split_name,
_) in enumerate(sorted(splits_with_fractions.items())):
select_fn = functools.partial(
_hashed_select_from_assign_fn,
expected_values_list=[split_id],
assign_fn=assign_fn,
internal_hash_map=internal_hash_map)
per_split_gen[split_name] = _select_subsplit(make_gen_fn(), select_fn)
split_name_to_split_id[split_name] = split_id
if merged_splits_to_split_names is None:
return per_split_gen
for (merged_split_name,
splits_to_merge) in merged_splits_to_split_names.items():
expected_values_list = []
for split_name in splits_to_merge:
if split_name not in splits_with_fractions:
raise ValueError(
f'{split_name} specified in `merged_splits_to_split_names` is not '
'one of the original splits specified in `splits_with_fractions`.')
expected_values_list.append(split_name_to_split_id[split_name])
select_fn = functools.partial(
_hashed_select_from_assign_fn,
expected_values_list=expected_values_list,
assign_fn=assign_fn,
internal_hash_map=internal_hash_map)
per_split_gen[merged_split_name] = _select_subsplit(make_gen_fn(),
select_fn)
return per_split_gen
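# A minimal sketch of how the splitter behaves (names below are illustrative):
#
#   make_gen_fn = lambda: iter(range(100))
#   per_split_gen = random_split_generator_into_splits_with_fractions(
#       make_gen_fn, SPLIT_WITH_FRACTIONS_FOR_ALL_DATA, MERGED_TRAIN_AND_DEV)
#   train, test = list(per_split_gen['train']), list(per_split_gen['test'])
#   assert not set(train) & set(test)  # Splits are disjoint by construction.
#   # 'train_and_dev' equals the union of 'train' and 'dev', since all
#   # generators share the same internal hash map of index -> split id.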
|
dm_nevis-master
|
dm_nevis/datasets_storage/handlers/splits.py
|
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Stanford Cars handler."""
import os
import tarfile
from dm_nevis.datasets_storage.handlers import splits
from dm_nevis.datasets_storage.handlers import types
from PIL import Image
import scipy.io
_CAR_IMS_FNAME = 'car_ims.tgz'
_CARS_ANNOT_FNAME = 'cars_annos.mat'
_LABELS = [
'AM General Hummer SUV 2000', 'Acura RL Sedan 2012', 'Acura TL Sedan 2012',
'Acura TL Type-S 2008', 'Acura TSX Sedan 2012', 'Acura Integra Type R 2001',
'Acura ZDX Hatchback 2012', 'Aston Martin V8 Vantage Convertible 2012',
'Aston Martin V8 Vantage Coupe 2012',
'Aston Martin Virage Convertible 2012', 'Aston Martin Virage Coupe 2012',
'Audi RS 4 Convertible 2008', 'Audi A5 Coupe 2012', 'Audi TTS Coupe 2012',
'Audi R8 Coupe 2012', 'Audi V8 Sedan 1994', 'Audi 100 Sedan 1994',
'Audi 100 Wagon 1994', 'Audi TT Hatchback 2011', 'Audi S6 Sedan 2011',
'Audi S5 Convertible 2012', 'Audi S5 Coupe 2012', 'Audi S4 Sedan 2012',
'Audi S4 Sedan 2007', 'Audi TT RS Coupe 2012',
'BMW ActiveHybrid 5 Sedan 2012', 'BMW 1 Series Convertible 2012',
'BMW 1 Series Coupe 2012', 'BMW 3 Series Sedan 2012',
'BMW 3 Series Wagon 2012', 'BMW 6 Series Convertible 2007',
'BMW X5 SUV 2007', 'BMW X6 SUV 2012', 'BMW M3 Coupe 2012',
'BMW M5 Sedan 2010', 'BMW M6 Convertible 2010', 'BMW X3 SUV 2012',
'BMW Z4 Convertible 2012',
'Bentley Continental Supersports Conv. Convertible 2012',
'Bentley Arnage Sedan 2009', 'Bentley Mulsanne Sedan 2011',
'Bentley Continental GT Coupe 2012', 'Bentley Continental GT Coupe 2007',
'Bentley Continental Flying Spur Sedan 2007',
'Bugatti Veyron 16.4 Convertible 2009', 'Bugatti Veyron 16.4 Coupe 2009',
'Buick Regal GS 2012', 'Buick Rainier SUV 2007', 'Buick Verano Sedan 2012',
'Buick Enclave SUV 2012', 'Cadillac CTS-V Sedan 2012',
'Cadillac SRX SUV 2012', 'Cadillac Escalade EXT Crew Cab 2007',
'Chevrolet Silverado 1500 Hybrid Crew Cab 2012',
'Chevrolet Corvette Convertible 2012', 'Chevrolet Corvette ZR1 2012',
'Chevrolet Corvette Ron Fellows Edition Z06 2007',
'Chevrolet Traverse SUV 2012', 'Chevrolet Camaro Convertible 2012',
'Chevrolet HHR SS 2010', 'Chevrolet Impala Sedan 2007',
'Chevrolet Tahoe Hybrid SUV 2012', 'Chevrolet Sonic Sedan 2012',
'Chevrolet Express Cargo Van 2007', 'Chevrolet Avalanche Crew Cab 2012',
'Chevrolet Cobalt SS 2010', 'Chevrolet Malibu Hybrid Sedan 2010',
'Chevrolet TrailBlazer SS 2009',
'Chevrolet Silverado 2500HD Regular Cab 2012',
'Chevrolet Silverado 1500 Classic Extended Cab 2007',
'Chevrolet Express Van 2007', 'Chevrolet Monte Carlo Coupe 2007',
'Chevrolet Malibu Sedan 2007', 'Chevrolet Silverado 1500 Extended Cab 2012',
'Chevrolet Silverado 1500 Regular Cab 2012', 'Chrysler Aspen SUV 2009',
'Chrysler Sebring Convertible 2010',
'Chrysler Town and Country Minivan 2012', 'Chrysler 300 SRT-8 2010',
'Chrysler Crossfire Convertible 2008',
'Chrysler PT Cruiser Convertible 2008', 'Daewoo Nubira Wagon 2002',
'Dodge Caliber Wagon 2012', 'Dodge Caliber Wagon 2007',
'Dodge Caravan Minivan 1997', 'Dodge Ram Pickup 3500 Crew Cab 2010',
'Dodge Ram Pickup 3500 Quad Cab 2009', 'Dodge Sprinter Cargo Van 2009',
'Dodge Journey SUV 2012', 'Dodge Dakota Crew Cab 2010',
'Dodge Dakota Club Cab 2007', 'Dodge Magnum Wagon 2008',
'Dodge Challenger SRT8 2011', 'Dodge Durango SUV 2012',
'Dodge Durango SUV 2007', 'Dodge Charger Sedan 2012',
'Dodge Charger SRT-8 2009', 'Eagle Talon Hatchback 1998',
'FIAT 500 Abarth 2012', 'FIAT 500 Convertible 2012',
'Ferrari FF Coupe 2012', 'Ferrari California Convertible 2012',
'Ferrari 458 Italia Convertible 2012', 'Ferrari 458 Italia Coupe 2012',
'Fisker Karma Sedan 2012', 'Ford F-450 Super Duty Crew Cab 2012',
'Ford Mustang Convertible 2007', 'Ford Freestar Minivan 2007',
'Ford Expedition EL SUV 2009', 'Ford Edge SUV 2012',
'Ford Ranger SuperCab 2011', 'Ford GT Coupe 2006',
'Ford F-150 Regular Cab 2012', 'Ford F-150 Regular Cab 2007',
'Ford Focus Sedan 2007', 'Ford E-Series Wagon Van 2012',
'Ford Fiesta Sedan 2012', 'GMC Terrain SUV 2012', 'GMC Savana Van 2012',
'GMC Yukon Hybrid SUV 2012', 'GMC Acadia SUV 2012',
'GMC Canyon Extended Cab 2012', 'Geo Metro Convertible 1993',
'HUMMER H3T Crew Cab 2010', 'HUMMER H2 SUT Crew Cab 2009',
'Honda Odyssey Minivan 2012', 'Honda Odyssey Minivan 2007',
'Honda Accord Coupe 2012', 'Honda Accord Sedan 2012',
'Hyundai Veloster Hatchback 2012', 'Hyundai Santa Fe SUV 2012',
'Hyundai Tucson SUV 2012', 'Hyundai Veracruz SUV 2012',
'Hyundai Sonata Hybrid Sedan 2012', 'Hyundai Elantra Sedan 2007',
'Hyundai Accent Sedan 2012', 'Hyundai Genesis Sedan 2012',
'Hyundai Sonata Sedan 2012', 'Hyundai Elantra Touring Hatchback 2012',
'Hyundai Azera Sedan 2012', 'Infiniti G Coupe IPL 2012',
'Infiniti QX56 SUV 2011', 'Isuzu Ascender SUV 2008', 'Jaguar XK XKR 2012',
'Jeep Patriot SUV 2012', 'Jeep Wrangler SUV 2012', 'Jeep Liberty SUV 2012',
'Jeep Grand Cherokee SUV 2012', 'Jeep Compass SUV 2012',
'Lamborghini Reventon Coupe 2008', 'Lamborghini Aventador Coupe 2012',
'Lamborghini Gallardo LP 570-4 Superleggera 2012',
'Lamborghini Diablo Coupe 2001', 'Land Rover Range Rover SUV 2012',
'Land Rover LR2 SUV 2012', 'Lincoln Town Car Sedan 2011',
'MINI Cooper Roadster Convertible 2012',
'Maybach Landaulet Convertible 2012', 'Mazda Tribute SUV 2011',
'McLaren MP4-12C Coupe 2012', 'Mercedes-Benz 300-Class Convertible 1993',
'Mercedes-Benz C-Class Sedan 2012', 'Mercedes-Benz SL-Class Coupe 2009',
'Mercedes-Benz E-Class Sedan 2012', 'Mercedes-Benz S-Class Sedan 2012',
'Mercedes-Benz Sprinter Van 2012', 'Mitsubishi Lancer Sedan 2012',
'Nissan Leaf Hatchback 2012', 'Nissan NV Passenger Van 2012',
'Nissan Juke Hatchback 2012', 'Nissan 240SX Coupe 1998',
'Plymouth Neon Coupe 1999', 'Porsche Panamera Sedan 2012',
'Ram C/V Cargo Van Minivan 2012',
'Rolls-Royce Phantom Drophead Coupe Convertible 2012',
'Rolls-Royce Ghost Sedan 2012', 'Rolls-Royce Phantom Sedan 2012',
'Scion xD Hatchback 2012', 'Spyker C8 Convertible 2009',
'Spyker C8 Coupe 2009', 'Suzuki Aerio Sedan 2007',
'Suzuki Kizashi Sedan 2012', 'Suzuki SX4 Hatchback 2012',
'Suzuki SX4 Sedan 2012', 'Tesla Model S Sedan 2012',
'Toyota Sequoia SUV 2012', 'Toyota Camry Sedan 2012',
'Toyota Corolla Sedan 2012', 'Toyota 4Runner SUV 2012',
'Volkswagen Golf Hatchback 2012', 'Volkswagen Golf Hatchback 1991',
'Volkswagen Beetle Hatchback 2012', 'Volvo C30 Hatchback 2012',
'Volvo 240 Sedan 1993', 'Volvo XC90 SUV 2007',
'smart fortwo Convertible 2012'
]
def _parse_labels(annotations):
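  # Each annotation record is expected to expose 'relative_im_path' (e.g.
  # 'car_ims/000001.jpg'), a 1-indexed 'class' and a binary 'test' flag; the
  # class is shifted here to start at 0.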
fname_to_labels = dict()
for annotation in annotations:
image_fname = os.path.basename(annotation['relative_im_path'].item())
label = annotation['class'].item() - 1
fname_to_labels[image_fname] = label
return fname_to_labels
def stanford_cars_handler(dataset_path: str) -> types.HandlerOutput:
"""Handler for Standford Cars dataset."""
annotations = scipy.io.loadmat(os.path.join(dataset_path,
_CARS_ANNOT_FNAME))['annotations']
train_fname_to_labels = _parse_labels(annotations[annotations['test'] == 0])
test_fname_to_labels = _parse_labels(annotations[annotations['test'] == 1])
label_to_id = dict(
((label, label_id) for label_id, label in enumerate(_LABELS)))
def gen(annotations):
with tarfile.open(os.path.join(dataset_path, _CAR_IMS_FNAME), 'r|gz') as tf:
for member in tf:
image_fname = os.path.basename(member.path)
if image_fname not in annotations:
continue
label = annotations[image_fname]
image = Image.open(tf.extractfile(member)).convert('RGB')
image.load()
yield (image, label)
metadata = types.DatasetMetaData(
num_classes=196,
num_channels=3,
image_shape=(), # Ignored for now.
additional_metadata=dict(
label_to_id=label_to_id,
task_type='classification',
image_type='object',
))
make_train_gen = lambda: gen(train_fname_to_labels)
per_split_gen = splits.random_split_generator_into_splits_with_fractions(
make_train_gen, splits.SPLIT_WITH_FRACTIONS_FOR_TRAIN,
splits.MERGED_TRAIN_AND_DEV)
per_split_gen['test'] = gen(test_fname_to_labels)
return metadata, per_split_gen
stanford_cars_dataset = types.DownloadableDataset(
name='stanford_cars',
download_urls=[
types.DownloadableArtefact(
url='http://ai.stanford.edu/~jkrause/car196/car_ims.tgz',
checksum='d5c8f0aa497503f355e17dc7886c3f14'),
types.DownloadableArtefact(
url='http://ai.stanford.edu/~jkrause/car196/cars_annos.mat',
checksum='b407c6086d669747186bd1d764ff9dbc')
],
website_url='http://ai.stanford.edu/~jkrause/cars/car_dataset.html',
handler=stanford_cars_handler)
|
dm_nevis-master
|
dm_nevis/datasets_storage/handlers/stanford_cars.py
|
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Aberdeen handler."""
import io
import os
import re
import zipfile
from dm_nevis.datasets_storage.handlers import splits
from dm_nevis.datasets_storage.handlers import types
from PIL import Image
_REGEX = r'[a-zA-Z_]+'
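# The regex matches the leading alphabetic subject prefix of a filename,
# e.g. 'marieg12' -> 'marieg' (later normalised via _DUPLICATES).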
_FNAME = 'Aberdeen.zip'
_DUPLICATES = {
'marieg': 'marie',
'heatherg': 'heather',
'kayg': 'kay',
'neilg': 'neil',
'graemeg': 'graeme',
'dhawleyg': 'dhawley',
'clean_adrian': 'adrian',
'jsheenang': 'jsheenan',
}
_LABELS = [
'neil',
'jenni',
'chris_harbron',
'john_mccal',
'blaw',
'dpearson',
'ruth',
'richard_hardwick',
'olive',
'dlow',
'simon',
'dougal_grant',
'mnicholson',
'hack',
'caroline',
'lynn',
'adrian',
'kay',
'fiona_hogarth',
'annanena',
'heather',
'barry',
'jsheenang',
'michael',
'alister',
'amellanby',
'george',
'graham_brown',
'itaylor',
'marie',
'david',
'jim',
'alison',
'trevor',
'iroy',
'scott',
'louise',
'dsmith',
'gfindley',
'irene',
'tracy',
'johannes',
'chris_pin',
'anon_one',
'stewart',
'lynn_james',
'peter',
'paul',
'pkyle',
'andrew',
'mmanson',
'graeme',
'fiona',
'ghall',
'paol',
'david_imray',
'john_thom',
'stephen',
'gordon',
'gillian',
'dhands',
'joanna',
'nick',
'bfegan',
'grant_cumming',
'alec',
'milly',
'merilyn',
'kirsty',
'peter_macgeorge',
'dbell',
'chris',
'miranda',
'johnny_page',
'pat',
'terry_johnstone',
'tock',
'catherine',
'blair',
'kieran',
'martin',
'hin',
'meggan',
'jsheenan',
'brian_ho',
'mark',
'dhawley',
'derek',
'lisa',
'ian',
'kim',
'dave_faquhar',
]
def aberdeen_handler(dataset_path: str) -> types.HandlerOutput:
"""Handler for aberdeen dataset."""
num_classes = len(_LABELS)
label_to_id = dict(((label, idx) for idx, label in enumerate(_LABELS)))
def make_gen():
with zipfile.ZipFile(os.path.join(dataset_path, _FNAME), 'r') as zf:
for member in zf.infolist():
img_fname = os.path.splitext(member.filename)[0]
label_name = re.search(_REGEX, img_fname)[0]
if label_name in _DUPLICATES:
label_name = _DUPLICATES[label_name]
label = label_to_id[label_name]
image = Image.open(io.BytesIO(zf.read(member)))
yield image, label
metadata = types.DatasetMetaData(
num_classes=num_classes,
num_channels=3,
image_shape=(), # Ignored for now.
additional_metadata=dict(label_to_id=label_to_id))
per_split_gen = splits.random_split_generator_into_splits_with_fractions(
make_gen, splits.SPLIT_WITH_FRACTIONS_FOR_ALL_DATA,
splits.MERGED_TRAIN_AND_DEV)
return (metadata, per_split_gen)
aberdeen_dataset = types.DownloadableDataset(
name='aberdeen',
download_urls=[
types.DownloadableArtefact(
url='http://pics.stir.ac.uk/zips/Aberdeen.zip',
checksum='1f7044bd5f0bed01286263aa580a7a87'),
],
handler=aberdeen_handler,
)
|
dm_nevis-master
|
dm_nevis/datasets_storage/handlers/aberdeen.py
|
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Wiki Paintrings handler."""
import io
import os
import zipfile
import zlib
from absl import logging
from dm_nevis.datasets_storage.handlers import extraction_utils
from dm_nevis.datasets_storage.handlers import splits
from dm_nevis.datasets_storage.handlers import types
import pandas as pd
from PIL import Image
# Resize images for this dataset to the given max size. The original images can
# have much larger sizes; however, this is unnecessarily high for the task and
# results in slower training due to increased decoding time.
MAXIMUM_IMAGE_SIZE = 256
_IMAGE_FNAME = 'wikiart.zip'
_CSV_FNAME = 'wikiart_csv.zip'
# Style, Artist, and Genre classification
_ARTIST_TO_CLASS_ID = {
'Albrecht_Durer': 0,
'Boris_Kustodiev': 1,
'Camille_Pissarro': 2,
'Childe_Hassam': 3,
'Claude_Monet': 4,
'Edgar_Degas': 5,
'Eugene_Boudin': 6,
'Gustave_Dore': 7,
'Ilya_Repin': 8,
'Ivan_Aivazovsky': 9,
'Ivan_Shishkin': 10,
'John_Singer_Sargent': 11,
'Marc_Chagall': 12,
'Martiros_Saryan': 13,
'Nicholas_Roerich': 14,
'Pablo_Picasso': 15,
'Paul_Cezanne': 16,
'Pierre_Auguste_Renoir': 17,
'Pyotr_Konchalovsky': 18,
'Raphael_Kirchner': 19,
'Rembrandt': 20,
'Salvador_Dali': 21,
'Vincent_van_Gogh': 22,
}
STYLE_TO_CLASS_ID = {
'Abstract_Expressionism': 0,
'Action_painting': 1,
'Analytical_Cubism': 2,
'Art_Nouveau': 3,
'Baroque': 4,
'Color_Field_Painting': 5,
'Contemporary_Realism': 6,
'Cubism': 7,
'Early_Renaissance': 8,
'Expressionism': 9,
'Fauvism': 10,
'High_Renaissance': 11,
'Impressionism': 12,
'Mannerism_Late_Renaissance': 13,
'Minimalism': 14,
'Naive_Art_Primitivism': 15,
'New_Realism': 16,
'Northern_Renaissance': 17,
'Pointillism': 18,
'Pop_Art': 19,
'Post_Impressionism': 20,
'Realism': 21,
'Rococo': 22,
'Romanticism': 23,
'Symbolism': 24,
'Synthetic_Cubism': 25,
'Ukiyo_e': 26,
}
_GENRE_TO_CLASS_ID = {
'abstract_painting': 0,
'cityscape': 1,
'genre_painting': 2,
'illustration': 3,
'landscape': 4,
'nude_painting': 5,
'portrait': 6,
'religious_painting': 7,
'sketch_and_study': 8,
'still_life': 9,
}
_TASKS = ['artist', 'style', 'genre']
def wiki_paintings_handler(dataset_path: str,
task: str = 'style') -> types.HandlerOutput:
"""Handler for Wiki Paintings dataset."""
assert task in _TASKS
train_csv = f'{task}_train.csv'
val_csv = f'{task}_val.csv'
with zipfile.ZipFile(os.path.join(dataset_path, _CSV_FNAME), 'r') as zf:
train_ids = pd.read_csv(zf.extract(train_csv, path=dataset_path))
val_ids = pd.read_csv(zf.extract(val_csv, path=dataset_path))
def gen(ids):
with zipfile.ZipFile(os.path.join(dataset_path, _IMAGE_FNAME), 'r') as zf:
for _, row in ids.iterrows():
image_name, class_id = row
image_fname = os.path.join('wikiart', image_name)
try:
image = Image.open(io.BytesIO(zf.read(image_fname)))
image = extraction_utils.resize_to_max_size(image, MAXIMUM_IMAGE_SIZE)
yield types.Example(image=image, label=class_id, multi_label=None)
except (zlib.error, zipfile.BadZipFile):
# Very few images cannot be read.
logging.warning('Skipping %s', image_fname)
train_gen_fn = lambda: gen(train_ids)
per_split_gen = splits.random_split_generator_into_splits_with_fractions(
train_gen_fn, splits.SPLIT_WITH_FRACTIONS_FOR_TRAIN,
splits.MERGED_TRAIN_AND_DEV)
per_split_gen['test'] = gen(val_ids)
if task == 'artist':
label_to_id = _ARTIST_TO_CLASS_ID
num_classes = 23
elif task == 'style':
label_to_id = STYLE_TO_CLASS_ID
num_classes = 27
elif task == 'genre':
label_to_id = _GENRE_TO_CLASS_ID
num_classes = 10
metadata = types.DatasetMetaData(
num_classes=num_classes,
num_channels=3,
image_shape=(), # Ignored for now.
additional_metadata=dict(label_to_id=label_to_id))
return (metadata, per_split_gen)
img_artefact = types.DownloadableArtefact(
url='http://web.fsktm.um.edu.my/~cschan/source/ICIP2017/wikiart.zip',
checksum='c6b43cbcd474b875a5626ffde3b627e1')
csv_artefact = types.DownloadableArtefact(
url='http://web.fsktm.um.edu.my/~cschan/source/ICIP2017/wikiart_csv.zip',
checksum='0d221e7cb0812da6b59044cfca9aafee')
# TODO: redundant DL
wiki_paintings_dataset_artist = types.DownloadableDataset(
name='wiki_paintings_artist',
download_urls=[img_artefact, csv_artefact],
website_url='https://github.com/cs-chan/ArtGAN/tree/master/WikiArt%20Dataset',
handler=lambda ds: wiki_paintings_handler(ds, task='artist'),
)
wiki_paintings_dataset_style = types.DownloadableDataset(
name='wiki_paintings_style',
download_urls=[img_artefact, csv_artefact],
website_url='https://github.com/cs-chan/ArtGAN/tree/master/WikiArt%20Dataset',
handler=lambda ds: wiki_paintings_handler(ds, task='style'),
)
wiki_paintings_dataset_genre = types.DownloadableDataset(
name='wiki_paintings_genre',
download_urls=[img_artefact, csv_artefact],
website_url='https://github.com/cs-chan/ArtGAN/tree/master/WikiArt%20Dataset',
handler=lambda ds: wiki_paintings_handler(ds, task='genre'),
)
|
dm_nevis-master
|
dm_nevis/datasets_storage/handlers/wiki_paintings.py
|
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Caltech Categories dataset handler.
This dataset has been created from
http://www.vision.caltech.edu/html-files/archive.html.
On this page, datasets containing cars, motorcycles, faces, leaves, airplanes,
and background categories are provided.
At the time of creation, the download links for the leaves, airplanes and
background categories were not available, and so these have been omitted.
This dataset may be created with a subset of the available categories. Each
category is assigned a class label.
Note that the class labels are not comparable across different subsets of the
dataset. The class labels always start at 0 and go to the maximum number of
available classes in the returned dataset.
"""
import glob
import io
import os
import shutil
import tarfile
from typing import Iterable, Sequence, Set
from dm_nevis.datasets_storage.handlers import extraction_utils
from dm_nevis.datasets_storage.handlers import splits
from dm_nevis.datasets_storage.handlers import types
from PIL import Image
CARS_2001_PATH = "Cars_2001/cars_brad.tar"
CARS_1999_PATH = "Cars_1999/cars_markus.tar"
FACES_PATH = "faces.tar"
MOTORCYCLES_2001_PATH = "motorbikes_side/motorbikes_side.tar"
AVAILABLE_CATEGORIES = frozenset([
"cars_2001",
"cars_1999",
"motorcycles_2001",
])
_IGNORED_FILES_REGEX = r"^README$|\.mat$"
def category_to_class_name(category: str) -> str:
return {
"cars_2001": "car",
"cars_1999": "car",
"motorcycles_2001": "motorcycle",
}[category]
def caltech_categories_handler(
artifacts_path: str,
*,
categories: Iterable[str] = AVAILABLE_CATEGORIES) -> types.HandlerOutput:
"""Caltech Categories."""
categories = set(categories)
unknown_categories = categories - AVAILABLE_CATEGORIES
if unknown_categories:
raise ValueError(f"Categories `{unknown_categories}` are not available")
for zip_file in glob.glob(os.path.join(artifacts_path, "*.zip")):
unpacked_archive = zip_file.replace(".zip", "")
if not os.path.exists(unpacked_archive):
shutil.unpack_archive(zip_file, extract_dir=artifacts_path)
classes = _classes_from_categories(categories)
metadata = types.DatasetMetaData(
num_classes=len(classes),
num_channels=3,
image_shape=(), # Ignored for now.
additional_metadata={
"class_names": classes,
})
def gen():
for category in sorted(categories):
path = _category_to_file_path(category)
class_index = classes.index(category_to_class_name(category))
def path_to_label_fn(_, label=class_index):
return label
yield from extraction_utils.generate_images_from_tarfiles(
path,
working_directory=artifacts_path,
path_to_label_fn=path_to_label_fn,
ignored_files_regex=_IGNORED_FILES_REGEX,
convert_mode="RGB")
per_split_gen = splits.random_split_generator_into_splits_with_fractions(
gen, splits.SPLIT_WITH_FRACTIONS_FOR_ALL_DATA,
splits.MERGED_TRAIN_AND_DEV)
return metadata, per_split_gen
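# A minimal usage sketch (illustrative; assumes functools is imported): a
# handler restricted to the car categories could be built as
#   cars_handler = functools.partial(
#       caltech_categories_handler, categories={"cars_1999", "cars_2001"})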
def write_fixture(path: str) -> None:
"""Writes a fixture of the dataset to the given path."""
for category in AVAILABLE_CATEGORIES:
filename = _category_to_file_path(category)
basedir = filename.split("/")[0]
os.makedirs(os.path.join(path, basedir), exist_ok=True)
_write_fixture_images(os.path.join(path, filename), num_images=5)
def _write_fixture_images(path, num_images):
"""Writes a tarfile of fixture images to the given path."""
with tarfile.open(path, "w") as tf:
for i in range(num_images):
image = Image.new("RGB", size=(50, 50), color=(155, 0, 0))
buffer = io.BytesIO()
image.save(buffer, "jpeg")
buffer.seek(0)
info = tarfile.TarInfo(f"image_{i:04}")
info.size = len(buffer.getbuffer())
tf.addfile(info, buffer)
def _classes_from_categories(categories: Set[str]) -> Sequence[str]:
classes = set(category_to_class_name(category) for category in categories)
return sorted(classes)
def _category_to_file_path(category: str) -> str:
return {
"cars_2001": CARS_2001_PATH,
"cars_1999": CARS_1999_PATH,
"motorcycles_2001": MOTORCYCLES_2001_PATH,
}[category]
caltech_categories_dataset = types.DownloadableDataset(
name="caltech_categories",
download_urls=[
types.DownloadableArtefact(
url="https://data.caltech.edu/records/dvx6b-vsc46/files/Cars_2001.zip?download=1",
checksum="e68efb9197ed6a9bc94ce46c79378d29"),
types.DownloadableArtefact(
url="https://data.caltech.edu/records/fmbpr-ezq86/files/Cars_1999.zip?download=1",
checksum="79b12e08cf5f711f27bbd3e9c6bf371f"),
types.DownloadableArtefact(
url="https://data.caltech.edu/records/pxb2q-1e144/files/motorbikes_side.zip?download=1",
checksum="ac51dc40c8df085c6663f38307685079"),
types.DownloadableArtefact(
url="https://data.caltech.edu/records/6rjah-hdv18/files/faces.tar?download=1",
checksum="a6e5b794952e362560dba0cb6601307d")
],
website_url="https://data.caltech.edu/",
handler=caltech_categories_handler,
fixture_writer=write_fixture)
|
dm_nevis-master
|
dm_nevis/datasets_storage/handlers/caltech_categories.py
|
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for dataset handlers."""
import hashlib
import io
import os
import re
import shutil
import tarfile
from typing import Callable, Iterator, List, Optional, Tuple
import zipfile
from dm_nevis.datasets_storage.handlers import types
import numpy as np
from PIL import Image
DEFAULT_IGNORED_FILES_REGEX = r'.*Thumbs.db$'
PathToLabelFn = Callable[[str], Optional[int]]
PathToAttributesFn = Callable[[str], Optional[List[int]]]
PathFilter = Callable[[str], bool]
class ImageDecodingError(ValueError):
"""Raised for files that fail to decode as images."""
def resize_to_max_size(image: Image.Image, max_size: int) -> Image.Image:
"""Resizes an image so that it is no larger than the given size."""
height, width = image.height, image.width
longest_side = max(height, width)
if longest_side <= max_size:
return image
factor = max_size / longest_side
new_height = int(factor * height)
new_width = int(factor * width)
return image.resize((new_width, new_height), resample=Image.BICUBIC)
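# For example, a 1024x512 (width x height) image resized with max_size=256
# comes back as 256x128, while a 200x100 image is returned unchanged.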
def to_one_hot(label, num_classes):
one_hot_label = np.zeros((num_classes,)).astype(int)
np.put(one_hot_label, label, 1)
return one_hot_label
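# For example, to_one_hot(2, 4) -> array([0, 0, 1, 0]). Since np.put accepts a
# list of indices, to_one_hot([0, 2], 4) -> array([1, 0, 1, 0]), which is
# useful for multi-label targets.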
def unpack_file(packed_file_name, ds_path):
packed_file_path = os.path.join(ds_path, packed_file_name)
shutil.unpack_archive(packed_file_path, extract_dir=ds_path)
os.remove(packed_file_path)
def generate_images_from_tarfiles(
*paths: str,
path_to_label_fn: PathToLabelFn,
working_directory: str = '',
ignored_files_regex: str = DEFAULT_IGNORED_FILES_REGEX,
tarfile_read_mode: str = 'r',
convert_mode: Optional[str] = None,
) -> Iterator[Tuple[Image.Image, types.Label]]:
"""Generates (image, label) pairs from tar file.
Args:
*paths: The positional arguments are all treated as paths to tar files.
path_to_label_fn: A callable returning an integer label given the path of
the member being extracted. If the label is None, then the file will be
ignored.
working_directory: If provided, all paths will be opened relative to this
path.
ignored_files_regex: A regex used to ignore files that should not be
extracted.
    tarfile_read_mode: The mode used when opening the tarfile.
convert_mode: A mode to convert the image to (no conversion by default).
Yields:
(image, label) pairs consecutively from each of the input tar files.
"""
for path in paths:
with tarfile.open(os.path.join(working_directory, path),
tarfile_read_mode) as tf:
for member in tf:
if member.isdir() or re.search(ignored_files_regex, member.name):
continue
label = path_to_label_fn(member.path)
if label is None:
continue
try:
image = Image.open(tf.extractfile(member))
if convert_mode:
image = image.convert(convert_mode)
image.load()
except Exception as e:
raise ImageDecodingError(
f'Failed to decode as image: {member.path}') from e
yield (image, label)
def generate_images_from_zip(
zf: zipfile.ZipFile,
path_to_label_fn: PathToLabelFn,
ignored_files_regex: str = DEFAULT_IGNORED_FILES_REGEX,
path_filter: Optional[PathFilter] = None,
convert_mode: Optional[str] = None,
) -> Iterator[Tuple[Image.Image, types.Label]]:
"""Generates images and labels from z given zipfile.
Args:
zf: A zipfile in read mode.
path_to_label_fn: A callable that maps a file name to the label to use for
      the associated image. The file will be ignored if it returns `None`.
ignored_files_regex: Regular expression to ignore given files.
    path_filter: An optional callable; paths for which it returns False are
      skipped.
convert_mode: A mode to convert the image to (no conversion by default).
Yields:
An iterable over images and labels.
"""
for name in sorted(zf.namelist()):
f = zf.getinfo(name)
if (f.is_dir() or re.search(ignored_files_regex, f.filename) or
(path_filter and not path_filter(f.filename))):
continue
label = path_to_label_fn(f.filename)
if label is None:
continue
try:
image = Image.open(io.BytesIO(zf.read(f)))
if convert_mode:
image = image.convert(convert_mode)
image.load()
except Exception as e:
raise ImageDecodingError(
f'Failed to decode as image: {f.filename}') from e
yield (image, label)
def generate_images_from_zip_files(
dataset_path: str,
zip_file_names: List[str],
path_to_label_fn: PathToLabelFn,
ignored_files_regex: str = DEFAULT_IGNORED_FILES_REGEX,
path_filter: Optional[PathFilter] = None,
convert_mode: Optional[str] = None,
):
"""Generates (image, label) pairs from zip file.
Args:
dataset_path: Base path prefixed to all filenames.
zip_file_names: Names of zip files to open.
path_to_label_fn: A callable that maps a file name to the label_id (int).
The file will be ignored if it returns `None`.
ignored_files_regex: Regular expression for files to ignore.
    path_filter: An optional callable; paths for which it returns False are
      skipped.
convert_mode: A mode to convert the image to (no conversion by default).
Yields:
(image, label) tuples.
"""
for zip_fname in zip_file_names:
with zipfile.ZipFile(os.path.join(dataset_path, zip_fname), 'r') as zf:
yield from generate_images_from_zip(zf, path_to_label_fn,
ignored_files_regex, path_filter,
convert_mode)
def generate_images_from_zip_with_multilabels(
zf: zipfile.ZipFile,
path_to_attributes_fn: PathToAttributesFn,
ignored_files_regex: str = DEFAULT_IGNORED_FILES_REGEX,
path_filter: Optional[PathFilter] = None,
convert_mode: Optional[str] = None,
):
"""Generates images and attributes from z given zipfile.
Args:
zf: A zipfile in read mode.
path_to_attributes_fn: A callable that maps a file name to the attribute
list to use for the associated image. The file will be ignored if it
returns `None`.
ignored_files_regex: Regular expression to ignore given files.
    path_filter: An optional callable; paths for which it returns False are
      skipped.
convert_mode: A mode to convert the image to (no conversion by default).
Yields:
    `types.Example` items with `multi_label` populated.
"""
for name in sorted(zf.namelist()):
f = zf.getinfo(name)
if (f.is_dir() or re.search(ignored_files_regex, f.filename) or
(path_filter and not path_filter(f.filename))):
continue
attributes = path_to_attributes_fn(f.filename)
if attributes is None:
continue
try:
image = Image.open(io.BytesIO(zf.read(f)))
if convert_mode:
image = image.convert(convert_mode)
image.load()
except Exception as e:
raise ImageDecodingError(
f'Failed to decode as image: {f.filename}') from e
yield types.Example(image=image, multi_label=attributes, label=None)
def generate_images_from_zip_files_with_multilabels(
dataset_path: str,
zip_file_names: List[str],
path_to_attributes_fn: PathToAttributesFn,
ignored_files_regex: str = DEFAULT_IGNORED_FILES_REGEX,
path_filter: Optional[PathFilter] = None,
convert_mode: Optional[str] = None,
):
"""Generates (image, label) pairs from zip file.
Args:
dataset_path: Base path prefixed to all filenames.
zip_file_names: Names of zip files to open.
path_to_attributes_fn: A callable that maps a file name to a list of labels.
The file will be ignored if it returns `None`.
ignored_files_regex: Regular expression for files to ignore.
    path_filter: An optional callable; paths for which it returns False are
      skipped.
convert_mode: A mode to convert the image to (no conversion by default).
Yields:
    `types.Example` items with `multi_label` populated.
"""
for zip_fname in zip_file_names:
with zipfile.ZipFile(os.path.join(dataset_path, zip_fname), 'r') as zf:
yield from generate_images_from_zip_with_multilabels(
zf, path_to_attributes_fn, ignored_files_regex, path_filter,
convert_mode)
def deduplicate_data_generator(
gen: types.DataGenerator) -> Callable[[], types.DataGenerator]:
"""Reads the data from generator and removes duplicates."""
unique_set = set()
unique_examples = []
for example in gen:
assert not isinstance(example, types.Example)
assert len(example) == 2
image, label = example[:2]
img_hash = hashlib.md5(image.tobytes()).hexdigest()
key = (img_hash, label)
if key not in unique_set:
unique_examples.append((image, label))
unique_set.add(key)
def make_gen_fn():
for example in unique_examples:
yield example
return make_gen_fn
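# A minimal sketch of the deduplication behaviour (illustrative only):
#
#   img = Image.new('RGB', (8, 8))
#   make_gen_fn = deduplicate_data_generator(
#       iter([(img, 0), (img, 0), (img, 1)]))
#   assert len(list(make_gen_fn())) == 2  # Exact (image, label) repeats drop.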
|
dm_nevis-master
|
dm_nevis/datasets_storage/handlers/extraction_utils.py
|
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tiny ImagaNet handler."""
import os
import zipfile
from dm_nevis.datasets_storage.handlers import extraction_utils as utils
from dm_nevis.datasets_storage.handlers import splits
from dm_nevis.datasets_storage.handlers import types
from tensorflow.io import gfile
_NUM_CLASSES = 200
_PREFIX = 'tiny-imagenet-200'
_TEST_PREFIX = 'val'
_LABEL_FILE = 'wnids.txt'
_TEST_ANNOTATIONS = 'val_annotations.txt'
_IGNORED_REGEX_TRAIN = r'.*?\bval\b.*?|.*?\btest\b.*?|.*\.txt$'
_IGNORED_REGEX_TEST = r'.*?\btrain\b.*?|.*?\btest\b.*?|.*\.txt$'
# pylint:disable=missing-function-docstring
def tiny_imagenet_handler(dataset_path: str) -> types.HandlerOutput:
dataset_file = gfile.listdir(dataset_path)
assert len(dataset_file) == 1
dataset_file = dataset_file[0]
labels = set()
with zipfile.ZipFile(os.path.join(dataset_path, dataset_file), 'r') as zf:
# All object codes are given in a single text file
with zf.open(os.path.join(_PREFIX, _LABEL_FILE), 'r') as flabel:
for label in flabel:
label = label.strip()
labels.add(label.decode('utf-8'))
# We use val set as the test set
with zf.open(os.path.join(_PREFIX, _TEST_PREFIX, _TEST_ANNOTATIONS),
'r') as fval:
# Map val annotations to the labels
test_ann_to_label = {}
for line in fval:
line = line.strip()
words = line.decode('utf-8').split('\t')
test_ann_to_label[words[0]] = words[1]
labels = sorted(labels)
label_to_id = dict(((label, idx) for idx, label in enumerate(labels)))
metadata = types.DatasetMetaData(
num_classes=_NUM_CLASSES,
num_channels=3,
image_shape=(), # Ignored for now.
additional_metadata=dict(
label_to_id=label_to_id,
task_type='classification',
image_type='object'))
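# Train paths are assumed to look like
# 'tiny-imagenet-200/train/n01443537/images/n01443537_0.JPEG'; the wnid
# prefix of the basename (before the first '_') is the class label.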
def _label_from_filename_tr(filename):
label = os.path.split(filename)[1].split('_')[0]
return label_to_id[label]
def _label_from_filename_test(filename):
label = os.path.split(filename)[1]
label = test_ann_to_label[label]
label = label_to_id[label]
assert 0 <= label < _NUM_CLASSES
return label
def gen_tr():
return utils.generate_images_from_zip_files(
dataset_path, [dataset_file],
_label_from_filename_tr,
ignored_files_regex=_IGNORED_REGEX_TRAIN,
convert_mode='RGB')
def gen_test():
return utils.generate_images_from_zip_files(
dataset_path, [dataset_file],
_label_from_filename_test,
ignored_files_regex=_IGNORED_REGEX_TEST,
convert_mode='RGB')
per_split_gen = splits.random_split_generator_into_splits_with_fractions(
gen_tr, splits.SPLIT_WITH_FRACTIONS_FOR_TRAIN,
splits.MERGED_TRAIN_AND_DEV)
per_split_gen['test'] = gen_test()
return metadata, per_split_gen
tiny_imagenet_dataset = types.DownloadableDataset(
name='tiny_imagenet',
download_urls=[
types.DownloadableArtefact(
url='http://cs231n.stanford.edu/tiny-imagenet-200.zip',
checksum='90528d7ca1a48142e341f4ef8d21d0de')
],
paper_title='Tiny ImageNet Visual Recognition Challenge',
authors='Ya Le and Xuan Yang',
year='2015',
handler=tiny_imagenet_handler,
)
|
dm_nevis-master
|
dm_nevis/datasets_storage/handlers/tiny_imagenet.py
|
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Chars74k dataset handler."""
import itertools
import os
import tarfile
from typing import Iterable
from dm_nevis.datasets_storage.handlers import extraction_utils as eu
from dm_nevis.datasets_storage.handlers import splits as su
from dm_nevis.datasets_storage.handlers import types
import numpy as np
import pandas as pd
from PIL import Image
import scipy.io
from tensorflow.io import gfile
def chars74k_handler(
dataset_path: str,
handle_categories: Iterable[str] = frozenset({'Img', 'Hnd', 'Fnt'}),
) -> types.HandlerOutput:
"""Imports Chars74k dataset.
Link: http://www.ee.surrey.ac.uk/CVSSP/demos/chars74k/
The dataset comes with train/test(/val) splits. We keep the same test set and
consider all the remaining images as training samples.
Possible improvements for future iterations:
- The category Img (see below) comes split into Good and Bad images (with
respect to image quality). In the current version, all images are taken into
consideration, but it would be possible to load them separately.
- For now, we consider only the dataset of Latin characters. The website
provides another dataset of Kannada characters that could be interesting to
consider in a future iteration.
- Some categories have richer annotations (segmentation, stroke trajectory,
...)
Note: The images of the category Fnt are grey images, while the two other
categories are RGB. All images are therefore converted into RGB to have a
consistent format.
Args:
dataset_path: Path with downloaded datafiles.
handle_categories: Images categories - The data come in 3 types:
- Img: Cropped real images
- Hnd: Hand written images captured from a tablet
- Fnt: Generated characters from different computer fonts
Returns:
Metadata and generator functions.
"""
files = gfile.listdir(dataset_path)
img_markers = {
'Img': 'Bmp',
'Hnd': 'Img',
'Fnt': '',
}
splits_columns = ['ALLnames', 'is_good', 'ALLlabels', 'classlabels',
'classnames', 'NUMclasses', 'TSTind', 'VALind',
'TXNind', 'TRNind']
metadata = types.DatasetMetaData(
num_channels=3,
num_classes=62, # We know that.
image_shape=(), # Ignored for now.
additional_metadata=dict(
task_type='classification',
image_type='ocr'
))
def _get_fname_for_category(files, category):
for fname in files:
if category in fname:
return fname
def _get_fname_for_splits(files):
for fname in files:
if 'Lists' in fname:
return fname
def _get_splits_df_from_file(f_obj):
splits = scipy.io.loadmat(f_obj)
df = pd.DataFrame(splits['list'][0], columns=splits_columns)
return df
def _get_test_filenames(files):
fname = _get_fname_for_splits(files)
test_filenames = {}
with tarfile.open(os.path.join(dataset_path, fname)) as tfile:
for member in tfile.getmembers():
if member.isdir():
continue
if 'English' in member.path:
if 'lists.mat' in member.path or 'lists_var_size.mat' in member.path:
name = member.path.split('/')[-2]
f_obj = tfile.extractfile(member)
df = _get_splits_df_from_file(f_obj)
test_filenames[name] = df['ALLnames'][0][df['TSTind'][0][:, -1] - 1]
return test_filenames
def _is_test_image(path, category, test_filenames):
return any(name in path for name in test_filenames[category])
def _extract_label_from_path(path):
starting_pos = path.find('Sample') + 6
return path[starting_pos:starting_pos+3]
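# Worked example (hypothetical path): for '.../Bmp/Sample012/img012-00001.png',
# find('Sample') + 6 points just past 'Sample', so the extracted label string
# is '012', which the generator below converts to the 0-based label 11.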
def gen_data_for_categories_splits(categories, files, select_test=False):
test_filenames = _get_test_filenames(files)
for category in categories:
fname = _get_fname_for_category(files, category)
with tarfile.open(os.path.join(dataset_path, fname)) as tfile:
for member in tfile.getmembers():
if member.isdir():
continue
if img_markers[category] in member.path and '.png' in member.path:
if _is_test_image(
member.path, category, test_filenames) == select_test:
f_obj = tfile.extractfile(member)
label = np.array(int(_extract_label_from_path(member.path))) - 1
image = Image.open(f_obj).convert('RGB')
image.load()
yield (image, label)
train_gen = gen_data_for_categories_splits(handle_categories, files)
test_gen = gen_data_for_categories_splits(
handle_categories, files, select_test=True)
make_unique_gen_fn = eu.deduplicate_data_generator(
itertools.chain(train_gen, test_gen))
per_split_gen = su.random_split_generator_into_splits_with_fractions(
make_unique_gen_fn, su.SPLIT_WITH_FRACTIONS_FOR_ALL_DATA,
su.MERGED_TRAIN_AND_DEV)
return (metadata, per_split_gen)
# The following links may be used as a fallback if the given URLs are not live.
# - https://teodecampos.github.io/chars74k/
chars74k_dataset = types.DownloadableDataset(
name='chars74k',
download_urls=[
types.DownloadableArtefact(
url='http://www.ee.surrey.ac.uk/CVSSP/demos/chars74k/EnglishImg.tgz',
checksum='85d157e0c58f998e1cda8def62bcda0d'),
types.DownloadableArtefact(
url='http://www.ee.surrey.ac.uk/CVSSP/demos/chars74k/EnglishHnd.tgz',
checksum='95e9ee6d929809ef4c2f509f86388c72'),
types.DownloadableArtefact(
url='http://www.ee.surrey.ac.uk/CVSSP/demos/chars74k/EnglishFnt.tgz',
checksum='f6710b1302a7e9693d904b16389d9d8a'),
types.DownloadableArtefact(
url='http://www.ee.surrey.ac.uk/CVSSP/demos/chars74k/Lists.tgz',
checksum='8dfe916169fa10b86466776a4a1d7c5f')
],
handler=chars74k_handler,
)
|
dm_nevis-master
|
dm_nevis/datasets_storage/handlers/chars74k.py
|
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Office31 domain adaptation benchmark dataset.
Download seemed unreliable.
"""
import functools
import re
from dm_nevis.datasets_storage.handlers import extraction_utils
from dm_nevis.datasets_storage.handlers import splits
from dm_nevis.datasets_storage.handlers import types
LABELS_TO_ID = {
"back_pack": 0,
"bike": 1,
"bike_helmet": 2,
"bookcase": 3,
"bottle": 4,
"calculator": 5,
"desk_chair": 6,
"desk_lamp": 7,
"desktop_computer": 8,
"file_cabinet": 9,
"headphones": 10,
"keyboard": 11,
"laptop_computer": 12,
"letter_tray": 13,
"mobile_phone": 14,
"monitor": 15,
"mouse": 16,
"mug": 17,
"paper_notebook": 18,
"pen": 19,
"phone": 20,
"printer": 21,
"projector": 22,
"punchers": 23,
"ring_binder": 24,
"ruler": 25,
"scissors": 26,
"speaker": 27,
"stapler": 28,
"tape_dispenser": 29,
"trash_can": 30,
}
_ARCHIVE_FILENAME = "domain_adaptation_images.tar.gz"
# Filenames in the archive look like this:
# domain_adaptation_images/amazon/images/bike/frame_0001.jpg
# domain_adaptation_images/dslr/images/bike/frame_0001.jpg
# domain_adaptation_images/webcam/images/bike/frame_0003.jpg
#
# where the first path component is the domain and the directory under
# `images/` is the class name.
_FILE_PATH_REGEX = re.compile(
r"(\w+)/images/(.+)/frame_(\d\d\d\d)\.jpg")
SPLIT_WITH_FRACTIONS_FOR_TEST_AND_DEV_TEST = {"test": 0.5, "dev-test": 0.5}
def office31_handler(download_path: str) -> types.HandlerOutput:
"""Imports images from Office 31 domain adaptation dataset.
Args:
download_path: Directory containing the downloaded raw data.
Returns:
HandlerOutput
"""
def path_to_label_fn(fname, domain_ls):
fname_match = _FILE_PATH_REGEX.match(fname)
if not fname_match:
return None
else:
domain_str = fname_match.group(1)
label_str = fname_match.group(2)
label_id = LABELS_TO_ID[label_str]
if domain_str not in domain_ls:
# Only import images with matching domain
return None
return label_id
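# Worked example (using the archive layout above): for a path like
# 'amazon/images/bike/frame_0001.jpg' (relative to the archive root), the
# regex yields domain 'amazon' and class 'bike', so the label is
# LABELS_TO_ID['bike'] == 1; paths from domains outside `domain_ls` are
# dropped by returning None.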
def gen_split(domain_ls):
yield from extraction_utils.generate_images_from_tarfiles(
_ARCHIVE_FILENAME,
working_directory=download_path,
path_to_label_fn=functools.partial(path_to_label_fn,
domain_ls=domain_ls))
metadata = types.DatasetMetaData(
num_classes=len(LABELS_TO_ID),
num_channels=3,
image_shape=(), # Ignored for now.
additional_metadata=dict(
label_str_to_id=LABELS_TO_ID,
task_type="classification",
image_type="object"))
per_split_gen = splits.random_split_generator_into_splits_with_fractions(
functools.partial(gen_split, domain_ls=["amazon", "dlsr"]),
splits.SPLIT_WITH_FRACTIONS_FOR_TRAIN_AND_DEV_ONLY,
splits.MERGED_TRAIN_AND_DEV)
test_dev_test_gen = splits.random_split_generator_into_splits_with_fractions(
functools.partial(gen_split, domain_ls=["webcam"]),
SPLIT_WITH_FRACTIONS_FOR_TEST_AND_DEV_TEST)
per_split_gen = per_split_gen | test_dev_test_gen
return (metadata, per_split_gen)
office31_dataset = types.DownloadableDataset(
name="office31",
download_urls=[
types.DownloadableArtefact(
url="https://drive.google.com/u/0/uc?id=0B4IapRTv9pJ1WGZVd1VDMmhwdlE&export=download&confirm=y",
checksum="1b536d114869a5a8aa4580b89e9758fb")
],
website_url="https://faculty.cc.gatech.edu/~judy/domainadapt/",
paper_title="Adapting Visual Category Models to New Domains",
authors="Kate Saenko, Brian Kulis, Mario Fritz & Trevor Darrell",
papers_with_code_url="https://paperswithcode.com/dataset/office-31",
handler=office31_handler)
|
dm_nevis-master
|
dm_nevis/datasets_storage/handlers/office31.py
|
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""ALOI dataset handler."""
import functools
from dm_nevis.datasets_storage.handlers import extraction_utils as utils
from dm_nevis.datasets_storage.handlers import splits
from dm_nevis.datasets_storage.handlers import types
from tensorflow.io import gfile
_TRAIN_CATEGORIES = ('col', 'view')
_TEST_CATEGORIES = ('ill',)
# TODO: handle the 'masks' file; it is currently unclear what it is for.
# pylint:disable=missing-function-docstring
def aloi_handler(
dataset_path: str,
is_grey: bool = False,
) -> types.HandlerOutput:
files = gfile.listdir(dataset_path)
metadata = types.DatasetMetaData(
num_channels=1 if is_grey else 3,
num_classes=1000, # We know that.
image_shape=(), # Ignored for now.
additional_metadata=dict())
def _get_fname_for_category(files, category):
for fname in files:
if category in fname:
return fname
def gen_data_for_categories(categories, files):
fnames = [
_get_fname_for_category(files, category) for category in categories
]
return utils.generate_images_from_tarfiles(
*fnames,
working_directory=dataset_path,
path_to_label_fn=lambda x: int(x.split('/')[1]) - 1)
train_gen_fn = lambda: gen_data_for_categories(_TRAIN_CATEGORIES, files)
per_split_gen = splits.random_split_generator_into_splits_with_fractions(
train_gen_fn, splits.SPLIT_WITH_FRACTIONS_FOR_TRAIN,
splits.MERGED_TRAIN_AND_DEV)
per_split_gen['test'] = gen_data_for_categories(_TEST_CATEGORIES, files)
return (metadata, per_split_gen)
aloi_dataset = types.DownloadableDataset(
name='aloi',
download_urls=[
types.DownloadableArtefact(
url='http://aloi.science.uva.nl/tars/aloi_red4_ill.tar',
checksum='0f72c3acf66908e61b6f8d5bf5981193'),
types.DownloadableArtefact(
url='http://aloi.science.uva.nl/tars/aloi_red4_col.tar',
checksum='c0486588258de8a26df4ed7aff0b34f8'),
types.DownloadableArtefact(
url='http://aloi.science.uva.nl/tars/aloi_red4_view.tar',
checksum='ca29f342bb0c1fa4788e9f76f591ec59'),
types.DownloadableArtefact(
url='http://aloi.science.uva.nl/tars/aloi_red4_stereo.tar',
checksum='44fef81680375641ea374c32dbe6d68d')
# TODO: Need to handle masks somehow, but it is not specified
# on the website
# 'http://aloi.science.uva.nl/tars/aloi_mask4.tar',
],
handler=aloi_handler)
aloi_grey_dataset = types.DownloadableDataset(
name='aloi_grey',
download_urls=[
types.DownloadableArtefact(
url='http://aloi.science.uva.nl/tars/aloi_grey_red4_ill.tar',
checksum='70eca95ebfcfb253eb7be199f158f10c'),
types.DownloadableArtefact(
url='http://aloi.science.uva.nl/tars/aloi_grey_red4_col.tar',
checksum='91a31a2692797c899973e84783c08e92'),
types.DownloadableArtefact(
url='http://aloi.science.uva.nl/tars/aloi_grey_red4_view.tar',
checksum='0347be95470dbb22cc02e441d8b7e956'),
types.DownloadableArtefact(
url='http://aloi.science.uva.nl/tars/aloi_grey_red4_stereo.tar',
checksum='bc059412fd6620fd49feb89655284f4a')
# TODO: Need to handle masks somehow, but it is not specified
# on the website
# 'http://aloi.science.uva.nl/tars/aloi_mask4.tar',
],
handler=functools.partial(aloi_handler, is_grey=True))
|
dm_nevis-master
|
dm_nevis/datasets_storage/handlers/aloi.py
|
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""BIWI dataset handler."""
import io
import math
import os
from typing import Dict, List
import zipfile
from dm_nevis.datasets_storage.handlers import extraction_utils as utils
from dm_nevis.datasets_storage.handlers import types
import numpy as np
import pyquaternion
from tensorflow.io import gfile
import tensorflow_datasets as tfds
_IDS = {
'test': [1, 4, 8, 10],
'dev-test': [3, 9],
'dev': [2, 7],
'train': [
5, 6, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24],
'train_and_dev': [
5, 6, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 2, 7]
}
_POSE_SUFFIX = '_pose.txt'
_LEN_POSE_SUFFIX = len(_POSE_SUFFIX)
_NUM_CLASSES = 5 # Number of quantization bins for the 3 angles.
_SPLITS = ['train', 'dev', 'dev-test', 'train_and_dev', 'test']
_ANGLES = ['roll', 'pitch', 'yaw']
_NANGLES = len(_ANGLES)
_IGNORED_FILES_REGEX = '|'.join([
utils.DEFAULT_IGNORED_FILES_REGEX,
r'_depth.bin',
r'rgb.cal',
r'readme.txt',
r'_pose.bin',
r'_mask.png',
])
def _path_to_label_fn(path: str, file_to_labels: Dict[str,
List[int]]) -> List[int]:
filename = os.path.basename(path)
return file_to_labels[filename]
def biwi_handler(dataset_path: str) -> types.HandlerOutput:
"""Imports BIWI dataset.
The dataset home page is at
https://www.kaggle.com/datasets/kmader/biwi-kinect-head-pose-database
This dataset contains over 20,000 face images of 20 people under different
3D pose conditions. The task is about pose estimation.
The creators provide camera calibration, and for each image a 3D point in
space with a 3x3 rotation matrix.
We turn this into a multilabel classification task by:
a) Computing roll, pitch and yaw.
b) Quantizing the above quantities.
Therefore for every image we need to predict 3 categories.
In the paper from which we sourced this task (Pan et al., "Self-Paced Deep
Regression Forests with Consideration on Underrepresented Examples", ECCV
2020), the authors create splits by randomly shuffling images.
Instead, we create splits using non-overlapping sets of subjects for each
split, which tests for a harder form of generalization.
The package is organized in three folders: db_annotations, faces_0,
head_pose_masks. The data of interest is in the folder faces_0.
There we find one folder per subject.
For instance:
faces_0/08/frame_00150_rgb.png
is an image of subject 08, and the corresponding pose information is at:
faces_0/08/frame_00150_pose.txt
which in this case contains:
0.784991 -0.0678137 0.615784
-0.117134 0.959814 0.255021
-0.608332 -0.272319 0.745503
88.7665 -4.08406 878.874
i.e. 3x3 rotation matrix and vector of 3D coordinates.
Args:
dataset_path: Path with downloaded artifacts.
Returns:
Metadata and generator functions.
"""
zip_file_path, *other_files_in_directory = gfile.listdir(dataset_path)
assert not other_files_in_directory
all_angles = []
data = dict()
# Read all the pose data, convert it to roll, pitch and yaw.
with zipfile.ZipFile(os.path.join(dataset_path, zip_file_path), 'r') as zf:
for name in sorted(zf.namelist()):
f = zf.getinfo(name)
if f.filename.endswith(_POSE_SUFFIX):
with io.TextIOWrapper(zf.open(f), encoding='utf-8') as fp:
rotation_list = []
counter = 0
for line in fp:
vals = line.strip().split(' ')
rotation_list.append([float(v) for v in vals])
counter += 1
if counter == _NANGLES:
break # Disregard 3D location.
rotation_mat = np.array(rotation_list)
q = pyquaternion.Quaternion(matrix=rotation_mat, atol=1e-4)
angles = q.yaw_pitch_roll
angles = [math.degrees(a) for a in angles]
all_angles.append(list(angles))
filename = f.filename[:-_LEN_POSE_SUFFIX]
img_fname = f'{filename}_rgb.png'
data[img_fname] = angles
# Quantize angles.
angles_mat = np.array(all_angles) # num_examples x 3
def uniform_bins(data, num_bins):
num_samples = len(data)
return np.interp(
# Add one to the number of bins because the first and last value are
# min and max, and if we want K intervals we need K+1 boundaries.
np.linspace(0, num_samples, num_bins + 1), np.arange(num_samples),
np.sort(data))
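# Worked example (values computed by hand): uniform_bins([0, 1, 2, 3], 2)
# interpolates np.linspace(0, 4, 3) = [0, 2, 4] against the sorted data and
# returns boundaries [0., 2., 3.]: equal-mass bins whose outer edges are
# clamped to the data minimum and maximum.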
bins = []
for i in range(_NANGLES):
bins.append(uniform_bins(angles_mat[:, i], _NUM_CLASSES))
for name in data:
deg_angles = data[name]
sample_labels = []
for i in range(_NANGLES):
assert deg_angles[i] <= bins[i][-1] and deg_angles[i] >= bins[i][0]
quantized_angle = np.digitize(deg_angles[i], bins[i])
# Note: np.digitize returns indexes of bins s.t.:
# bins[i][j-1] <= x < bins[i][j]
# If x == bin[i][-1], it will be assigned to the next bin, with index
# _NUM_CLASSES + 1. Instead we want that boundary value to be mapped to
# the last bin, bins[i][-1].
if deg_angles[i] == bins[i][-1]:
quantized_angle = _NUM_CLASSES
# This will create a list of multinomial labels, which is currently not
# supported.
# sample_labels.append(quantized_angle - 1) # in [0, _NUM_CLASSES-1]
# Instead, we use binary labels, and here we list the indexes
# of the non-zero labels.
sample_labels.append((quantized_angle - 1) + _NUM_CLASSES * i)
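# Worked example: with _NUM_CLASSES = 5, the third bin (quantized_angle == 3)
# of the second angle (i == 1) maps to binary label index (3 - 1) + 5 * 1 = 7.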
data[name] = sample_labels
metadata = types.DatasetMetaData(
# In the binary setting only, this is the total number of binary labels.
num_classes=_NUM_CLASSES * _NANGLES,
num_channels=3,
image_shape=(), # Ignored for now.
additional_metadata=dict(
task_type='multi-label',
image_type='faces',
),
features=tfds.features.FeaturesDict({
'multi_label':
tfds.features.Sequence(
tfds.features.ClassLabel(num_classes=_NUM_CLASSES)),
'png_encoded_image':
tfds.features.Image()
}))
def gen(split):
def path_filter_fn(path: str) -> bool:
# Remove files which are not rgb images.
if path not in data.keys():
return False
# Get subject id.
subject_id = os.path.basename(os.path.dirname(path))
assert len(subject_id) == 2
subject_id = int(subject_id)
return subject_id in _IDS[split]
return utils.generate_images_from_zip_files_with_multilabels(
dataset_path=dataset_path,
zip_file_names=[zip_file_path],
path_to_attributes_fn=lambda path: data[path],
ignored_files_regex=_IGNORED_FILES_REGEX,
path_filter=path_filter_fn)
per_split_gen = {}
for split in _SPLITS:
per_split_gen[split] = gen(split)
return metadata, per_split_gen
biwi_dataset = types.DownloadableDataset(
name='biwi',
download_urls=[
types.KaggleDataset(
dataset_name='kmader/biwi-kinect-head-pose-database',
checksum='59d49d96e5719f248f6d66f8ff205569')
],
paper_title='Random Forests for Real Time 3D Face Analysis',
authors='Fanelli, Gabriele and Dantone, Matthias and Gall, Juergen and Fossati, Andrea and Van Gool, Luc',
year='2013',
website_url='https://www.kaggle.com/datasets/kmader/biwi-kinect-head-pose-database',
handler=biwi_handler)
|
dm_nevis-master
|
dm_nevis/datasets_storage/handlers/biwi.py
|
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TID handler."""
import os
import subprocess
import tarfile
from dm_nevis.datasets_storage.handlers import splits
from dm_nevis.datasets_storage.handlers import types
from PIL import Image
from tensorflow.io import gfile
_NUM_SUBJECTS = 25
_PROPERTIES_PER_YEAR = {
2008: dict(num_distortion_types=17, num_distortion_levels=4),
2013: dict(num_distortion_types=24, num_distortion_levels=5),
}
_NOISE_FNAME_2008 = 'MSSIM.TXT'
_NOISE_FNAME_2013 = 'MSSIM.txt'
_NUM_BUCKETS = 5
def tid_handler(
dataset_path: str,
year: int,
apply_unrar: bool = True
) -> types.HandlerOutput:
"""Handler for TID dataset."""
metadata = types.DatasetMetaData(
num_classes=_NUM_BUCKETS,
num_channels=3,
image_shape=(), # Ignored for now.
additional_metadata=dict(
task_type='classification',
image_type='quality',
))
# Unarchive the images.
if apply_unrar:
# Normally, we assume that the unrar utility is available and extract the
# rar archive directly.
subprocess.call(['unrar', 'e', '-y', '-idq', f'tid{year}.rar'],
cwd=dataset_path)
else:
# In this case, we assume that we pre-archived a file in a tar format.
tarfile.open(os.path.join(dataset_path, f'tid{year}.tar.gz'),
'r|gz').extractall(path=dataset_path)
image_fname_to_distortion = {}
noise_fname = _NOISE_FNAME_2008 if year == 2008 else _NOISE_FNAME_2013
with gfile.GFile(os.path.join(dataset_path, noise_fname), 'r') as f:
lines = iter(f.readlines())
num_distortion_types = _PROPERTIES_PER_YEAR[year]['num_distortion_types']
num_distortion_levels = _PROPERTIES_PER_YEAR[year]['num_distortion_levels']
for subject_id in range(1, _NUM_SUBJECTS + 1):
for distortion_id in range(1, num_distortion_types + 1):
for distortion_level in range(1, num_distortion_levels + 1):
image_fname = f'i{subject_id:02d}_{distortion_id:02d}_{distortion_level}.bmp'
distortion = float(next(lines).strip())
image_fname_to_distortion[image_fname] = distortion
tot_num_samples = len(image_fname_to_distortion)
num_examples_per_bucket = tot_num_samples // _NUM_BUCKETS
sorted_distortions = sorted(image_fname_to_distortion.values())
buckets = []
for bucket_id in range(_NUM_BUCKETS):
buckets.append(
sorted_distortions[bucket_id * num_examples_per_bucket:(bucket_id + 1) *
num_examples_per_bucket])
def find_bucket(distortion, buckets):
for bucket_id, bucket in enumerate(buckets):
if distortion in bucket:
return bucket_id
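# Worked example (sketch): with 10 images and _NUM_BUCKETS = 5, each bucket
# holds the 2 next-smallest MSSIM values, so the 4th-smallest distortion
# falls in bucket 1. Note that when the total is not divisible by
# _NUM_BUCKETS, the largest leftover values belong to no bucket and
# find_bucket returns None.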
def gen(image_fname_to_distortion, buckets):
for fname in gfile.listdir(dataset_path):
if not fname.endswith('bmp'):
continue
image_fname = fname.lower()
if image_fname not in image_fname_to_distortion:
continue
distortion = image_fname_to_distortion[image_fname]
bucket_id = find_bucket(distortion, buckets)
image = Image.open(os.path.join(dataset_path, fname))
yield types.Example(image=image, label=bucket_id, multi_label=None)
make_gen = lambda: gen(image_fname_to_distortion, buckets)
per_split_gen = splits.random_split_generator_into_splits_with_fractions(
make_gen, splits.SPLIT_WITH_FRACTIONS_FOR_ALL_DATA,
splits.MERGED_TRAIN_AND_DEV)
return metadata, per_split_gen
tid2008_dataset = types.DownloadableDataset(
name='tid2008',
download_urls=[
types.DownloadableArtefact(
url='https://ponomarenko.info/tid/tid2008.rar',
checksum='732aeec405a57b589aeee4f1c58a4c1b')
],
website_url='https://ponomarenko.info/tid2008.htm',
handler=lambda ds: tid_handler(ds, 2008))
tid2013_dataset = types.DownloadableDataset(
name='tid2013',
download_urls=[
types.DownloadableArtefact(
url='https://ponomarenko.info/tid2013/tid2013.rar',
checksum='2c42bec2407adc49cf4d7858daf8f99b')
],
website_url='https://ponomarenko.info/tid2013.htm',
handler=lambda ds: tid_handler(ds, 2013))
|
dm_nevis-master
|
dm_nevis/datasets_storage/handlers/tid.py
|
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""UMD handler."""
import os
from dm_nevis.datasets_storage.handlers import extraction_utils as utils
from dm_nevis.datasets_storage.handlers import splits
from dm_nevis.datasets_storage.handlers import types
from tensorflow.io import gfile
_NUM_CLASSES = 25
_IGNORED_FILES_REGEX = r'.*\.db$'
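# Class directories are named '1' through '25', so a path such as
# 'textures-1/7/sample.tif' (hypothetical layout) maps to label 6.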
def _label_from_filename(filename: str) -> int:
"""Extracts a label given a filename for the UMD dataset."""
label = int(os.path.split(os.path.split(filename)[0])[1])
label -= 1
assert 0 <= label < _NUM_CLASSES
return label
def umd_handler(dataset_path: str) -> types.HandlerOutput:
"""Imports UMD Texture dataset.
The dataset home page is at
http://users.umiacs.umd.edu/~fer/website-texture/texture.htm.
The dataset comes with two zip files containing 12 and 13 directories
(one per class) for a total of 25 classes.
We define the mapping from directory name to label by subtracting one, as the
class directories are numbered from 1 to 25.
The dataset does not come with pre-defined train/val/test splits. We define
those ourselves.
Args:
dataset_path: Path with downloaded artifacts.
Returns:
Metadata and generator functions.
"""
ds_files = gfile.listdir(dataset_path)
assert len(ds_files) == 2
metadata = types.DatasetMetaData(
num_classes=_NUM_CLASSES,
num_channels=1,
image_shape=(), # Ignored for now.
additional_metadata=dict(
task_type='classification',
image_type='texture'))
def make_gen_fn():
return utils.generate_images_from_zip_files(
dataset_path,
ds_files,
path_to_label_fn=_label_from_filename,
ignored_files_regex=_IGNORED_FILES_REGEX)
per_split_gen = splits.random_split_generator_into_splits_with_fractions(
make_gen_fn, splits.SPLIT_WITH_FRACTIONS_FOR_ALL_DATA,
splits.MERGED_TRAIN_AND_DEV)
return metadata, per_split_gen
umd_dataset = types.DownloadableDataset(
name='umd',
download_urls=[
types.DownloadableArtefact(
url='http://users.umiacs.umd.edu/~fer/High-resolution-data-base/textures-1.zip',
checksum='818b5b13035374cffd4db604e718ddbf'),
types.DownloadableArtefact(
url='http://users.umiacs.umd.edu/~fer/High-resolution-data-base/textures-2.zip',
checksum='e9853d0f7eaa9e57c4756e9017d0cbc9')
],
website_url='http://users.umiacs.umd.edu/~fer/website-texture/texture.htm',
paper_title='A projective invariant for textures',
authors='Yong Xu and Hui Ji and Cornelia Fermuller',
year='2006',
handler=umd_handler,
)
|
dm_nevis-master
|
dm_nevis/datasets_storage/handlers/umd.py
|
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Sketch handler."""
import os
from typing import Dict
from dm_nevis.datasets_storage.handlers import extraction_utils as eu
from dm_nevis.datasets_storage.handlers import splits
from dm_nevis.datasets_storage.handlers import types
from tensorflow.io import gfile
def _path_to_label_fn(path: str, label_to_id: Dict[str, int]) -> int:
label = os.path.basename(os.path.dirname(path))
return label_to_id[label]
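# The label is the name of the immediate parent directory, e.g. a path like
# 'png/airplane/1.png' (hypothetical layout) maps to label_to_id['airplane'].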
def sketch_handler(dataset_path: str) -> types.HandlerOutput:
"""Handler for Sketch dataset."""
files = gfile.listdir(dataset_path)
labels = [
'snail', 'candle', 'crane (machine)', 'parking meter', 'bathtub',
'loudspeaker', 'bulldozer', 'skateboard', 'ant', 'radio', 'tennis-racket',
'envelope', 'house', 'person walking', 'basket', 'rainbow', 'paper clip',
'alarm clock', 'screwdriver', 'cactus', 'umbrella', 'carrot', 'fan',
'kangaroo', 'bell', 't-shirt', 'hedgehog', 'santa claus', 'angel',
'trousers', 'eyeglasses', 'pretzel', 'snake', 'elephant', 'frying-pan',
'bread', 'rollerblades', 'tomato', 'cake', 'couch', 'pizza', 'crown',
'cannon', 'baseball bat', 'moon', 'potted plant', 'cup', 'syringe',
'pipe (for smoking)', 'hand', 'telephone', 'pumpkin', 'race car', 'table',
'brain', 'snowboard', 'flashlight', 'cloud', 'helmet', 'monkey',
'ice-cream-cone', 'wineglass', 'strawberry', 'speed-boat', 'cigarette',
'pigeon', 'book', 'lion', 'rabbit', 'violin', 'grenade', 'skull',
'car (sedan)', 'bee', 'head', 'spoon', 'ship', 'laptop', 'diamond',
'church', 'spider', 'wheelbarrow', 'flower with stem', 'mosquito',
'apple', 'giraffe', 'submarine', 'tree', 'harp', 'face', 'boomerang',
'floor lamp', 'fork', 'horse', 'scissors', 'cell phone', 'comb',
'beer-mug', 'helicopter', 'hot air balloon', 'bear (animal)', 'lightbulb',
'trombone', 'computer monitor', 'computer-mouse', 'fire hydrant',
'squirrel', 'camera', 'binoculars', 'sponge bob', 'streetlight', 'blimp',
'satellite', 'rooster', 'key', 'windmill', 'duck', 'zebra', 'armchair',
'skyscraper', 'bookshelf', 'shark', 'tablelamp', 'nose', 'truck', 'fish',
'chandelier', 'bottle opener', 'mailbox', 'donut', 'door', 'chair',
'castle', 'ipod', 'power outlet', 'wrist-watch', 'wheel', 'dog', 'bus',
'foot', 'panda', 'megaphone', 'microscope', 'bench', 'snowman', 'mug',
'sun', 'wine-bottle', 'suv', 'vase', 'door handle', 'arm', 'ear',
'hot-dog', 'ladder', 'banana', 'toilet', 'pineapple', 'mushroom',
'dolphin', 'mouse (animal)', 'owl', 'hat', 'palm tree', 'tv',
'calculator', 'bicycle', 'standing bird', 'bowl', 'ashtray', 'trumpet',
'microphone', 'saxophone', 'leaf', 'tire', 'bed', 'shovel', 'socks',
'traffic light', 'feather', 'crab', 'frog', 'walkie talkie', 'purse',
'hammer', 'parrot', 'rifle', 'sailboat', 'scorpion', 'head-phones',
'backpack', 'toothbrush', 'guitar', 'human-skeleton', 'bridge', 'bush',
'axe', 'sheep', 'mermaid', 'eye', 'pig', 'cabinet', 'sword',
'satellite dish', 'keyboard', 'stapler', 'teacup', 'present', 'motorbike',
'airplane', 'canoe', 'pear', 'teddy-bear', 'knife', 'butterfly', 'camel',
'flying saucer', 'pickup truck', 'van', 'swan', 'sea turtle', 'teapot',
'tractor', 'hourglass', 'tooth', 'lobster', 'hamburger', 'grapes',
'mouth', 'parachute', 'dragon', 'person sitting', 'pen', 'train', 'tent',
'tiger', 'revolver', 'suitcase', 'shoe', 'barn', 'flying bird', 'octopus',
'seagull', 'space shuttle', 'cat', 'crocodile', 'piano', 'penguin', 'cow',
'lighter'
]
label_to_id = dict(
((label, label_id) for label_id, label in enumerate(labels)))
metadata = types.DatasetMetaData(
num_classes=250,
num_channels=1,
image_shape=(), # Ignored for now.
additional_metadata=dict(
label_to_id=label_to_id,
task_type='classification',
image_type='object',
))
ignored_files_regex = [eu.DEFAULT_IGNORED_FILES_REGEX, r'filelist.txt']
ignored_files_regex = '|'.join(ignored_files_regex)
def make_gen_fn():
return eu.generate_images_from_zip_files(
dataset_path,
files,
path_to_label_fn=lambda path: _path_to_label_fn(path, label_to_id),
ignored_files_regex=ignored_files_regex)
per_split_gen = splits.random_split_generator_into_splits_with_fractions(
make_gen_fn, splits.SPLIT_WITH_FRACTIONS_FOR_ALL_DATA,
splits.MERGED_TRAIN_AND_DEV)
return metadata, per_split_gen
sketch_dataset = types.DownloadableDataset(
name='sketch',
download_urls=[
types.DownloadableArtefact(
url='https://cybertron.cg.tu-berlin.de/eitz/projects/classifysketch/sketches_png.zip',
checksum='023123df86a928a5273e3ba11990d8fd')
],
website_url='https://cybertron.cg.tu-berlin.de/eitz/projects/classifysketch',
handler=sketch_handler)
|
dm_nevis-master
|
dm_nevis/datasets_storage/handlers/sketch.py
|
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Pneumonia chest X-RAY dataset."""
import os
from dm_nevis.datasets_storage.handlers import extraction_utils as utils
from dm_nevis.datasets_storage.handlers import splits as su
from dm_nevis.datasets_storage.handlers import types
from tensorflow.io import gfile
_IGNORED_FILES_REGEX = '|'.join([
utils.DEFAULT_IGNORED_FILES_REGEX,
r'metadata.xlsx',
r'README.md.txt',
r'__MACOSX',
r'DS_Store',
])
_LABELS = ['NORMAL', 'PNEUMONIA']
def _path_to_label_fn(path: str, label_to_id):
label = os.path.basename(os.path.dirname(path))
return label_to_id[label]
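# Example (hypothetical filename): 'chest_xray/chest_xray/train/NORMAL/x.jpeg'
# has parent directory 'NORMAL' and therefore maps to label 0.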
# pylint:disable=missing-function-docstring
def pneumonia_chest_xray_handler(dataset_path: str) -> types.HandlerOutput:
files = gfile.listdir(dataset_path)
label_to_id = dict(
((label, label_id) for label_id, label in enumerate(_LABELS)))
metadata = types.DatasetMetaData(
num_classes=2,
num_channels=1,
image_shape=(), # Ignored for now.
additional_metadata=dict(
label_to_id=label_to_id,
task_type='classification',
image_type='xray',
))
# pylint:disable=g-long-lambda
def make_gen_fn(split, label_to_id):
return utils.generate_images_from_zip_files(
dataset_path,
files,
path_to_label_fn=lambda path: _path_to_label_fn(path, label_to_id),
path_filter=lambda x: x.startswith(
os.path.join('chest_xray/chest_xray', split)),
ignored_files_regex=_IGNORED_FILES_REGEX,
convert_mode='L')
train_split_gen_fn = lambda: make_gen_fn('train', label_to_id)
# Train and dev.
per_split_gen = su.random_split_generator_into_splits_with_fractions(
train_split_gen_fn, su.SPLIT_WITH_FRACTIONS_FOR_TRAIN_AND_DEV_ONLY,
su.MERGED_TRAIN_AND_DEV)
per_split_gen['dev-test'] = make_gen_fn('val', label_to_id)
per_split_gen['test'] = make_gen_fn('test', label_to_id)
return metadata, per_split_gen
pneumonia_chest_xray_dataset = types.DownloadableDataset(
name='pneumonia_chest_xray',
download_urls=[
types.KaggleDataset(
dataset_name='paultimothymooney/chest-xray-pneumonia',
checksum='930763e3580e76de9c2c849ec933b5d6')
],
website_url='https://www.kaggle.com/paultimothymooney/chest-xray-pneumonia',
handler=pneumonia_chest_xray_handler)
|
dm_nevis-master
|
dm_nevis/datasets_storage/handlers/pneumonia_chest_xray.py
|
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Interact dataset handler.
We use an experimental setting similar to that of Sharmanska and Quadrianto
(2016), where we include the category-level illustrations in the training set
together with a randomly selected subset of real images, and test on held-out
real images.
Reference:
Sharmanska, Viktoriia, and Quadrianto, Novi. "Learning from the mistakes of
others: Matching errors in cross-dataset learning." In Proceedings of the
IEEE Conference on Computer Vision and Pattern Recognition, pp. 3967-3975. 2016.
"""
import collections
import functools
import os
from typing import Dict, List, Sequence, Tuple
import zipfile
from dm_nevis.datasets_storage.handlers import extraction_utils
from dm_nevis.datasets_storage.handlers import types
import numpy as np
_STAND_ALONE_DATASET_IMAGES_ZIP_PATH = "interact_stand-alone_dataset_images.zip"
_STAND_ALONE_DATASET_ANNOTATION_ZIP_PATH = "interact_stand-alone_dataset_annotations.zip"
_STAND_ALONE_DATASET_ANNOTATION_FILENAME = "interact_final_dataset_labels_and_images.txt"
_ILLUSTRATION_CATEGORY_DATASET_IMAGES_ZIP_PATH = "interact_illustration_category_dataset_images.zip"
_ILLUSTRATION_CATEGORY_DATASET_ANNOTATION_ZIP_PATH = "interact_illustration_category_dataset_annotations.zip"
_ILLUSTRATION_CATEGORY_DATASET_ANNOTATION_FILENAME = "interact_illustration_category_dataset_labels_and_images.txt"
_NUM_CLASSES = 60
_PERC_TEST = 0.2
_PERC_DEV_IN_TRAIN = 0.15
_PERC_DEV_TEST_IN_TRAIN = 0.15
# The following image files raise exceptions about truncation in image.resize.
_TRUNCATED_IMAGES_TO_EXCLUDE = [
"imgs/22TCSTMOFXRDFDVIK9PDTXJK5OZTQ6_20.jpg",
"imgs/249YW2GTP9RAU1I25WX94SOKQN6OTI_16.jpg",
"imgs/24PFCD45XCETAM5TCF9N9TGUNNTZXA_02.jpg"
]
def _split_paths(paths: Sequence[str], rng: np.random.Generator
) -> Dict[str, List[str]]:
"""Randomly splits file paths to train, dev, dev-test, and test subsets."""
# Ratios of the train, dev and dev-test splits.
split_ratios = np.array([
(1 - _PERC_TEST) * (1 - _PERC_DEV_IN_TRAIN - _PERC_DEV_TEST_IN_TRAIN),
(1 - _PERC_TEST) * _PERC_DEV_IN_TRAIN,
(1 - _PERC_TEST) * _PERC_DEV_TEST_IN_TRAIN])
total_size = len(paths)
split_sizes = (total_size * split_ratios).astype(np.int32)
acc_split_sizes = np.cumsum(split_sizes)
# Randomly split the images of every class into train, dev, dev-test, and
# test subsets.
inds = rng.permutation(total_size)
train_inds, dev_inds, dev_test_inds, test_inds = np.split(inds,
acc_split_sizes)
return {
"train": [paths[i] for i in train_inds],
"dev": [paths[i] for i in dev_inds],
"dev-test": [paths[i] for i in dev_test_inds],
"test": [paths[i] for i in test_inds],
"train_and_dev": [
paths[i] for i in np.concatenate([train_inds, dev_inds])
],
}
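# Worked example: with _PERC_TEST = 0.2 and both dev fractions at 0.15, the
# ratios are [0.56, 0.12, 0.12], so 100 paths split into 56 train, 12 dev,
# 12 dev-test, and the remaining 20 test examples.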
def interact_handler(artifacts_path: str) -> types.HandlerOutput:
"""Imports interact dataset.
Each image file is named as: "*##.bmp", where * is the index of subject in
letter starting from "A", and ## is the index of face image for this subject.
The image file name is from "A00.bmp" to "M74.bmp".
Args:
artifacts_path: Path with downloaded artifacts.
Returns:
Metadata and generator functions.
"""
label_to_paths = collections.defaultdict(list)
path_to_label = {}
label_to_name = {}
# Load stand-alone dataset annotations.
label_to_paths, path_to_label, label_to_name = _load_annotations(
artifacts_path, _STAND_ALONE_DATASET_ANNOTATION_ZIP_PATH,
_STAND_ALONE_DATASET_ANNOTATION_FILENAME)
labels = sorted(label_to_name.keys())
if min(labels) != 0 or max(labels) != len(labels) - 1:
raise ValueError("Class label does not cover a continguous range from 0.")
if len(labels) != _NUM_CLASSES:
raise ValueError(f"Number of classes ({len(labels)}) does not match the "
f"expected value ({_NUM_CLASSES}).")
split_to_paths = _split_images_per_class(label_to_paths)
# Load illustration dataset annotations.
(illustration_label_to_paths, illustration_path_to_label,
illustration_label_to_name) = _load_annotations(
artifacts_path, _ILLUSTRATION_CATEGORY_DATASET_ANNOTATION_ZIP_PATH,
_ILLUSTRATION_CATEGORY_DATASET_ANNOTATION_FILENAME)
# Verify the illustration_label_to_name dict matches label_to_name.
for label, class_name in illustration_label_to_name.items():
if label not in label_to_name or class_name != label_to_name[label]:
raise ValueError("Found mismatched category label and name pair.")
# Add all illustration images to the training set.
path_to_label.update(illustration_path_to_label)
for paths in illustration_label_to_paths.values():
split_to_paths["train"].extend(paths)
# Convert lists to sets for faster indexing.
split_to_paths = {split: set(paths)
for split, paths in split_to_paths.items()}
def gen(split):
split_paths = split_to_paths[split]
return extraction_utils.generate_images_from_zip_files(
artifacts_path, [
_STAND_ALONE_DATASET_IMAGES_ZIP_PATH,
_ILLUSTRATION_CATEGORY_DATASET_IMAGES_ZIP_PATH
],
path_to_label_fn=lambda path: path_to_label[path],
path_filter=lambda path: path in split_paths)
class_names = [label_to_name[i] for i in range(_NUM_CLASSES)]
metadata = types.DatasetMetaData(
num_classes=_NUM_CLASSES,
num_channels=3,
image_shape=(), # Ignored for now.
additional_metadata={
"class_names": class_names,
"split_to_paths": split_to_paths,
})
return metadata, {split: gen(split) for split in split_to_paths}
def _load_annotations(
artifacts_path: str, zip_path: str, anno_filename: str
) -> Tuple[Dict[types.Label, List[str]], Dict[str, types.Label], Dict[
types.Label, str]]:
"""Load annotations from one source."""
label_to_paths = collections.defaultdict(list)
path_to_label = {}
label_to_name = {}
with zipfile.ZipFile(os.path.join(artifacts_path, zip_path), "r") as zf:
with zf.open(anno_filename, "r") as f:
for line in f:
class_label, class_name, image_path = _parse_line(line)
if image_path in _TRUNCATED_IMAGES_TO_EXCLUDE:
continue
label_to_paths[class_label].append(image_path)
path_to_label[image_path] = class_label
# Update the label_to_name dict.
if class_label not in label_to_name:
label_to_name[class_label] = class_name
elif label_to_name[class_label] != class_name:
raise ValueError("Multiple class names: "
f"{label_to_name[class_label]}, {class_name}, "
f"correspond to label {class_label}")
return label_to_paths, path_to_label, label_to_name
def _parse_line(line: bytes) -> Tuple[types.Label, str, str]:
"""Parses one line to image class label, class name and path."""
parts = line.decode().strip().split(";")
# Every line is in the following format:
# category_number_label;image_filename;category_semantic_label
class_label_str, fname, class_name = parts
class_label = int(class_label_str) - 1 # 0-based label.
image_path = f"imgs/{fname}"
return class_label, class_name, image_path
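# Worked example (hypothetical annotation line): b'12;abc_01.jpg;hug\n' parses
# to the 0-based label 11, class name 'hug', and image path 'imgs/abc_01.jpg'.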
def _split_images_per_class(label_to_paths: Dict[types.Label, Sequence[str]]
) -> Dict[str, List[str]]:
"""Splits images into subsets per class and merge them."""
# Set the seed for random splitting.
rng = np.random.default_rng(seed=1)
split_to_paths = collections.defaultdict(list)
for paths in label_to_paths.values():
split_to_paths_per_class = _split_paths(paths, rng)
for split, paths_subset in split_to_paths_per_class.items():
split_to_paths[split].extend(paths_subset)
return split_to_paths
interact_dataset = types.DownloadableDataset(
name="interact",
download_urls=[
types.DownloadableArtefact(
url="https://computing.ece.vt.edu/~santol/projects/zsl_via_visual_abstraction/interact/interact_stand-alone_dataset/interact_stand-alone_dataset_images.zip",
checksum="93d7a853e0e596e7ef7fecc4808ffdf8"),
types.DownloadableArtefact(
url="https://computing.ece.vt.edu/~santol/projects/zsl_via_visual_abstraction/interact/interact_stand-alone_dataset/interact_stand-alone_dataset_annotations.zip",
checksum="aef1de2ad7c596a566cd78414a266f5f"),
types.DownloadableArtefact(
url="https://computing.ece.vt.edu/~santol/projects/zsl_via_visual_abstraction/interact/interact_illustration_category_dataset/interact_illustration_category_dataset_images.zip",
checksum="47c043e360a177c026fa7b7652fdf5b2"),
types.DownloadableArtefact(
url="https://computing.ece.vt.edu/~santol/projects/zsl_via_visual_abstraction/interact/interact_illustration_category_dataset/interact_illustration_category_dataset_annotations.zip",
checksum="56dec629f180077eb2a76b3f67c7b02f")
],
handler=functools.partial(interact_handler),
website_url="https://computing.ece.vt.edu/~santol/projects/zsl_via_visual_abstraction/interact/index.html",
paper_url="https://openreview.net/forum?id=rkbYYcWOZB",
paper_title="Zero-Shot Learning via Visual Abstraction",
authors="Antol, Stanislaw and Zitnick, C. Lawrence and Parikh, Devi",
year=2014)
|
dm_nevis-master
|
dm_nevis/datasets_storage/handlers/interact.py
|
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""CMU AMP expression dataset handler."""
from dm_nevis.datasets_storage.handlers import extraction_utils
from dm_nevis.datasets_storage.handlers import types
import numpy as np
_ZIP_PATH = "faceExpressionDatabase.zip"
_NUM_CLASSES = 13
_TOT_IMAS_PER_CLASS = 75
_IMAGE_SHAPE = (64, 64)
# Split all the images per class into train_all/test with 0.8:0.2. Then split
# the train_all subset into train/dev/dev-test with 0.7:0.15:0.15.
_PERC_TEST = 0.2
_PERC_DEV_IN_TRAIN = 0.15
_PERC_DEV_TEST_IN_TRAIN = 0.15
IGNORED_FILES_REGEX = r"readme\.txt"
def cmu_amp_expression_handler(artifacts_path: str) -> types.HandlerOutput:
"""Imports CMU AMP expression dataset.
The dataset home page is at
http://chenlab.ece.cornell.edu/projects/FaceAuthentication/Default.html.
Each image file is named as: "*##.bmp", where * is the index of subject in
letter starting from "A", and ## is the index of face image for this subject.
The image file name is from "A00.bmp" to "M74.bmp".
Args:
artifacts_path: Path with downloaded artifacts.
Returns:
Metadata and generator functions.
"""
# Ratios of the train, dev and dev-test splits.
split_ratios = np.array([
(1 - _PERC_TEST) * (1 - _PERC_DEV_IN_TRAIN - _PERC_DEV_TEST_IN_TRAIN),
(1 - _PERC_TEST) * _PERC_DEV_IN_TRAIN,
(1 - _PERC_TEST) * _PERC_DEV_TEST_IN_TRAIN])
split_sizes = (_TOT_IMAS_PER_CLASS * split_ratios).astype(np.int32)
acc_split_sizes = np.cumsum(split_sizes)
# Set the seed for random splitting.
rng = np.random.default_rng(seed=1)
train_fnames = []
dev_fnames = []
dev_test_fnames = []
test_fnames = []
for label in range(_NUM_CLASSES):
# Randomly splitting the images of every class into train, dev, dev-test,
# and test subsets.
inds = rng.permutation(_TOT_IMAS_PER_CLASS)
train_inds, dev_inds, dev_test_inds, test_inds = np.split(inds,
acc_split_sizes)
train_fnames.extend([_make_fname(label, ind) for ind in train_inds])
dev_fnames.extend([_make_fname(label, ind) for ind in dev_inds])
dev_test_fnames.extend([_make_fname(label, ind) for ind in dev_test_inds])
test_fnames.extend([_make_fname(label, id) for id in test_inds])
split_to_fnames = {
"train": set(train_fnames),
"dev": set(dev_fnames),
"dev-test": set(dev_test_fnames),
"test": set(test_fnames),
"train_and_dev": set(train_fnames).union(set(dev_fnames)),
}
def gen(split):
split_fnames = split_to_fnames[split]
return extraction_utils.generate_images_from_zip_files(
artifacts_path, [_ZIP_PATH],
path_to_label_fn=_label_from_path,
ignored_files_regex=IGNORED_FILES_REGEX,
path_filter=lambda path: path in split_fnames)
class_names = [_label_to_name(i) for i in range(_NUM_CLASSES)]
metadata = types.DatasetMetaData(
num_classes=_NUM_CLASSES,
num_channels=1,
image_shape=_IMAGE_SHAPE, # Ignored for now.
additional_metadata={
"class_names": class_names,
"split_to_fnames": split_to_fnames,
"train_size_per_class": split_sizes[0],
"dev_size_per_class": split_sizes[1],
"dev_test_size_per_class": split_sizes[2],
"test_size_per_class": _TOT_IMAS_PER_CLASS - acc_split_sizes[-1],
})
return metadata, {split: gen(split) for split in split_to_fnames}
def _label_to_name(label: int) -> str:
return chr(ord("A") + label)
def _make_fname(label: int, ind: int) -> str:
return f"{_label_to_name(label)}{ind:02d}.bmp"
def _label_from_path(path: str) -> types.Label:
"""Get 0-based label from the first character of a filename like *##.bmp."""
return ord(path[0]) - ord("A")
cmu_amp_expression_dataset = types.DownloadableDataset(
name="cmu_amp_expression",
download_urls=[
types.DownloadableArtefact(
url="http://chenlab.ece.cornell.edu/_download/FaceAuthentication/faceExpressionDatabase.zip",
checksum="f6c2fdf87c095c17e2527c36a0528966")
],
handler=cmu_amp_expression_handler,
website_url="http://chenlab.ece.cornell.edu/projects/FaceAuthentication/Default.html",
paper_url="https://doi.org/10.1016/S0031-3203(02)00033-X",
paper_title="Face Authentication for Multiple Subjects Using Eigenflow",
authors="Liu, Xiaoming; Chen, Tsuhan; Kumar, B. V. K. Vijaya",
year=2003,
)
|
dm_nevis-master
|
dm_nevis/datasets_storage/handlers/cmu_amp_expression.py
|
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""German Traffic Sign Recognition dataset handler."""
import io
import os
import zipfile
from dm_nevis.datasets_storage.handlers import extraction_utils
from dm_nevis.datasets_storage.handlers import splits
from dm_nevis.datasets_storage.handlers import types
_TRAIN_ZIP_PATH = "GTSRB_Final_Training_Images.zip"
_TEST_ZIP_IMAGES_PATH = "GTSRB_Final_Test_Images.zip"
_TEST_ZIP_LABELS_PATH = "GTSRB_Final_Test_GT.zip"
_LABEL_FILE = "GT-final_test.csv"
_IGNORED_FILES_REGEX = r".*\.csv$|.*\.txt$"
def german_tsr_handler(artifacts_path: str) -> types.HandlerOutput:
"""Imports German Traffic Sign Recognition dataset.
The dataset home page is at https://benchmark.ini.rub.de/gtsrb_dataset.html.
The dataset comes with two zip files, one for the training and one for the
testing files.
The training directory has one folder per class.
The testing directory has all images in one folder with a csv file specifying
the labels of the test images.
Images have different spatial resolutions.
There are 43 classes in total, and about 50,000 images.
Args:
artifacts_path: Path with downloaded artifacts.
Returns:
Metadata and generator functions.
"""
label_to_id = {}
num_classes = 43
for cc in range(num_classes):
label_to_id["%05d" % cc] = cc
metadata = types.DatasetMetaData(
num_classes=num_classes,
num_channels=3,
image_shape=(), # Ignored for now.
additional_metadata=dict(
label_to_id=label_to_id,
task_type="classification",
image_type="object"))
def gen(path, label_from_path):
return extraction_utils.generate_images_from_zip_files(
artifacts_path, [path],
label_from_path,
ignored_files_regex=_IGNORED_FILES_REGEX,
convert_mode="RGB")
# Prepare the label mapping for the test images.
path_to_label = dict()
with zipfile.ZipFile(
os.path.join(artifacts_path, _TEST_ZIP_LABELS_PATH), "r") as zf:
with io.TextIOWrapper(zf.open(_LABEL_FILE, "r"), encoding="utf-8") as f:
for ll in f:
fields = ll.split(";")
filename = fields[0]
label = fields[-1].strip()
path_to_label[filename] = label
def _label_from_path_ts(path: str) -> types.Label:
fields = path.split("/")
return int(path_to_label[fields[-1].strip()])
make_gen_fn = lambda: gen(_TRAIN_ZIP_PATH, _label_from_path_tr)
per_split_gen = splits.random_split_generator_into_splits_with_fractions(
make_gen_fn, splits.SPLIT_WITH_FRACTIONS_FOR_TRAIN,
splits.MERGED_TRAIN_AND_DEV)
per_split_gen["test"] = gen(_TEST_ZIP_IMAGES_PATH, _label_from_path_ts)
return metadata, per_split_gen
def _label_from_path_tr(path: str) -> types.Label:
return int(os.path.basename(os.path.dirname(path)))
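# Worked example (training layout): class directories are zero-padded, so a
# path like '.../00042/img.ppm' (hypothetical filename) maps to label 42.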
german_tsr_dataset = types.DownloadableDataset(
name="german_tsr",
download_urls=[
types.DownloadableArtefact(
url="https://sid.erda.dk/public/archives/daaeac0d7ce1152aea9b61d9f1e19370/GTSRB_Final_Training_Images.zip",
checksum="f33fd80ac59bff73c82d25ab499e03a3"),
types.DownloadableArtefact(
url="https://sid.erda.dk/public/archives/daaeac0d7ce1152aea9b61d9f1e19370/GTSRB_Final_Test_Images.zip",
checksum="c7e4e6327067d32654124b0fe9e82185"),
types.DownloadableArtefact(
url="https://sid.erda.dk/public/archives/daaeac0d7ce1152aea9b61d9f1e19370/GTSRB_Final_Test_GT.zip",
checksum="fe31e9c9270bbcd7b84b7f21a9d9d9e5")
],
paper_title="The German Traffic Sign Recognition Benchmark: A multi-class classification competition",
authors="Johannes Stallkamp and Marc Schlipsing and Jan Salmen and Christian Igel",
year="2011",
handler=german_tsr_handler)
|
dm_nevis-master
|
dm_nevis/datasets_storage/handlers/german_tsr.py
|
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Animal WEB handler."""
import os
import subprocess
import tarfile
from dm_nevis.datasets_storage.handlers import extraction_utils as eu
from dm_nevis.datasets_storage.handlers import splits
from dm_nevis.datasets_storage.handlers import types
from PIL import Image
from tensorflow.io import gfile
_RAR_FNAME = 'animal_dataset_v1_c.rar'
_TAR_FNAME = 'animal_dataset_v1_c.tar.gz'
_LABELS = [
'saluki', 'kultarr', 'bighornsheep', 'bandedmongoose', 'fox', 'goat',
'tasmaniandevil', 'wallaroo', 'whiptailwallaby', 'feralhorse', 'binturong',
'geoffroyscat', 'klipspringer', 'vervetmonkey', 'possum', 'arcticwolf',
'rustyspottedgenet', 'capegraymongoose', 'elk', 'commonbrownlemur', 'panda',
'amurtiger', 'quokka', 'indri', 'debrazzasmonkey', 'collaredpeccary',
'cloudedleopard', 'emperorpenguin', 'zebu', 'wildcat', 'lionpd', 'walrus',
'irishwolfhound', 'hoodedseal', 'camel', 'patasmonkey', 'commongenet',
'italiangreyhound', 'viverratangalungamalayancivet', 'bullmastif',
'blackbackedjackal', 'pekingesedog', 'lumholtzstreekangaroo', 'komondor',
'toquemacaque', 'longnosedmongoose', 'matschiestreekangaroo', 'woodchuck',
'slendermongoose', 'gundi', 'animal', 'feralcat', 'chamois',
'borneanslowloris', 'dachshund', 'yelloweyedpenguin', 'harpseal', 'bharal',
'blueeyedblacklemur', 'treeshrew', 'dallsheep', 'brushtailedrockwallaby',
'marmoset', 'goldenbamboolemur', 'greaterbamboolemur', 'balinesecat',
'californiansealion', 'fallowdeer', 'adeliepenguin', 'waterbuck',
'mareebarockwallaby', 'horse', 'zebra', 'blackrhino', 'australianterrier',
'wildebeest', 'monte', 'oncilla', 'armadillo', 'frenchbulldog',
'swamprabbit', 'cheetah', 'gentoopenguin', 'greylangur', 'mouflon',
'alaskanmalamute', 'amurleopard', 'dhole', 'baikalseal', 'brownrat',
'kingpenguin', 'redpanda', 'hamster', 'echidna', 'bushbaby', 'fishingcat',
'westernlesserbamboolemur', 'beardedseal', 'colo', 'roanantelope',
'harbourseal', 'chinstrappenguin', 'giantschnauzer', 'collaredbrownlemur',
'stripedhyena', 'opossum', 'guanaco', 'wisent', 'visayanwartypig',
'barbarymacaque', 'onager', 'caiman', 'feralgoat', 'commonwarthog',
'hartebeest', 'arcticfox', 'whiteheadedlemur', 'spottedseal', 'capebuffalo',
'medraneanmonkseal', 'jaguar', 'wildass', 'barbarysheep', 'gibbons',
'spottedhyena', 'leopardcat', 'hedgehog', 'uc', 'topi', 'commonchimpanzee',
'dunnart', 'agilewallaby', 'sundaslowloris', 'wombat', 'chinesegoral',
'caribou', 'weddellseal', 'canadianlynx', 'husky', 'liger',
'sharpesgrysbok', 'graywolf', 'hare', 'caracal', 'hyrax', 'platypus',
'capefox', 'eurasianlynx', 'oribi', 'northernelephantseal',
'centralchimpanzee', 'dormouse', 'gerbil', 'cougar', 'capybara', 'ferret',
'przewalskihorse', 'crestedpenguin', 'oryx', 'steinbucksteenbok', 'nilgai',
'mangabey', 'australianshepherd', 'spidermonkey', 'monkey', 'brownhyena',
'roedeer', 'bull', 'pardinegenet', 'anoa', 'leopard', 'swampwallaby',
'bonobo', 'cottonrat', 'vole', 'humboldtpenguin', 'africanpenguin',
'goldenjackal', 'gorilla', 'commonkusimanse', 'redtailmonkey', 'aardwolf',
'suni', 'blackandwhiteruffedlemar', 'chowchow', 'raccoon', 'bolognesedog',
'kangaroo', 'dalmatian', 'gharial', 'australiancattledog', 'ruddymongoose',
'nightmonkey', 'swiftfox', 'weasel', 'easternchimpanzee', 'bison', 'yak',
'chital', 'titi', 'woollymonkey', 'reedbuck', 'pallascat',
'smallasianmongoose', 'grizzlybear', 'rustyspottedcat', 'coatis',
'redruffedlemur', 'kinkajou', 'parmawallaby', 'coypu', 'westernchimpanzee',
'asianpalmcivet', 'domesticcat', 'giantotter', 'pygmyrabbit', 'grayfox',
'kiang', 'pademelon', 'lemur', 'wapiti', 'asiangoldencat', 'agouti',
'bandedpalmcivet', 'fieldmouse', 'junglecat', 'anteater', 'mexicanwolf',
'largespottedgenet', 'beatingmongoose', 'goodfellowstreekangaroo',
'flyingsquirrel', 'wolverine', 'guineapig', 'dassie', 'orangutan',
'greyseal', 'ocelot', 'howler', 'germanpinscher', 'koala', 'bilby',
'goldenretriever', 'galagos', 'leopardseal', 'spottedneckedotter',
'crownedlemur', 'owstonspalmcivet', 'donkey', 'duiker', 'pygmyslowloris',
'cservalserval', 'hippopotamus', 'tamarin', 'alaskanhare', 'badger',
'dingo', 'boar', 'goldenlangur', 'greatdane', 'jackrabbit', 'uakari',
'colobus', 'fennecfox', 'sandcat', 'bamboolemur', 'bengalslowloris',
'dugong', 'rhesusmonkey', 'marshmongoose', 'littlebluepenguin', 'hogdeer',
'redbelliedsquirrel', 'commondwarfmongoose', 'corsacfox', 'whitewolf',
'addax', 'stripeneckedmongoose', 'deermouse', 'japanesemacaque', 'giraffe',
'babirusa', 'hamadryasbaboon', 'douclangur', 'anatolianshepherddog',
'bluemonkey', 'muskox', 'yellowfootedrockwallaby', 'gerenuk', 'doberman',
'hawaiianmonkseal', 'magellanicpenguin', 'crabeaterseal', 'bobcat',
'feralcattle', 'jaguarundi', 'potoroo', 'muntjacdeer', 'geladababoon',
'harvestmouse', 'rhinoceros', 'olivebaboon', 'buffalo', 'patagonianmara',
'bushbuck', 'blackbuck', 'beaver', 'zonkey', 'bordercollie',
'southernelephantseal', 'tammarwallaby', 'olingos', 'quoll',
'easternlesserbamboolemur', 'bandicoot', 'alpineibex', 'redneckedwallaby',
'bear', 'japaneseserow', 'galapagossealion', 'muriqui', 'blackfootedcat',
'cacomistle', 'ringtail', 'germanshepherddog', 'ribbonseal', 'domesticdog',
'lutung', 'tarsiers', 'margay', 'bongo', 'francoislangur', 'potto',
'whitetaileddeer', 'australiansealion', 'capuchinmonkey', 'dikdik',
'aardvark', 'snowleopard', 'banteng', 'chihuahua', 'proboscismonkey',
'ayeaye', 'pantanalcat', 'ethiopianwolf', 'africanwilddog', 'deer',
'alpaca', 'servalinegenet', 'chipmunk', 'degu', 'urial'
]
def animal_web_handler(
dataset_path: str,
apply_unrar: bool = True
) -> types.HandlerOutput:
"""Handler for AnimalWeb dataset."""
label_to_id = dict(
((label, label_id) for label_id, label in enumerate(_LABELS)))
metadata = types.DatasetMetaData(
num_classes=len(label_to_id),
num_channels=3,
image_shape=(), # Ignored for now.
additional_metadata=dict(
label_to_id=label_to_id,
task_type='classification',
image_type='object',
))
# Unarchive the images.
if apply_unrar:
# Normally, we assume that the unrar utility is available and extract the
# rar archive directly.
subprocess.call(['unrar', 'e', '-y', '-idq', _RAR_FNAME], cwd=dataset_path)
else:
# Otherwise, we assume the archive was repackaged in tar format beforehand.
tarfile.open(os.path.join(dataset_path, _TAR_FNAME),
'r|gz').extractall(path=dataset_path)
def make_gen():
for fname in gfile.listdir(dataset_path):
if not fname.endswith('jpg'):
continue
image_fname = os.path.splitext(fname)[0]
label_name = image_fname.split('_')[0]
label = label_to_id[label_name]
image = Image.open(os.path.join(dataset_path, fname)).convert('RGB')
yield (image, label)
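# A minimal sketch of the naming scheme assumed by the loop above: images are
# named '<label name>_<image id>.jpg', so a (hypothetical) file
# 'saluki_000123.jpg' is parsed as label_name 'saluki' and yields
# label_to_id['saluki'].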
deduplicated_data_gen = eu.deduplicate_data_generator(make_gen())
per_split_gen = splits.random_split_generator_into_splits_with_fractions(
deduplicated_data_gen, splits.SPLIT_WITH_FRACTIONS_FOR_ALL_DATA,
splits.MERGED_TRAIN_AND_DEV)
return metadata, per_split_gen
animal_web_dataset = types.DownloadableDataset(
name='animal_web',
download_urls=[
types.DownloadableArtefact(
url='https://drive.google.com/uc?export=download&id=13PbHxUofhdJLZzql3TyqL22bQJ3HwDK4&confirm=y',
checksum='d2d7e0a584ee4bd9badc74a9f2ef3b82')
],
website_url='https://fdmaproject.wordpress.com/author/fdmaproject/',
handler=animal_web_handler)
|
dm_nevis-master
|
dm_nevis/datasets_storage/handlers/animal_web.py
|
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Coil20 dataset handler."""
import functools
import re
from dm_nevis.datasets_storage.handlers import extraction_utils
from dm_nevis.datasets_storage.handlers import splits
from dm_nevis.datasets_storage.handlers import types
def coil20_handler(download_path: str, processed: bool) -> types.HandlerOutput:
"""Imports Coil20 dataset."""
if processed:
# First filenames in "coil-20-proc.zip" are:
# coil-20-proc/obj1__11.png
# coil-20-proc/obj1__12.png
# coil-20-proc/obj1__13.png
# [...]
archive_fname = "coil-20-proc.zip"
fname_re = re.compile(r"coil-20-proc/obj(\d+)__\d+\.png")
num_classes = 20
else:
# First filenames in "coil-20-unproc.zip" are:
# coil-20-unproc/obj1__11.png
# coil-20-unproc/obj1__12.png
# coil-20-unproc/obj1__13.png
# [...]
archive_fname = "coil-20-unproc.zip"
fname_re = re.compile(r"coil-20-unproc/obj(\d+)__\d+\.png")
num_classes = 5
def path_to_label_fn(fname):
fname_match = fname_re.match(fname)
if not fname_match:
return None
else:
return int(fname_match.group(1)) - 1
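# A minimal sketch of the mapping in the processed branch, using the archive
# layout shown above:
# >>> path_to_label_fn('coil-20-proc/obj7__13.png')
# 6
# >>> path_to_label_fn('coil-20-proc/README.txt')  # non-image entries
# None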
def gen_split():
return extraction_utils.generate_images_from_zip_files(
download_path, [archive_fname],
path_to_label_fn=path_to_label_fn,
convert_mode="RGB")
metadata = types.DatasetMetaData(
num_classes=num_classes,
num_channels=3,
image_shape=(), # Ignored for now.
additional_metadata=dict(
task_type="classification",
image_type="object"))
per_split_gen = splits.random_split_generator_into_splits_with_fractions(
gen_split, splits.SPLIT_WITH_FRACTIONS_FOR_ALL_DATA,
splits.MERGED_TRAIN_AND_DEV)
return (metadata, per_split_gen)
coil_20_dataset = types.DownloadableDataset(
name="coil20",
download_urls=[
types.DownloadableArtefact(
url="http://www.cs.columbia.edu/CAVE/databases/SLAM_coil-20_coil-100/coil-20/coil-20-proc.zip",
checksum="464dec76a6abfcd00e8de6cf1e7d0acc")
],
website_url="https://www.cs.columbia.edu/CAVE/software/softlib/coil-20.php",
paper_title="Columbia Object Image Library (COIL-20)",
authors="S. A. Nene, S. K. Nayar and H. Murase",
year=1996,
# num_examples=1440,
handler=functools.partial(coil20_handler, processed=True))
coil_20_unproc_dataset = types.DownloadableDataset(
name="coil20_unproc",
download_urls=[
types.DownloadableArtefact(
url="http://www.cs.columbia.edu/CAVE/databases/SLAM_coil-20_coil-100/coil-20/coil-20-unproc.zip",
checksum="a04fd3c91db987e5e634e7e0945a430a")
],
website_url=coil_20_dataset.website_url,
paper_title=coil_20_dataset.paper_title,
authors=coil_20_dataset.authors,
year=coil_20_dataset.year,
# num_examples=350,
handler=functools.partial(coil20_handler, processed=False))
|
dm_nevis-master
|
dm_nevis/datasets_storage/handlers/coil20.py
|
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Ofxord flowers 17 handler."""
import os
import re
import tarfile
from dm_nevis.datasets_storage.handlers import splits
from dm_nevis.datasets_storage.handlers import types
import numpy as np
from PIL import Image
import scipy.io
_IMAGE_FILE_NAME = '17flowers.tgz'
_SPLIT_FILE_NAME = 'datasplits.mat'
_NUM_EXAMPLES_PER_CLASS = 80
def oxford_flowers_17_handler(dataset_path: str) -> types.HandlerOutput:
"""Oxford flowers dataset with 17 classes."""
split_data = scipy.io.loadmat(os.path.join(dataset_path, _SPLIT_FILE_NAME))
train_ids = split_data['trn1'].flatten()
val_ids = split_data['val1'].flatten()
test_ids = split_data['tst1'].flatten()
# Image file names are sorted such that every class has exactly 80 examples.
id_to_label_fn = lambda x: (x - 1) // _NUM_EXAMPLES_PER_CLASS
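# A worked example of the ordering assumed above: with 80 images per class,
# one-based ids 1..80 map to label 0, ids 81..160 to label 1, and so on:
# >>> id_to_label_fn(80)  # (80 - 1) // 80 == 0
# 0
# >>> id_to_label_fn(81)  # (81 - 1) // 80 == 1
# 1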
def gen(ids, id_to_label_fn):
with tarfile.open(os.path.join(dataset_path, _IMAGE_FILE_NAME), 'r') as tf:
for member in tf:
if member.isdir() or 'image' not in member.path:
continue
idx = int(re.search(r'jpg/image_([\d]+).jpg', member.path)[1])
if idx not in ids:
continue
image = Image.open(tf.extractfile(member))
image.load()
label = id_to_label_fn(idx)
yield (image, label)
metadata = types.DatasetMetaData(
num_classes=17,
num_channels=3,
image_shape=(), # Ignored for now.
additional_metadata=dict(
task_type='classification',
image_type='object',
))
make_gen_fn = lambda: gen(train_ids, id_to_label_fn)
per_split_gen = splits.random_split_generator_into_splits_with_fractions(
make_gen_fn, splits.SPLIT_WITH_FRACTIONS_FOR_TRAIN_AND_DEV_ONLY,
splits.MERGED_TRAIN_AND_DEV)
per_split_gen['dev-test'] = gen(val_ids, id_to_label_fn)
per_split_gen['test'] = gen(test_ids, id_to_label_fn)
per_split_gen['all'] = gen(np.arange(start=1, stop=1360 + 1), id_to_label_fn)
return metadata, per_split_gen
oxford_flowers_17_dataset = types.DownloadableDataset(
name='oxford_flowers_17',
download_urls=[
types.DownloadableArtefact(
url='https://www.robots.ox.ac.uk/~vgg/data/flowers/17/17flowers.tgz',
checksum='b59a65d8d1a99cd66944d474e1289eab'),
types.DownloadableArtefact(
url='https://www.robots.ox.ac.uk/~vgg/data/flowers/17/datasplits.mat',
checksum='4828cddfd0d803c5abbdebcb1e148a1e')
],
website_url='https://www.robots.ox.ac.uk/~vgg/data/flowers/',
handler=oxford_flowers_17_handler)
|
dm_nevis-master
|
dm_nevis/datasets_storage/handlers/oxford_flowers_17.py
|
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Pascal VOC 2005 dataset handler."""
import os
import tarfile
from typing import Dict, List
from dm_nevis.datasets_storage.handlers import splits
from dm_nevis.datasets_storage.handlers import types
from PIL import Image
from tensorflow.io import gfile
def pascal_voc2005_handler(dataset_path: str) -> types.HandlerOutput:
"""Imports Pascal VOC 2005 dataset.
Links: http://host.robots.ox.ac.uk/pascal/VOC/databases.html#VOC2005_1
http://host.robots.ox.ac.uk/pascal/VOC/databases.html#VOC2005_2
Args:
dataset_path: Path with downloaded datafiles.
Returns:
Metadata and generator functions.
"""
files = gfile.listdir(dataset_path)
raw_file_path = dataset_path
assert len(files) == 2
labels = ['car', 'motorbike', 'bike', 'person']
label_to_id = {'car': 0,
'motorbike': 1,
'bike': 2,
'person': 3}
# Note: relative to the original directories, we removed test files from the
# training set and training files from the test set.
training_paths = {'car': ['VOC2005_1/PNGImages/Caltech_cars',
'VOC2005_1/PNGImages/ETHZ_sideviews-cars',
'VOC2005_1/PNGImages/TUGraz_cars'],
'bike': ['VOC2005_1/PNGImages/TUGraz_bike'],
'motorbike': [
'VOC2005_1/PNGImages/Caltech_motorbikes_side'],
'person': ['VOC2005_1/PNGImages/TUGraz_person']}
test_paths = {'car': ['VOC2005_2/PNGImages/car',
'VOC2005_2/PNGImages/voiture'],
'bike': ['VOC2005_2/PNGImages/bicycle',
'VOC2005_2/PNGImages/bike',
'VOC2005_2/PNGImages/velo'],
'motorbike': ['VOC2005_2/PNGImages/motocyclette',
'VOC2005_2/PNGImages/motorbike',
'VOC2005_2/PNGImages/motorcycle'],
'person': ['VOC2005_2/PNGImages/INRIA_graz-person-test',
'VOC2005_2/PNGImages/INRIA_inria-person-test',
'VOC2005_2/PNGImages/pedestrian',
'VOC2005_2/PNGImages/pieton']}
num_classes = len(labels)
metadata = types.DatasetMetaData(
num_classes=num_classes,
num_channels=3,
image_shape=(), # Ignored for now.
additional_metadata=dict(
label_to_id=label_to_id,
task_type='classification',
image_type='object'))
def get_image_label(file: str, all_paths: Dict[str, List[str]],
raw_file_path: str):
with tarfile.open(os.path.join(raw_file_path, file), 'r:gz') as tar:
for label, paths in all_paths.items():
for path in paths:
all_images = [
tarinfo for tarinfo in tar.getmembers()
if (tarinfo.name.startswith(path) and
tarinfo.name.endswith('png'))]
assert all_images
for image_file in all_images:
f_obj = tar.extractfile(image_file)
image = Image.open(f_obj)
image.load()
yield (image, label_to_id[label])
def gen_split(is_test_split: bool):
if is_test_split:
# extract test set
file = [file for file in files if file.endswith('2.tar.gz')]
assert len(file) == 1
return get_image_label(file[0], test_paths, raw_file_path)
else:
# extract training set
file = [file for file in files if file.endswith('1.tar.gz')]
assert len(file) == 1
return get_image_label(file[0], training_paths, raw_file_path)
make_gen_fn = lambda: gen_split(is_test_split=False)
per_split_gen = splits.random_split_generator_into_splits_with_fractions(
make_gen_fn, splits.SPLIT_WITH_FRACTIONS_FOR_TRAIN,
splits.MERGED_TRAIN_AND_DEV)
per_split_gen['test'] = gen_split(is_test_split=True)
return (metadata, per_split_gen)
pascal_voc2005_dataset = types.DownloadableDataset(
name='pascal_voc2005',
download_urls=[
types.DownloadableArtefact(
url='http://host.robots.ox.ac.uk/pascal/VOC/download/voc2005_1.tar.gz',
checksum='6fbeaee73a81c462b190ca837b977896'),
types.DownloadableArtefact(
url='http://host.robots.ox.ac.uk/pascal/VOC/download/voc2005_2.tar.gz',
checksum='15ec3d318b84ffdfa25f1e05de0014e2')
],
handler=pascal_voc2005_handler)
|
dm_nevis-master
|
dm_nevis/datasets_storage/handlers/pascal_voc2005.py
|
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""UIUC Texture dataset handler."""
import os
from typing import List
import zipfile
from dm_nevis.datasets_storage.handlers import extraction_utils as utils
from dm_nevis.datasets_storage.handlers import splits
from dm_nevis.datasets_storage.handlers import types
from tensorflow.io import gfile
def _get_class_names_from_file(dataset_path: str, fname: str) -> List[str]:
names = []
with zipfile.ZipFile(os.path.join(dataset_path, fname), 'r') as zf:
for name in sorted(zf.namelist()):
f = zf.getinfo(name)
if f.is_dir():
names.append(os.path.split(f.filename)[0])
return names
def _get_all_class_names(dataset_path: str, filenames: List[str]) -> List[str]:
names = []
for fname in filenames:
names += _get_class_names_from_file(dataset_path, fname)
return sorted(names)
def _path_to_label_fn(class_names: List[str]) -> utils.PathToLabelFn:
def _path_to_label(fname):
class_name = os.path.split(fname)[0]
return class_names.index(class_name)
return _path_to_label
def uiuc_texture_handler(dataset_path: str) -> types.HandlerOutput:
"""Imports UIUC texture dataset.
The dataset is split over 5 zip files, each of which contains a subset of
classes. To generate the images, we go through the zip files sequentially and
yield the images and their corresponding labels. The dataset does not provide
predefined splits, so the splits are generated at random.
Link:
https://web.archive.org/web/20070829035029/http://www-cvr.ai.uiuc.edu/ponce_grp/data/index.html
Args:
dataset_path: Path with downloaded datafiles.
Returns:
Metadata and generator functions.
"""
filenames = gfile.listdir(dataset_path)
class_names = _get_all_class_names(dataset_path, filenames)
metadata = types.DatasetMetaData(
num_channels=1,
num_classes=len(class_names),
image_shape=(), # Ignored for now.
preprocessing='random_crop', # select random crops in the images
additional_metadata=dict(
labels=class_names,
task_type='classification',
image_type='texture'
))
def gen():
return utils.generate_images_from_zip_files(
dataset_path=dataset_path,
zip_file_names=filenames,
path_to_label_fn=_path_to_label_fn(class_names))
per_split_gen = splits.random_split_generator_into_splits_with_fractions(
gen, splits.SPLIT_WITH_FRACTIONS_FOR_ALL_DATA,
splits.MERGED_TRAIN_AND_DEV)
return (metadata, per_split_gen)
uiuc_texture_dataset = types.DownloadableDataset(
name='uiuc_texture',
download_urls=[
types.DownloadableArtefact(
url='https://web.archive.org/web/20070829035029/http://www-cvr.ai.uiuc.edu/ponce_grp/data/texture_database/T01-T05.zip',
checksum='e622e4708e336d51b3bbd45503618af1'),
types.DownloadableArtefact(
url='https://web.archive.org/web/20070829035029/http://www-cvr.ai.uiuc.edu/ponce_grp/data/texture_database/T06-T10.zip',
checksum='50ab35564058d3f1d05e6f5d767db5df'),
types.DownloadableArtefact(
url='https://web.archive.org/web/20070829035029/http://www-cvr.ai.uiuc.edu/ponce_grp/data/texture_database/T11-T15.zip',
checksum='d1c7584aeb0ab1e41c7157c60e84c3ad'),
types.DownloadableArtefact(
url='https://web.archive.org/web/20070829035029/http://www-cvr.ai.uiuc.edu/ponce_grp/data/texture_database/T16-T20.zip',
checksum='d527ba7d820e1eeaef49f9c07b82aa34'),
types.DownloadableArtefact(
url='https://web.archive.org/web/20070829035029/http://www-cvr.ai.uiuc.edu/ponce_grp/data/texture_database/T21-T25.zip',
checksum='74a29af10123f2be70ea481e5af3ec36')
],
handler=uiuc_texture_handler,
paper_title='A Training-free Classification Framework for Textures, Writers, and Materials',
    authors='R. Timofte and L. Van Gool',
year='2012')
|
dm_nevis-master
|
dm_nevis/datasets_storage/handlers/uiuc_texture.py
|
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""DDSM dataset handler.
Breast-cancer dataset, described in
https://www.kaggle.com/awsaf49/cbis-ddsm-breast-cancer-image-dataset. The
dataset offers both full and cropped images; this handler uses the full
images. Moreover, the dataset comes with 2 separate train/test splits,
called `mass_case` and `calc_case`, which we merge into a single train/test
split.
"""
import os
import zipfile
from dm_nevis.datasets_storage.handlers import splits
from dm_nevis.datasets_storage.handlers import types
import pandas as pd
from PIL import Image
# Can also use 'cropped image file path'
_IMAGE_FILE_PATH_NAME = 'image file path'
_DICOM_INFO_FILE_PATH = 'csv/dicom_info.csv'
_CALC_TEST_SET_FILE_PATH = 'csv/calc_case_description_test_set.csv'
_CALC_TRAIN_SET_FILE_PATH = 'csv/calc_case_description_train_set.csv'
_MASS_TEST_FILE_PATH = 'csv/mass_case_description_test_set.csv'
_MASS_TRAIN_FILE_PATH = 'csv/mass_case_description_train_set.csv'
_ARCHIVE_NAME = 'cbis-ddsm-breast-cancer-image-dataset.zip'
# pylint:disable=missing-function-docstring
def ddsm_handler(dataset_path: str) -> types.HandlerOutput:
labels = ['BENIGN', 'MALIGNANT', 'BENIGN_WITHOUT_CALLBACK']
label_to_id = dict(
((label, label_id) for label_id, label in enumerate(labels)))
metadata = types.DatasetMetaData(
num_classes=len(labels),
num_channels=1,
image_shape=(), # Ignored for now.
additional_metadata=dict(
label_to_id=label_to_id,
task_type='classification',
image_type='xray',
))
with zipfile.ZipFile(os.path.join(dataset_path, _ARCHIVE_NAME), 'r') as zf:
dicom_info = pd.read_csv(zf.open(_DICOM_INFO_FILE_PATH))
dicom_info['extracted_patient_id'] = dicom_info['PatientID']
def _extract_image_path(path):
return os.path.join('jpeg', os.path.basename(os.path.dirname(path)),
os.path.basename(path))
dicom_info['image_path'] = dicom_info['image_path'].apply(
_extract_image_path)
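# A minimal sketch of the path rewrite, using a hypothetical CSV value (the
# directory name elided with '...' stands in for a long DICOM series id):
# >>> _extract_image_path('CBIS-DDSM/jpeg/1.3.6.1.4.1.9590.../1-1.jpg')
# 'jpeg/1.3.6.1.4.1.9590.../1-1.jpg'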
calc_case_description_test_set = pd.read_csv(
zf.open(_CALC_TEST_SET_FILE_PATH))
calc_case_description_train_set = pd.read_csv(
zf.open(_CALC_TRAIN_SET_FILE_PATH))
mass_case_description_test_set = pd.read_csv(zf.open(_MASS_TEST_FILE_PATH))
mass_case_description_train_set = pd.read_csv(
zf.open(_MASS_TRAIN_FILE_PATH))
train_set = pd.concat(
[calc_case_description_train_set, mass_case_description_train_set])
test_set = pd.concat(
[calc_case_description_test_set, mass_case_description_test_set])
train_set['extracted_patient_id'] = train_set[_IMAGE_FILE_PATH_NAME].apply(
lambda x: x.split('/')[0])
test_set['extracted_patient_id'] = test_set[_IMAGE_FILE_PATH_NAME].apply(
lambda x: x.split('/')[0])
train_set = pd.merge(
train_set, dicom_info, on='extracted_patient_id', how='inner')
train_set = train_set.drop_duplicates(
subset='image_path', ignore_index=True)
test_set = pd.merge(
test_set, dicom_info, on='extracted_patient_id', how='inner')
test_set = test_set.drop_duplicates(subset='image_path', ignore_index=True)
def gen(data_df, label_to_id):
with zipfile.ZipFile(os.path.join(dataset_path, _ARCHIVE_NAME), 'r') as zf:
for _, row in data_df.iterrows():
image_path = row['image_path']
label = label_to_id[row.pathology]
image = Image.open(zf.open(image_path))
image.load()
yield (image, label)
make_gen_fn = lambda: gen(train_set, label_to_id)
per_split_gen = splits.random_split_generator_into_splits_with_fractions(
make_gen_fn, splits.SPLIT_WITH_FRACTIONS_FOR_TRAIN,
splits.MERGED_TRAIN_AND_DEV)
per_split_gen['test'] = gen(test_set, label_to_id)
return metadata, per_split_gen
ddsm_dataset = types.DownloadableDataset(
name='ddsm',
download_urls=[
types.KaggleDataset(
dataset_name='awsaf49/cbis-ddsm-breast-cancer-image-dataset',
checksum='eba16e95a30193fcbda1d2668d96015f')
],
website_url='https://www.kaggle.com/awsaf49/cbis-ddsm-breast-cancer-image-dataset',
handler=ddsm_handler)
|
dm_nevis-master
|
dm_nevis/datasets_storage/handlers/ddsm.py
|
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""ALOT dataset handler.""" # NOTYPO
import functools
import os
import tarfile
from dm_nevis.datasets_storage.handlers import extraction_utils as utils
from dm_nevis.datasets_storage.handlers import splits
from dm_nevis.datasets_storage.handlers import types
from tensorflow.io import gfile
# pylint:disable=missing-function-docstring
def alot_handler(dataset_path: str,
is_grey: bool = False) -> types.HandlerOutput:
files = gfile.listdir(dataset_path)
assert len(files) == 1
alot_file = files[0]
# Calculate number of classes.
with tarfile.open(os.path.join(dataset_path, alot_file)) as tfile:
num_classes = len(
set([member.path.split('/')[1] for member in tfile.getmembers()[1:]]))
metadata = types.DatasetMetaData(
num_channels=1 if is_grey else 3,
num_classes=num_classes,
image_shape=(), # Ignored for now.
additional_metadata=dict())
def gen_split():
return utils.generate_images_from_tarfiles(
alot_file, working_directory=dataset_path,
path_to_label_fn=lambda x: int(x.split('/')[1]) - 1)
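# A minimal sketch of the label mapping, assuming archive members are laid
# out as '<archive root>/<one-based class id>/<image>'; e.g. a (hypothetical)
# member 'alot_png4/12/1_l1c1.png' yields int('12') - 1 == 11.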
per_split_gen = splits.random_split_generator_into_splits_with_fractions(
gen_split, splits.SPLIT_WITH_FRACTIONS_FOR_ALL_DATA,
splits.MERGED_TRAIN_AND_DEV)
return (metadata, per_split_gen)
alot_dataset = types.DownloadableDataset(
name='alot', # NOTYPO
download_urls=[
types.DownloadableArtefact(
url='http://aloi.science.uva.nl/public_alot/tars/alot_png4.tar',
checksum='c3f489c9072d3469e43225c5906e01e2')
], # NOTYPO
handler=alot_handler) # NOTYPO
alot_grey_dataset = types.DownloadableDataset(
name='alot_grey', # NOTYPO
download_urls=[
types.DownloadableArtefact(
url='http://aloi.science.uva.nl/public_alot/tars/alot_grey4.tar',
checksum='0f5b72b1fe1ac8381821d45e5087dd99')
], # NOTYPO
handler=functools.partial(alot_handler, is_grey=True)) # NOTYPO
|
dm_nevis-master
|
dm_nevis/datasets_storage/handlers/alot.py
|
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Caltech256 dataset handler."""
import re
from dm_nevis.datasets_storage.handlers import extraction_utils
from dm_nevis.datasets_storage.handlers import splits
from dm_nevis.datasets_storage.handlers import types
LABELS_TO_ID = {
"ak47": 0,
"american-flag": 1,
"backpack": 2,
"baseball-bat": 3,
"baseball-glove": 4,
"basketball-hoop": 5,
"bat": 6,
"bathtub": 7,
"bear": 8,
"beer-mug": 9,
"billiards": 10,
"binoculars": 11,
"birdbath": 12,
"blimp": 13,
"bonsai-101": 14,
"boom-box": 15,
"bowling-ball": 16,
"bowling-pin": 17,
"boxing-glove": 18,
"brain-101": 19,
"breadmaker": 20,
"buddha-101": 21,
"bulldozer": 22,
"butterfly": 23,
"cactus": 24,
"cake": 25,
"calculator": 26,
"camel": 27,
"cannon": 28,
"canoe": 29,
"car-tire": 30,
"cartman": 31,
"cd": 32,
"centipede": 33,
"cereal-box": 34,
"chandelier-101": 35,
"chess-board": 36,
"chimp": 37,
"chopsticks": 38,
"cockroach": 39,
"coffee-mug": 40,
"coffin": 41,
"coin": 42,
"comet": 43,
"computer-keyboard": 44,
"computer-monitor": 45,
"computer-mouse": 46,
"conch": 47,
"cormorant": 48,
"covered-wagon": 49,
"cowboy-hat": 50,
"crab-101": 51,
"desk-globe": 52,
"diamond-ring": 53,
"dice": 54,
"dog": 55,
"dolphin-101": 56,
"doorknob": 57,
"drinking-straw": 58,
"duck": 59,
"dumb-bell": 60,
"eiffel-tower": 61,
"electric-guitar-101": 62,
"elephant-101": 63,
"elk": 64,
"ewer-101": 65,
"eyeglasses": 66,
"fern": 67,
"fighter-jet": 68,
"fire-extinguisher": 69,
"fire-hydrant": 70,
"fire-truck": 71,
"fireworks": 72,
"flashlight": 73,
"floppy-disk": 74,
"football-helmet": 75,
"french-horn": 76,
"fried-egg": 77,
"frisbee": 78,
"frog": 79,
"frying-pan": 80,
"galaxy": 81,
"gas-pump": 82,
"giraffe": 83,
"goat": 84,
"golden-gate-bridge": 85,
"goldfish": 86,
"golf-ball": 87,
"goose": 88,
"gorilla": 89,
"grand-piano-101": 90,
"grapes": 91,
"grasshopper": 92,
"guitar-pick": 93,
"hamburger": 94,
"hammock": 95,
"harmonica": 96,
"harp": 97,
"harpsichord": 98,
"hawksbill-101": 99,
"head-phones": 100,
"helicopter-101": 101,
"hibiscus": 102,
"homer-simpson": 103,
"horse": 104,
"horseshoe-crab": 105,
"hot-air-balloon": 106,
"hot-dog": 107,
"hot-tub": 108,
"hourglass": 109,
"house-fly": 110,
"human-skeleton": 111,
"hummingbird": 112,
"ibis-101": 113,
"ice-cream-cone": 114,
"iguana": 115,
"ipod": 116,
"iris": 117,
"jesus-christ": 118,
"joy-stick": 119,
"kangaroo-101": 120,
"kayak": 121,
"ketch-101": 122,
"killer-whale": 123,
"knife": 124,
"ladder": 125,
"laptop-101": 126,
"lathe": 127,
"leopards-101": 128,
"license-plate": 129,
"lightbulb": 130,
"light-house": 131,
"lightning": 132,
"llama-101": 133,
"mailbox": 134,
"mandolin": 135,
"mars": 136,
"mattress": 137,
"megaphone": 138,
"menorah-101": 139,
"microscope": 140,
"microwave": 141,
"minaret": 142,
"minotaur": 143,
"motorbikes-101": 144,
"mountain-bike": 145,
"mushroom": 146,
"mussels": 147,
"necktie": 148,
"octopus": 149,
"ostrich": 150,
"owl": 151,
"palm-pilot": 152,
"palm-tree": 153,
"paperclip": 154,
"paper-shredder": 155,
"pci-card": 156,
"penguin": 157,
"people": 158,
"pez-dispenser": 159,
"photocopier": 160,
"picnic-table": 161,
"playing-card": 162,
"porcupine": 163,
"pram": 164,
"praying-mantis": 165,
"pyramid": 166,
"raccoon": 167,
"radio-telescope": 168,
"rainbow": 169,
"refrigerator": 170,
"revolver-101": 171,
"rifle": 172,
"rotary-phone": 173,
"roulette-wheel": 174,
"saddle": 175,
"saturn": 176,
"school-bus": 177,
"scorpion-101": 178,
"screwdriver": 179,
"segway": 180,
"self-propelled-lawn-mower": 181,
"sextant": 182,
"sheet-music": 183,
"skateboard": 184,
"skunk": 185,
"skyscraper": 186,
"smokestack": 187,
"snail": 188,
"snake": 189,
"sneaker": 190,
"snowmobile": 191,
"soccer-ball": 192,
"socks": 193,
"soda-can": 194,
"spaghetti": 195,
"speed-boat": 196,
"spider": 197,
"spoon": 198,
"stained-glass": 199,
"starfish-101": 200,
"steering-wheel": 201,
"stirrups": 202,
"sunflower-101": 203,
"superman": 204,
"sushi": 205,
"swan": 206,
"swiss-army-knife": 207,
"sword": 208,
"syringe": 209,
"tambourine": 210,
"teapot": 211,
"teddy-bear": 212,
"teepee": 213,
"telephone-box": 214,
"tennis-ball": 215,
"tennis-court": 216,
"tennis-racket": 217,
"theodolite": 218,
"toaster": 219,
"tomato": 220,
"tombstone": 221,
"top-hat": 222,
"touring-bike": 223,
"tower-pisa": 224,
"traffic-light": 225,
"treadmill": 226,
"triceratops": 227,
"tricycle": 228,
"trilobite-101": 229,
"tripod": 230,
"t-shirt": 231,
"tuning-fork": 232,
"tweezer": 233,
"umbrella-101": 234,
"unicorn": 235,
"vcr": 236,
"video-projector": 237,
"washing-machine": 238,
"watch-101": 239,
"waterfall": 240,
"watermelon": 241,
"welding-mask": 242,
"wheelbarrow": 243,
"windmill": 244,
"wine-bottle": 245,
"xylophone": 246,
"yarmulke": 247,
"yo-yo": 248,
"zebra": 249,
"airplanes-101": 250,
"car-side-101": 251,
"faces-easy-101": 252,
"greyhound": 253,
"tennis-shoes": 254,
"toad": 255,
"clutter": 256
}
_OBJECT_CATEGORIES_PATH = "256_ObjectCategories.tar"
# Filenames in the archive look like this:
# 256_ObjectCategories/003.backpack/003_0001.jpg
#
# where 003 is the label-number 1..257 (one-based).
_FILE_PATH_REGEX = re.compile(
r"256_ObjectCategories/(\d\d\d)\.(.+)/(\d\d\d)_(\d\d\d\d)\.jpg")
def caltech256_handler(download_path: str) -> types.HandlerOutput:
"""Imports Caltech256 dataset."""
def path_to_label_fn(fname):
fname_match = _FILE_PATH_REGEX.match(fname)
if not fname_match:
return None
else:
label_id = int(fname_match.group(1)) - 1
label_str = fname_match.group(2)
assert LABELS_TO_ID[label_str] == label_id
return label_id
def gen_split():
yield from extraction_utils.generate_images_from_tarfiles(
_OBJECT_CATEGORIES_PATH,
working_directory=download_path,
path_to_label_fn=path_to_label_fn,
convert_mode="RGB")
metadata = types.DatasetMetaData(
num_classes=len(LABELS_TO_ID),
num_channels=3,
image_shape=(), # Ignored for now.
additional_metadata=dict(
label_str_to_id=LABELS_TO_ID,
task_type="classification",
image_type="object"))
per_split_gen = splits.random_split_generator_into_splits_with_fractions(
gen_split, splits.SPLIT_WITH_FRACTIONS_FOR_ALL_DATA,
splits.MERGED_TRAIN_AND_DEV)
return (metadata, per_split_gen)
caltech256_dataset = types.DownloadableDataset(
name="caltech256",
download_urls=[
types.DownloadableArtefact(
url="https://drive.google.com/u/0/uc?id=1r6o0pSROcV1_VwT4oSjA2FBUSCWGuxLK&export=download&confirm=y",
checksum="67b4f42ca05d46448c6bb8ecd2220f6d")
],
website_url="http://www.vision.caltech.edu/Image_Datasets/Caltech256/",
paper_url="The Caltech 256. Caltech Technical Report.",
authors=" Griffin, G. Holub, AD. Perona, P.",
papers_with_code_url="https://paperswithcode.com/dataset/caltech-256",
handler=caltech256_handler)
|
dm_nevis-master
|
dm_nevis/datasets_storage/handlers/caltech256.py
|
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for dm_nevis.datasets_storage.handlers."""
from absl.testing import absltest
from dm_nevis.datasets_storage import handlers
class DatasetHandlersTest(absltest.TestCase):
def test_matching_name(self):
"""Test dataset names in handler._DATASETS_TO_HANDLERS match."""
for name, downloadable_dataset in handlers._DATASETS_TO_HANDLERS.items():
self.assertEqual(
name, downloadable_dataset.name,
("Dataset name in registry (_DATASETS_TO_HANDLERS) and in" +
" DownloadableDataset structure must match."))
if __name__ == "__main__":
absltest.main()
|
dm_nevis-master
|
dm_nevis/datasets_storage/handlers/handler_test.py
|
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Scenes8 handler."""
import os
from dm_nevis.datasets_storage.handlers import extraction_utils as utils
from dm_nevis.datasets_storage.handlers import splits
from dm_nevis.datasets_storage.handlers import types
from tensorflow.io import gfile
def _path_to_label_fn(path: str, label_to_id):
label = os.path.dirname(path)
return label_to_id[label]
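# A minimal sketch of the mapping, assuming archive members are stored as
# '<class name>/<image file>'; e.g. a (hypothetical) entry
# 'kitchen/image_0001.jpg' maps to label_to_id['kitchen'] == 1.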
def scenes15_handler(dataset_path: str) -> types.HandlerOutput:
"""A handler for Scenes15 dataset."""
files = gfile.listdir(dataset_path)
labels = [
'PARoffice', 'kitchen', 'MITinsidecity', 'MITforest', 'MITcoast',
'MITstreet', 'store', 'industrial', 'bedroom', 'MITmountain',
'MIThighway', 'MITopencountry', 'CALsuburb', 'MITtallbuilding',
'livingroom'
]
label_to_id = dict(
((label, label_id) for label_id, label in enumerate(labels)))
metadata = types.DatasetMetaData(
num_classes=len(labels),
num_channels=1,
image_shape=(), # Ignored for now.
additional_metadata=dict(
label_to_id=label_to_id,
task_type='classification',
image_type='scene',
))
def make_gen_fn():
return utils.generate_images_from_zip_files(
dataset_path,
files,
path_to_label_fn=lambda path: _path_to_label_fn(path, label_to_id),
convert_mode='L')
per_split_gen = splits.random_split_generator_into_splits_with_fractions(
make_gen_fn, splits.SPLIT_WITH_FRACTIONS_FOR_ALL_DATA,
splits.MERGED_TRAIN_AND_DEV)
return metadata, per_split_gen
scenes15_dataset = types.DownloadableDataset(
name='scenes15',
download_urls=[
types.DownloadableArtefact(
url='https://web.archive.org/web/20070829035029/http://www-cvr.ai.uiuc.edu/ponce_grp/data/scene_categories/scene_categories.zip',
checksum='58828019197b2ad0a7efb472e7a85c2a')
],
handler=scenes15_handler)
|
dm_nevis-master
|
dm_nevis/datasets_storage/handlers/scenes15.py
|
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Setup script to install the experiments_jax library."""
import setuptools
with open("requirements.txt", "r") as f:
dependencies = list(map(lambda x: x.strip(), f.readlines()))
setuptools.setup(
name="experiments_jax",
version="0.1",
author="DeepMind Nevis Team",
author_email="See authors emails in paper",
description="Nevis experiments in JAX.",
long_description="Nevis experiments in JAX.",
long_description_content_type="text/markdown",
url="https://github.com/deepmind/dm_nevis",
packages=setuptools.find_packages(),
classifiers=[
"Programming Language :: Python :: 3",
],
python_requires=">=3.8",
install_requires=dependencies)
|
dm_nevis-master
|
dm_nevis/opensource/dm_nevis_library/jax_setup.py
|
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Setup script to install the experiments_torch library."""
import setuptools
with open("requirements.txt", "r") as f:
dependencies = list(map(lambda x: x.strip(), f.readlines()))
setuptools.setup(
name="experiments_torch",
version="0.1",
author="DeepMind Nevis Team",
author_email="See authors emails in paper",
description="Nevis experiments in PyTorch.",
long_description="Nevis experiments in PyTorch.",
long_description_content_type="text/markdown",
url="https://github.com/deepmind/dm_nevis",
packages=setuptools.find_packages(),
classifiers=[
"Programming Language :: Python :: 3",
],
python_requires=">=3.8",
install_requires=dependencies)
|
dm_nevis-master
|
dm_nevis/opensource/dm_nevis_library/torch_setup.py
|
dm_nevis-master
|
experiments_torch/__init__.py
|
|
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Create a launchpad program for benchmarking learners.
This module provides an interface for constructing a launchpad program that can
benchmark learners using the benchmarker. Learners are configured using
instances of the `ExperimentConfig` class. This class may be initialized from an
`ml_collections.ConfigDict` using the `config_from_config_dict` function.
To benchmark a learner on launchpad, users must provide a function
`launchpad_learner_builder` that,
1) adds any learner-specific launchpad nodes to the launchpad program and,
2) returns a callable for building the learner.
We opt to pass around callable "builder" functions rather than passing around
instantiated objects, since these builder functions may be called directly on
the launchpad nodes where the objects are to be instantiated. This means that we
do not require that the object instances be serializable, and means that
launcher program does not have to initialize the stream or learner in order to
build the programs being launched.
When running on XManager, launchpad requires information on the resource types
to use for each program group. Every program defined by this module will
contain an `environment` group, which is the leader thread running the outer
loop of the environment. A single node will be started in this group
containing the environment. This node will also instantiate the learner
using the builder function returned by `launchpad_learner_builder`. For learners
that require only a single node, it may suffice to allocate a sufficiently large
resource type to the environment group.
"""
import dataclasses
from typing import Any, Callable, Mapping, Optional, Sequence, Tuple, Protocol
from absl import logging
from dm_nevis.benchmarker.datasets import datasets
from dm_nevis.benchmarker.datasets import streams
from dm_nevis.benchmarker.datasets import tasks
from dm_nevis.benchmarker.environment import datawriter_interface
from dm_nevis.benchmarker.environment import environment
from dm_nevis.benchmarker.learners import learner_interface
from experiments_torch.environment import logging_writer
from experiments_torch.environment import noop_checkpointer
from experiments_torch.metrics import nevis_metrics
import ml_collections
import torch
ProgramStopper = Callable[[], None]
LearnerBuilderFn = Callable[[], learner_interface.Learner]
StreamBuilderFn = Callable[[], streams.Stream]
DatasetLookupFn = Callable[[streams.DatasetKey], datasets.Dataset]
DatasetLookupBuilderFn = Callable[[], Tuple[DatasetLookupFn,
Sequence[tasks.TaskKey]]]
BENCHMARKER_DATAFRAME = "benchmark"
class MetaLearnerBuilderFn(Protocol):
"""The signature of the function that prepares a learner to run on launchpad.
Learners are given access to the launchpad program, which allows them to
add auxiliary nodes to the launchpad program. The function then returns a
callable used to initialize the learner. Note that the returned callable will
be executed directly on the node running the environment, which means that the
learner does not need to be serializable. Similarly, the dataset lookup is
wrapped in a builder function. This builder function must be serializable,
but the actual dataset lookup returned from the builder need not be.
In order to ensure graceful termination when using launchpad with threads
(which we use for running tests on TAP), learners can provide a function for
gracefully terminating any resources that they have spawned.
"""
def __call__(
self, *, dataset_lookup_builder: DatasetLookupBuilderFn,
learner_config: ml_collections.ConfigDict
) -> Tuple[LearnerBuilderFn, ProgramStopper]:
"""Callable used to initialize the learner.
Args:
dataset_lookup_builder: A function that returns a dataset lookup, and the
sequence of training task keys that will be fed to the learner. This is
a 'builder' function since we want to be able to construct the objects
directly on the machines where they will run, rather than constructing
them in the launchpad main process and then pickling the functions.
learner_config: The learner-specific configuration.
Returns:
A function for constructing a learner satisfying the learner interface,
and a function for gracefully stopping the learner's resources.
"""
@dataclasses.dataclass
class LearnerConfig:
learner_builder: MetaLearnerBuilderFn
config: ml_collections.ConfigDict
@dataclasses.dataclass
class StreamConfig:
ctor: Callable[..., streams.Stream]
kwargs: Mapping[str, Any]
@dataclasses.dataclass
class ExperimentConfig:
resume_from_checkpoint_path: str
stream: StreamConfig
learner: LearnerConfig
def config_from_config_dict(cfg: ml_collections.ConfigDict) -> ExperimentConfig:
"""Constructs a typed experiment config from an untyped config dict."""
resume_from_checkpoint_path = cfg.resume_from_checkpoint_path
stream_config = StreamConfig(**cfg.stream)
learner_config = LearnerConfig(**cfg.learner)
return ExperimentConfig(
resume_from_checkpoint_path=resume_from_checkpoint_path,
stream=stream_config,
learner=learner_config,
)
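# A minimal sketch of the config dict shape this function expects. The field
# values below (my_stream_ctor, my_builder_fn) are hypothetical placeholders,
# not real experiment settings:
#
# cfg = ml_collections.ConfigDict(dict(
#     resume_from_checkpoint_path="",
#     stream=dict(ctor=my_stream_ctor, kwargs={}),
#     learner=dict(learner_builder=my_builder_fn,
#                  config=ml_collections.ConfigDict()),
# ))
# experiment_config = config_from_config_dict(cfg)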
def _stopper():
return
def run_program(config: ExperimentConfig, device_id: int):
"""Prepares a launchpad program to be executed."""
stream_builder, dataset_lookup_builder = _stream_builders(config.stream)
config.learner.config.device = get_device(device_id)
logging.info("Building learner to run on launchpad")
learner_builder, learner_stopper = config.learner.learner_builder(
dataset_lookup_builder=dataset_lookup_builder,
learner_config=config.learner.config,
)
benchmark_metrics_writer = config.learner.config.get_metrics_writer(
"benchmarker")
return _run_environment(config.resume_from_checkpoint_path, stream_builder,
learner_builder, learner_stopper, _stopper,
benchmark_metrics_writer)
def _run_environment(checkpoint_restore_path: Optional[str],
stream_builder: StreamBuilderFn,
learner_builder: LearnerBuilderFn,
learner_stopper: ProgramStopper, stopper: ProgramStopper,
benchmark_metrics_writer: datawriter_interface.DataWriter):
"""Runs the environment."""
learner = learner_builder()
stream = stream_builder()
checkpointer = noop_checkpointer.NoOpCheckpointer(
restore_path=checkpoint_restore_path)
metrics = nevis_metrics.nevis_metrics(stream.get_dataset_by_key,
benchmark_metrics_writer)
optional_checkpoint_to_resume = checkpointer.restore()
output = environment.run(
learner,
stream,
metrics,
write_checkpoint=checkpointer.write,
checkpoint_to_resume=optional_checkpoint_to_resume,
)
metrics = {
**output.results,
**dataclasses.asdict(output.train_resources_used)
}
logging.info("Benchmark Results: %s", metrics)
benchmark_metrics_writer.close() # Flush and close metrics writer
logging.info("Stopping Launchpad...")
learner_stopper()
stopper()
def _stream_builders(
config: StreamConfig) -> Tuple[StreamBuilderFn, DatasetLookupBuilderFn]:
"""Builds functions that can instantiate the stream and dataset lookup."""
def stream_builder():
return config.ctor(**config.kwargs)
def dataset_lookup_builder():
stream = stream_builder()
task_keys = _all_train_task_keys(stream)
return stream.get_dataset_by_key, task_keys
return stream_builder, dataset_lookup_builder
def _all_train_task_keys(stream: streams.Stream) -> Sequence[tasks.TaskKey]:
task_keys = []
# TODO: Consider adding this to the stream interface.
for event in stream.events():
if isinstance(event, streams.TrainingEvent):
dataset = stream.get_dataset_by_key(event.train_dataset_key)
task_keys.append(dataset.task_key)
return task_keys
def _benchmark_metrics_writer() -> datawriter_interface.DataWriter:
"""Get a metrics writer for writing intermediate results on the test sets."""
logging.warning("Using dummy writer, writing metrics to logs.")
return logging_writer.LoggingWriter()
def get_device(device_id: int) -> torch.device:
"""Get a torch device from its id.
device_id = 2 will return a handle on the third CUDA GPU.
device_id = -1 will return a handle on the CPU.
Args:
device_id: an identifier of the device to use.
Returns:
A handle on the device to use.
"""
if device_id == -1:
return torch.device("cpu")
return torch.device(f"cuda:{device_id}")
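# Usage sketch:
# >>> get_device(-1)
# device(type='cpu')
# >>> get_device(0)  # requires a CUDA-capable build of torch
# device(type='cuda', index=0)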
|
dm_nevis-master
|
experiments_torch/experiment.py
|
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Entrypoint for the PyTorch experiments."""
from collections.abc import Sequence
from absl import app
from absl import flags
from experiments_torch import experiment
from ml_collections import config_flags
_CONFIG = config_flags.DEFINE_config_file(
'config', None, 'Configuration File', lock_config=False)
_DEVICE_ID = flags.DEFINE_integer('device', -1, 'Device ID, -1 for cpu')
def main(argv: Sequence[str]) -> None:
if len(argv) > 1:
raise app.UsageError('Too many command-line arguments.')
config = _CONFIG.value
device_id = _DEVICE_ID.value
experiment.run_program(config.experiment, device_id)
if __name__ == '__main__':
app.run(main)
|
dm_nevis-master
|
experiments_torch/launch.py
|
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Metrics for the Nevis project."""
# TODO: add back the test file
import collections
import dataclasses
import io
import os
from typing import Callable, Iterator, Mapping, Optional, Sequence, Union
from absl import logging
from dm_nevis.benchmarker.datasets import datasets
from dm_nevis.benchmarker.datasets import streams
from dm_nevis.benchmarker.datasets import tasks
from dm_nevis.benchmarker.environment import datawriter_interface
from dm_nevis.benchmarker.learners import learner_interface
from dm_nevis.benchmarker.metrics import classification_metrics
from dm_nevis.benchmarker.metrics import metrics_aggregators
from dm_nevis.benchmarker.metrics import multi_label_classification_metrics
import numpy as np
from tensorflow.io import gfile
KNOWN_SPLIT_SUFFICES = frozenset([
"train",
"dev",
"train_and_dev",
"dev_test",
"test",
])
UNKNOWN_SPLIT_NAME = "unknown_split"
DEFAULT_OUTPUT_DIR = os.environ.get("NEVIS_OUTPUT_DIR", "/tmp/nevis_output_dir")
@dataclasses.dataclass(frozen=True)
class PredictionMetrics:
event: streams.PredictionEvent
task: tasks.TaskKey
metrics: Union[
classification_metrics.ClassificationMetrics,
multi_label_classification_metrics.MultiLabelClassificationMetrics]
@dataclasses.dataclass(frozen=True)
class TrainMetrics:
event: streams.TrainingEvent
task: tasks.TaskKey
resources_used: learner_interface.ResourceUsage
@dataclasses.dataclass(frozen=True)
class NevisMetricsState:
"""The metrics state for this aggregator.
We maintain a 1-to-1 relationship between events in the stream and entries
in the (ordered) sequence of metrics objects.
"""
predictions_dir: str
metrics: Sequence[Union[PredictionMetrics, TrainMetrics]]
def nevis_metrics(
dataset_lookup: Callable[[streams.DatasetKey], datasets.Dataset],
metrics_writer: datawriter_interface.DataWriter
) -> metrics_aggregators.MetricsAggregator:
"""Returns a metrics aggregator for the Nevis stream.
This aggregator computes common classification metrics for every
prediction event in the stream. Once the stream has finished, the aggregator
will fetch the final computed metrics for each task, and then compute an
overall normalized accuracy for each of these final computed metrics,
normalized by the total number of examples.
Args:
dataset_lookup: A callable to retrieve datasets given the dataset key.
metrics_writer: A pipe to write debug metrics to. This will be written each
time aggregate is called.
Returns:
A metrics aggregator for use in the Nevis stream and with the benchmarker.
"""
def init() -> NevisMetricsState:
logging.info("Initializing metrics")
predictions_dir = _create_output_dir()
logging.info("Writing raw predictions to %s", predictions_dir)
return NevisMetricsState(
predictions_dir=predictions_dir,
metrics=[],
)
def aggregate_train_event(
state: NevisMetricsState,
event: streams.TrainingEvent,
resources_used: learner_interface.ResourceUsage,
) -> NevisMetricsState:
task_key = dataset_lookup(event.dev_dataset_key).task_key
return dataclasses.replace(
state,
metrics=[
*state.metrics,
TrainMetrics(event, task_key, resources_used),
],
)
def aggregate_predict_event(
state: NevisMetricsState,
event: streams.PredictionEvent,
predictions: Iterator[learner_interface.Predictions],
) -> NevisMetricsState:
resources_used = _combined_train_resources_used(state)
dataset = dataset_lookup(event.dataset_key)
task = dataset.task_key
task_kind = task.kind
outdir = os.path.join(
state.predictions_dir,
f"event_{len(state.metrics)}",
)
if not gfile.exists(outdir):
gfile.makedirs(outdir)
path = os.path.join(outdir, "raw_predictions.npz")
with WrappedPredictionsWriter(predictions, path=path, task=task) as wrapped:
if task_kind == tasks.TaskKind.CLASSIFICATION:
metrics = classification_metrics.compute_metrics(wrapped)
elif task_kind == tasks.TaskKind.MULTI_LABEL_CLASSIFICATION:
metrics = multi_label_classification_metrics.compute_metrics(wrapped)
else:
raise NotImplementedError(f"Unsupported task kind: {task_kind}.")
payload = {
"raw_predictions_and_targets_path": path,
"stream_index": len(state.metrics),
"index_of_most_recent_train_event": _num_train_events(state) - 1,
"task_name": task.name,
"task_kind": str(task.kind),
"dataset_key": str(event.dataset_key),
"data_split": _try_to_extract_split(event.dataset_key),
"cumulative_train_flops_used": resources_used.floating_point_operations,
"peak_parameter_count": resources_used.peak_parameter_count,
"peak_parameter_size_bytes": resources_used.peak_parameter_size_bytes,
**metrics._asdict(),
}
logging.info("Metrics for task %s: %s", task.name, payload)
metrics_writer.write(payload)
metrics_writer.flush()
return dataclasses.replace(
state,
metrics=[*state.metrics,
PredictionMetrics(event, task, metrics)],
)
return metrics_aggregators.MetricsAggregator(init, aggregate_train_event,
aggregate_predict_event,
_compute_results)
def _compute_results(state: NevisMetricsState) -> metrics_aggregators.Results:
"""Compute statistics over the stream."""
prediction_metrics_by_split = _extract_prediction_metrics_by_split(state)
results = {}
for split, metrics in prediction_metrics_by_split.items():
single_label_results = _compute_single_label_results(metrics)
multi_label_results = _compute_multi_label_results(metrics)
for key, value in single_label_results.items():
results[f"{split}_{key}"] = value
for key, value in multi_label_results.items():
results[f"{split}_{key}"] = value
return results
def _extract_prediction_metrics_by_split(
state: NevisMetricsState) -> Mapping[str, Sequence[PredictionMetrics]]:
"""Separates out the predict metrics by dataset split name."""
predict_metrics_by_split = collections.defaultdict(list)
for m in state.metrics:
if not isinstance(m, PredictionMetrics):
continue
split = _try_to_extract_split(m.event.dataset_key) or UNKNOWN_SPLIT_NAME
predict_metrics_by_split[split].append(m)
return dict(predict_metrics_by_split)
def _compute_single_label_results(
metrics: Sequence[PredictionMetrics]) -> metrics_aggregators.Results:
"""Compute results for single class case."""
num_events = 0
top_one_correct = 0
num_examples = 0
total_accuracy = 0.0
for m in metrics:
if not isinstance(m.metrics, classification_metrics.ClassificationMetrics):
continue
num_events += 1
total_accuracy += m.metrics.top_one_accuracy
top_one_correct += m.metrics.top_one_correct
num_examples += m.metrics.num_examples
if num_examples == 0:
weighted_accuracy = float("nan")
accuracy = float("nan")
else:
weighted_accuracy = top_one_correct / num_examples
accuracy = total_accuracy / num_events
return {
"weighted_average_single_label_accuracy": weighted_accuracy,
"average_single_label_accuracy": accuracy,
}
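# Worked example (illustrative numbers only): given two prediction events,
# one with 90/100 correct (accuracy 0.9) and one with 5/10 correct (accuracy
# 0.5), the results above would be:
#   weighted_average_single_label_accuracy = (90 + 5) / (100 + 10) ~= 0.864
#   average_single_label_accuracy          = (0.9 + 0.5) / 2       =  0.7
# The weighted variant favours larger datasets, whereas the plain average
# treats every prediction event equally.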
def _compute_multi_label_results(
metrics: Sequence[PredictionMetrics]) -> metrics_aggregators.Results:
"""Compute results for multi label case."""
num_events = 0
total_mean_average_precision = 0.0
for m in metrics:
if not isinstance(
m.metrics,
multi_label_classification_metrics.MultiLabelClassificationMetrics):
continue
num_events += 1
total_mean_average_precision += m.metrics.mean_average_precision
if num_events == 0:
mean_mean_average_precision = float("nan")
else:
mean_mean_average_precision = total_mean_average_precision / num_events
# TODO: Find a better way to combine mAP.
return {
"average_multi_label_mean_average_precision": mean_mean_average_precision
}
def _combined_train_resources_used(
state: NevisMetricsState) -> learner_interface.ResourceUsage:
"""Computes total train resources used so far."""
result = None
for m in state.metrics:
if not isinstance(m, TrainMetrics):
continue
if result is None:
result = m.resources_used
else:
result = result.combine(m.resources_used)
if result is None:
return learner_interface.ResourceUsage()
return result
def _num_train_events(state: NevisMetricsState) -> int:
return sum(1 for m in state.metrics if isinstance(m, TrainMetrics))
def _try_to_extract_split(dataset_key: str) -> Optional[str]:
"""Attempts to compute the split from the dataset key.
For the Nevis stream, the dataset splits are stored at the end of the dataset
key, as `<dataset_name>_<split>`.
Args:
dataset_key: The key to try and compute the split for.
Returns:
The split name, or None if no match was found.
"""
suffices_by_length = sorted(KNOWN_SPLIT_SUFFICES, key=lambda x: -len(x))
for suffix in suffices_by_length:
if dataset_key.endswith("_" + suffix):
return suffix
return None
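# Example (hedged; assumes "train" and "dev_test" are among the known split
# suffices): _try_to_extract_split("mnist_train") would return "train", and
# because candidates are tried longest-first, "imagenet_dev_test" would match
# "dev_test" rather than "test". Keys with no known suffix return None.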
def _create_output_dir() -> str:
result = os.path.join(DEFAULT_OUTPUT_DIR, "predictions")
if not gfile.exists(result):
gfile.makedirs(result)
return result
class WrappedPredictionsWriter:
"""A writer for storing raw predictions to an output file.
  This writer wraps a prediction iterator and buffers the raw outputs and
  targets in memory. When the context managed by this object is closed, the
  buffered arrays are concatenated and written as a multi-array `.npz` file
  to the output path.
"""
def __init__(self, predictions: Iterator[learner_interface.Predictions], *,
path: str, task: tasks.TaskKey):
if task.kind not in {
tasks.TaskKind.CLASSIFICATION,
tasks.TaskKind.MULTI_LABEL_CLASSIFICATION,
}:
raise ValueError("Cannot save predictions for unsupported task: {task}")
self._task = task
self._path = path
self._iter = predictions
self._raw_targets = []
self._raw_outputs = []
def __enter__(self):
return self
def __exit__(self, *args, **kwargs):
del args, kwargs
if not self._raw_outputs:
logging.warning("Skipping writing empty predictions...")
return
logging.info("Writing targets and outputs to local files...")
targets = np.concatenate(self._raw_targets, axis=0)
outputs = np.concatenate(self._raw_outputs, axis=0)
# https://github.com/tensorflow/tensorflow/issues/32090#issuecomment-986135710
io_buffer = io.BytesIO()
np.savez(io_buffer, targets=targets, outputs=outputs)
with gfile.GFile(self._path, "wb") as outfile:
logging.info("Writing raw targets and outputs to %s", self._path)
outfile.write(io_buffer.getvalue())
logging.info("Finished writing raw targets and outputs.")
def __iter__(self):
return self
def __next__(self):
prediction = next(self._iter)
if self._task.kind is tasks.TaskKind.CLASSIFICATION:
targets, outputs = prediction.batch.label, prediction.output
self._raw_targets.append(targets)
self._raw_outputs.append(outputs[0])
elif self._task.kind is tasks.TaskKind.MULTI_LABEL_CLASSIFICATION:
targets, outputs = prediction.batch.multi_label_one_hot, prediction.output
self._raw_targets.append(targets)
self._raw_outputs.append(np.stack(outputs, axis=1))
else:
raise ValueError(f"Unsupported task: {self._task.kind}")
return prediction
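# Example (hedged sketch): reading back the raw predictions written by the
# wrapper, where `path` stands for the `raw_predictions.npz` path given to it.
#
#   with gfile.GFile(path, "rb") as f:
#     data = np.load(io.BytesIO(f.read()))
#   targets, outputs = data["targets"], data["outputs"]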
|
dm_nevis-master
|
experiments_torch/metrics/nevis_metrics.py
|
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
|
dm_nevis-master
|
experiments_torch/metrics/__init__.py
|
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Multihead models."""
import contextlib
from typing import Any, Callable, Dict, Iterable, Mapping, Optional, Tuple
from dm_nevis.benchmarker.datasets import tasks
from experiments_torch.training import heads
import torch
from torch import nn
PredictFn = Callable[[torch.Tensor, tasks.TaskKey], torch.Tensor]
LossAndMetricsFn = Callable[[torch.Tensor, torch.Tensor, bool, tasks.TaskKey],
Tuple[torch.Tensor, Mapping[str, float]]]
class Model(nn.Module):
"""PyTorch model.
Attributes:
    backbone: a feature extractor (e.g. a CNN or MLP) producing a flat
      embedding per image.
heads_map: A mapping of task key to head classifier.
"""
def __init__(self, backbone: nn.Module, heads_map: Mapping[str, heads.Head]):
super().__init__()
self.backbone = backbone
    self.heads_map = nn.ModuleDict(heads_map)  # Registers each head's params.
def forward(self, images: torch.Tensor, labels: torch.Tensor,
is_training: bool, task_key: tasks.TaskKey):
context_manager = contextlib.nullcontext if is_training else torch.no_grad
training_mode = self.training
self.train(is_training)
with context_manager():
embeddings = self.backbone(images)
loss_and_metrics = self.heads_map[task_key.name].loss_and_metrics(
embeddings, labels, is_training)
self.train(training_mode)
return loss_and_metrics
def loss_and_metrics(self, images: torch.Tensor, labels: torch.Tensor,
is_training: bool, task_key: tasks.TaskKey):
return self.forward(images, labels, is_training, task_key)
def predict(self, images: torch.Tensor, task_key: tasks.TaskKey):
training_mode = self.training
self.eval()
with torch.no_grad():
embeddings = self.backbone(images)
outputs = self.heads_map[task_key.name].predict(
embeddings, is_training=False, as_probs=True)
self.train(training_mode)
return [o.cpu().numpy() for o in outputs]
def build_model(model_ctor: Callable[..., Any],
supported_tasks: Iterable[tasks.TaskKey],
head_kwargs: Optional[Dict[str, Any]] = None) -> Model:
"""Constructs a model with a backbone and multiple task heads.
Args:
model_ctor: Constructor for the backbone.
supported_tasks: The tasks that the returned model supports training on.
head_kwargs: kwargs for head constructor.
Returns:
A model implementing the independent baseline strategy.
"""
head_kwargs = head_kwargs or {}
backbone = model_ctor(name="backbone")
heads_map = {}
for task_key in supported_tasks:
heads_map[task_key.name] = heads.build_head(backbone.features_dim,
{task_key}, **head_kwargs)
return Model(backbone, heads_map)
def size_summary(model: nn.Module) -> str:
"""Return a string summarizing the size of the `model` parameters."""
num_params = sum(p.numel() for p in model.parameters())
byte_size = num_params * 4 # 1 float32 == 4 bytes
return f"{num_params} params ({byte_size / 1e6:.2f}MB)"
def param_summary(model: nn.Module) -> str:
"""Return a string with a detailed parameter breakdown."""
return "\n".join([
f" {name}: {param.data.shape} [{param.data.dtype}]"
for name, param in model.named_parameters()
])
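# Example (hedged sketch): building a multihead model and inspecting its
# size. The backbone constructor and task keys are placeholders for whatever
# the experiment actually configures.
#
#   model = build_model(model_ctor=resnet.resnet18,
#                       supported_tasks=[task_key_a, task_key_b])
#   outputs = model.predict(images, task_key_a)
#   print(size_summary(model))  # e.g. "11176512 params (44.71MB)"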
|
dm_nevis-master
|
experiments_torch/training/models.py
|
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for experiments_torch.training.resources."""
from absl.testing import absltest
from absl.testing import parameterized
from experiments_torch.training import resources
import torch
from torch import nn
@absltest.skipThisClass('Need PyTorch >= 1.13')
class ResourcesTest(parameterized.TestCase):
def test_linear(self):
module = nn.Linear(10, 1)
x = torch.ones((2, 10))
def _function():
loss = module(x).mean()
loss.backward()
flops = resources.estimate_flops(module, _function)
self.assertEqual(flops, 80)
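    # Why 80: the forward addmm on the (2, 10) x (10, 1) matmul costs
    # 2 * 10 * 1 = 20 MACs; the backward pass only needs the weight gradient
    # here (the input is a leaf without requires_grad), costing another
    # 20 MACs. 40 MACs * 2 flops per MAC = 80 flops.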
if __name__ == '__main__':
absltest.main()
|
dm_nevis-master
|
experiments_torch/training/resources_test.py
|
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A module to split and load training data."""
from typing import Callable, Iterator
from absl import logging
from dm_nevis.benchmarker.datasets import datasets
import tensorflow as tf
import tensorflow_datasets as tfds
BatchIterator = Iterator[datasets.MiniBatch]
PreprocFn = Callable[[datasets.MiniBatch], datasets.MiniBatch]
# For datasets containing fewer than this number of elements, we cache the
# dataset in memory, before preprocessing is applied. This avoids problematic
# cases where very small datasets require many requests to the underlying
# file storage.
DATASET_SIZE_TO_CACHE = 5_000
def build_train_iterator(dataset: datasets.Dataset, preproc_fn: PreprocFn,
batch_size: int) -> Callable[[], BatchIterator]:
"""Builds functions to iterate over train and validation data."""
def build_iterator() -> BatchIterator:
ds = dataset.builder_fn(shuffle=True)
if dataset.num_examples < DATASET_SIZE_TO_CACHE:
logging.info("Caching dataset with %d elements", dataset.num_examples)
ds = ds.cache()
buffer_size = min(DATASET_SIZE_TO_CACHE, dataset.num_examples)
ds = ds.shuffle(buffer_size, reshuffle_each_iteration=True)
ds = ds.repeat()
    ds = ds.map(preproc_fn, num_parallel_calls=tf.data.AUTOTUNE)
ds = ds.batch(batch_size)
ds = ds.prefetch(10)
return iter(tfds.as_numpy(ds))
return build_iterator
def build_prediction_iterator(dataset: datasets.Dataset, preproc_fn: PreprocFn,
batch_size: int) -> Callable[[], BatchIterator]:
"""Builds an iterator over batches for use in prediction."""
def build_iterator():
ds = dataset.builder_fn(shuffle=False)
ds = ds.map(preproc_fn, num_parallel_calls=tf.data.AUTOTUNE)
ds = ds.batch(batch_size)
ds = ds.prefetch(10)
return iter(tfds.as_numpy(ds))
return build_iterator
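# Example (hedged sketch): `dataset` stands for any `datasets.Dataset` from
# the benchmark stream and `preproc` for a preprocessing function.
#
#   make_iter = build_train_iterator(dataset, preproc, batch_size=128)
#   for batch in make_iter():  # Repeats forever; break when done.
#     ...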
|
dm_nevis-master
|
experiments_torch/training/dataloaders.py
|
dm_nevis-master
|
experiments_torch/training/__init__.py
|
|
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implements customized ResNets that return embeddings and logits."""
from typing import Any, Callable, List, Optional, Type, Union
import torch
from torch import nn
__all__ = ["ResNet", "resnet18", "resnet34", "resnet50", "resnet101"]
def conv3x3(in_planes: int,
out_planes: int,
stride: int = 1,
groups: int = 1,
dilation: int = 1) -> nn.Conv2d:
"""3x3 convolution with padding."""
return nn.Conv2d(
in_planes,
out_planes,
kernel_size=3,
stride=stride,
padding=dilation,
groups=groups,
bias=False,
dilation=dilation,
)
def conv1x1(in_planes: int, out_planes: int, stride: int = 1) -> nn.Conv2d:
"""1x1 convolution."""
return nn.Conv2d(
in_planes, out_planes, kernel_size=1, stride=stride, bias=False)
class BasicBlock(nn.Module):
"""A basic residual block with optional learned downsampling."""
expansion: int = 1
def __init__(
self,
inplanes: int,
planes: int,
stride: int = 1,
downsample: Optional[nn.Module] = None,
groups: int = 1,
base_width: int = 64,
dilation: int = 1,
norm_layer: Optional[Callable[..., nn.Module]] = None,
) -> None:
super().__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
if groups != 1 or base_width != 64:
raise ValueError("BasicBlock only supports groups=1 and base_width=64")
if dilation > 1:
raise NotImplementedError("Dilation > 1 not supported in BasicBlock")
# Both self.conv1 and self.downsample layers downsample the
# input when stride != 1
self.conv1 = conv3x3(inplanes, planes, stride)
self.bn1 = norm_layer(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes)
self.bn2 = norm_layer(planes)
self.downsample = downsample
self.stride = stride
def forward(self, x: torch.Tensor) -> torch.Tensor:
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
out = self.relu(out)
return out
class Bottleneck(nn.Module):
"""A bottleneck residual block."""
  # Bottleneck in torchvision places the stride for downsampling at the 3x3
  # convolution (self.conv2), while the original implementation places the
  # stride at the first 1x1 convolution (self.conv1); see "Deep Residual
  # Learning for Image Recognition", https://arxiv.org/abs/1512.03385.
  # This variant is also known as ResNet V1.5 and improves accuracy according
  # to
  # https://ngc.nvidia.com/catalog/model-scripts/nvidia:resnet_50_v1_5_for_pytorch.
expansion: int = 4
def __init__(
self,
inplanes: int,
planes: int,
stride: int = 1,
downsample: Optional[nn.Module] = None,
groups: int = 1,
base_width: int = 64,
dilation: int = 1,
norm_layer: Optional[Callable[..., nn.Module]] = None,
) -> None:
super().__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
width = int(planes * (base_width / 64.0)) * groups
# Both self.conv2 and self.downsample layers downsample the input
# when stride != 1
self.conv1 = conv1x1(inplanes, width)
self.bn1 = norm_layer(width)
self.conv2 = conv3x3(width, width, stride, groups, dilation)
self.bn2 = norm_layer(width)
self.conv3 = conv1x1(width, planes * self.expansion)
self.bn3 = norm_layer(planes * self.expansion)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
def forward(self, x: torch.Tensor) -> torch.Tensor:
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
out = self.relu(out)
return out
class ResNet(nn.Module):
"""A residual network architecture."""
def __init__(self,
block: Type[Union[BasicBlock, Bottleneck]],
layers: List[int],
num_classes: int = 1000,
zero_init_residual: bool = False,
groups: int = 1,
width_per_group: int = 64,
replace_stride_with_dilation: Optional[List[bool]] = None,
norm_layer: Optional[Callable[..., nn.Module]] = None,
name: Optional[str] = None) -> None:
super().__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
self._norm_layer = norm_layer
self.inplanes = 64
self.dilation = 1
if replace_stride_with_dilation is None:
# each element in the tuple indicates if we should replace
# the 2x2 stride with a dilated convolution instead
replace_stride_with_dilation = [False, False, False]
if len(replace_stride_with_dilation) != 3:
raise ValueError(
"replace_stride_with_dilation should be None "
f"or a 3-element tuple, got {replace_stride_with_dilation}")
self.groups = groups
self.base_width = width_per_group
self.conv1 = nn.Conv2d(
3, self.inplanes, kernel_size=7, stride=2, padding=3, bias=False)
self.bn1 = norm_layer(self.inplanes)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, 64, layers[0])
self.layer2 = self._make_layer(
block, 128, layers[1], stride=2, dilate=replace_stride_with_dilation[0])
self.layer3 = self._make_layer(
block, 256, layers[2], stride=2, dilate=replace_stride_with_dilation[1])
self.layer4 = self._make_layer(
block, 512, layers[3], stride=2, dilate=replace_stride_with_dilation[2])
self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
    self.features_dim = 512 * block.expansion  # 512 (BasicBlock) or 2048.
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode="fan_out", nonlinearity="relu")
elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
# Zero-initialize the last BN in each residual branch,
# so that the residual branch starts with zeros, and each residual block
# behaves like an identity. This improves the model by 0.2~0.3% according
# to https://arxiv.org/abs/1706.02677
if zero_init_residual:
for m in self.modules():
if isinstance(m, Bottleneck) and m.bn3.weight is not None:
nn.init.constant_(m.bn3.weight, 0) # type: ignore[arg-type]
elif isinstance(m, BasicBlock) and m.bn2.weight is not None:
nn.init.constant_(m.bn2.weight, 0) # type: ignore[arg-type]
def _make_layer(
self,
block: Type[Union[BasicBlock, Bottleneck]],
planes: int,
blocks: int,
stride: int = 1,
dilate: bool = False,
) -> nn.Sequential:
norm_layer = self._norm_layer
downsample = None
previous_dilation = self.dilation
if dilate:
self.dilation *= stride
stride = 1
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
conv1x1(self.inplanes, planes * block.expansion, stride),
norm_layer(planes * block.expansion),
)
layers = []
layers.append(
block(self.inplanes, planes, stride, downsample, self.groups,
self.base_width, previous_dilation, norm_layer))
self.inplanes = planes * block.expansion
for _ in range(1, blocks):
layers.append(
block(
self.inplanes,
planes,
groups=self.groups,
base_width=self.base_width,
dilation=self.dilation,
norm_layer=norm_layer,
))
return nn.Sequential(*layers)
def _forward_impl(self, x: torch.Tensor) -> torch.Tensor:
    # Inputs arrive as NHWC; convert to NCHW for the convolutional stack.
    x = x.permute((0, 3, 1, 2))
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.avgpool(x)
x = torch.flatten(x, 1)
return x
def forward(self, x: torch.Tensor) -> torch.Tensor:
return self._forward_impl(x)
def _resnet(
block: Type[Union[BasicBlock, Bottleneck]],
layers: List[int],
**kwargs: Any,
) -> ResNet:
model = ResNet(block, layers, **kwargs)
return model
def resnet18(**kwargs: Any) -> ResNet:
return _resnet(BasicBlock, [2, 2, 2, 2], **kwargs)
def resnet34(**kwargs: Any) -> ResNet:
return _resnet(BasicBlock, [3, 4, 6, 3], **kwargs)
def resnet50(**kwargs: Any) -> ResNet:
return _resnet(Bottleneck, [3, 4, 6, 3], **kwargs)
def resnet101(**kwargs: Any) -> ResNet:
return _resnet(Bottleneck, [3, 4, 23, 3], **kwargs)
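# Example (hedged sketch): these backbones consume NHWC image batches (note
# the permute in `_forward_impl`) and return flat embeddings rather than
# logits; a separate head maps embeddings to predictions.
#
#   backbone = resnet18()
#   images = torch.zeros((8, 224, 224, 3))  # NHWC
#   embeddings = backbone(images)           # shape (8, backbone.features_dim)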
|
dm_nevis-master
|
experiments_torch/training/resnet.py
|
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A module for estimating resource usage of pytorch modules.
Works with PyTorch >=1.13
Thanks to Horace He for the help on this module.
For reference:
https://dev-discuss.pytorch.org/t/the-ideal-pytorch-flop-counter-with-torch-dispatch/505 # pylint: disable=line-too-long
"""
# pylint: disable=unused-argument
# pylint: disable=protected-access
# pylint: disable=missing-function-docstring
# pylint: disable=missing-class-docstring
# pytype: disable=module-attr
# pytype: disable=attribute-error
import collections
import logging
from typing import Any, List, Callable
import torch
from torch import nn
def estimate_flops(module: nn.Module, function: Callable) -> float: # pylint: disable=g-bare-generic
"""Estimate the number of flops.
Only takes in account the matmuls and convs but they make up the majority of
the flops.
Example function with closure could be:
```python
def function():
    optimizer.zero_grad()
    outputs = module(input)
    loss = outputs.sum()
    loss.backward()
    optimizer.step()
```
Args:
module: a pytorch module whose flops will be recorded.
function: a function using that pytorch module
Returns:
The number of flops.
"""
flop_counter = FlopCounterMode(module)
with flop_counter:
function()
return flop_counter.total_flops
def get_shape(i):
return i.shape
def prod(x):
res = 1
for i in x:
res *= i
return res
def matmul_flop(inputs: List[Any], outputs: List[Any]) -> int:
"""Count flops for matmul."""
# Inputs should be a list of length 2.
# Inputs contains the shapes of two matrices.
input_shapes = [get_shape(v) for v in inputs]
assert len(input_shapes) == 2, input_shapes
assert input_shapes[0][-1] == input_shapes[1][-2], input_shapes
flop = prod(input_shapes[0]) * input_shapes[-1][-1]
return flop
def addmm_flop(inputs: List[Any], outputs: List[Any]) -> int:
"""Count flops for fully connected layers."""
# Count flop for nn.Linear
# inputs is a list of length 3.
input_shapes = [get_shape(v) for v in inputs[1:3]]
  # input_shapes[0]: [batch size, input feature dimension]
  # input_shapes[1]: [input feature dimension, output feature dimension]
assert len(input_shapes[0]) == 2, input_shapes[0]
assert len(input_shapes[1]) == 2, input_shapes[1]
batch_size, input_dim = input_shapes[0]
output_dim = input_shapes[1][1]
flops = batch_size * input_dim * output_dim
return flops
def bmm_flop(inputs: List[Any], outputs: List[Any]) -> int:
"""Count flops for the bmm operation."""
# Inputs should be a list of length 2.
# Inputs contains the shapes of two tensor.
assert len(inputs) == 2, len(inputs)
input_shapes = [get_shape(v) for v in inputs]
n, c, t = input_shapes[0]
d = input_shapes[-1][-1]
flop = n * c * t * d
return flop
def conv_flop_count(
x_shape: List[int],
w_shape: List[int],
out_shape: List[int],
transposed: bool = False,
) -> int:
"""Count flops for convolution.
Note only multiplication is
counted. Computation for addition and bias is ignored.
Flops for a transposed convolution are calculated as
flops = (x_shape[2:] * prod(w_shape) * batch_size).
Args:
x_shape: (list(int)): The input shape before convolution.
w_shape: (list(int)): The filter shape.
out_shape: (list(int)): The output shape after convolution.
transposed: (bool): is the convolution transposed
Returns:
int: the number of flops
"""
batch_size = x_shape[0]
conv_shape = (x_shape if transposed else out_shape)[2:]
flop = batch_size * prod(w_shape) * prod(conv_shape)
return flop
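# Worked example: for the ResNet stem convolution (x: [N, 3, 224, 224],
# w: [64, 3, 7, 7], out: [N, 64, 112, 112], not transposed), the count is
# N * prod([64, 3, 7, 7]) * prod([112, 112]) = N * 9408 * 12544,
# i.e. roughly 118M MACs per image.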
def conv_flop(inputs: List[Any], outputs: List[Any]):
"""Count flops for convolution."""
x, w = inputs[:2]
x_shape, w_shape, out_shape = (get_shape(x), get_shape(w),
get_shape(outputs[0]))
transposed = inputs[6]
return conv_flop_count(x_shape, w_shape, out_shape, transposed=transposed)
def transpose_shape(shape):
return [shape[1], shape[0]] + list(shape[2:])
def conv_backward_flop(inputs: List[Any], outputs: List[Any]):
grad_out_shape, x_shape, w_shape = [get_shape(i) for i in inputs[:3]]
output_mask = inputs[-1]
fwd_transposed = inputs[7]
flop_count = 0
if output_mask[0]:
grad_input_shape = get_shape(outputs[0])
flop_count += conv_flop_count(grad_out_shape, w_shape, grad_input_shape,
not fwd_transposed)
if output_mask[1]:
grad_weight_shape = get_shape(outputs[1])
flop_count += conv_flop_count(
transpose_shape(x_shape), grad_out_shape, grad_weight_shape,
fwd_transposed)
return flop_count
try:
aten = torch.ops.aten
flop_mapping = {
aten.mm: matmul_flop,
aten.matmul: matmul_flop,
aten.addmm: addmm_flop,
aten.bmm: bmm_flop,
aten.convolution: conv_flop,
aten._convolution: conv_flop, # pylint: disable=protected-access
aten.convolution_backward: conv_backward_flop,
}
TorchDispatchMode = torch.utils._python_dispatch.TorchDispatchMode
except AttributeError:
aten = None
flop_mapping = {}
TorchDispatchMode = object
  logging.warning('PyTorch version too low for flop counting; need >=1.13.0')
def normalize_tuple(x):
if not isinstance(x, tuple):
return (x,)
return x
class FlopCounterMode(TorchDispatchMode):
"""Context manager to record flops."""
def __init__(self, module=None):
self.flop_counts = collections.defaultdict(
lambda: collections.defaultdict(int))
self.parents = ['Global']
if module is not None:
for name, mod in dict(module.named_children()).items():
mod.register_forward_pre_hook(self.enter_module(name))
mod.register_forward_hook(self.exit_module(name))
self.total_flops = 0.
self.per_module_flops = dict()
def enter_module(self, name):
def f(module, inputs):
self.parents.append(name)
inputs = normalize_tuple(inputs)
out = self.create_backwards_pop(name)(*inputs)
return out
return f
def exit_module(self, name):
def f(module, inputs, outputs):
assert self.parents[-1] == name
self.parents.pop()
outputs = normalize_tuple(outputs)
return self.create_backwards_push(name)(*outputs)
return f
def create_backwards_push(self, name):
class PushState(torch.autograd.Function):
@staticmethod
def forward(ctx, *args):
args = torch.utils._pytree.tree_map(
lambda x: x.clone() if isinstance(x, torch.Tensor) else x, args)
if len(args) == 1:
return args[0]
return args
@staticmethod
def backward(ctx, *grad_outs):
self.parents.append(name)
return grad_outs
return PushState.apply
def create_backwards_pop(self, name):
class PopState(torch.autograd.Function):
@staticmethod
def forward(ctx, *args):
args = torch.utils._pytree.tree_map(
lambda x: x.clone() if isinstance(x, torch.Tensor) else x, args)
if len(args) == 1:
return args[0]
return args
@staticmethod
def backward(ctx, *grad_outs):
assert self.parents[-1] == name
self.parents.pop()
return grad_outs
return PopState.apply
  def __enter__(self):
    self.flop_counts.clear()
    super().__enter__()
    return self
def __exit__(self, *args):
macs = sum(self.flop_counts['Global'].values())
    flops = 2 * macs  # Each MAC counts as two flops (a multiply and an add).
self.total_flops = flops
for mod in self.flop_counts.keys():
self.per_module_flops[mod] = dict()
for k, v in self.flop_counts[mod].items():
mod_macs = v
mod_flops = mod_macs * 2
self.per_module_flops[mod][k] = mod_flops
super().__exit__(*args)
def __torch_dispatch__(self, func, types, args=(), kwargs=None):
kwargs = kwargs if kwargs else {}
out = func(*args, **kwargs)
func_packet = func._overloadpacket # pylint: disable=protected-access
if func_packet in flop_mapping:
flop_count = flop_mapping[func_packet](args, normalize_tuple(out))
for par in self.parents:
self.flop_counts[par][func_packet] += flop_count
return out
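# Example (hedged sketch): using the counter directly, mirroring
# `estimate_flops` above.
#
#   module = nn.Linear(10, 1)
#   counter = FlopCounterMode(module)
#   with counter:
#     module(torch.ones(2, 10)).sum().backward()
#   print(counter.total_flops)       # Flops, i.e. 2 * recorded MACs.
#   print(counter.per_module_flops)  # Per-submodule breakdown.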
|
dm_nevis-master
|
experiments_torch/training/resources.py
|