python_code (string, 0–992k chars) | repo_name (string, 8–46 chars) | file_path (string, 5–162 chars) |
|---|---|---|
import unittest
import torch
SKIP_TEST = None
try:
from apex.contrib.transducer import TransducerJoint
from apex.contrib.transducer import _transducer_ref as transducer_ref
except ImportError as e:
SKIP_TEST = e
@unittest.skipIf(SKIP_TEST, f"{SKIP_TEST}")
class TransducerJointTest(unittest.TestCase):
... | GeneSplice-main | GeneSplice/apex/apex/contrib/test/transducer/test_transducer_joint.py |
import unittest
import torch
SKIP_TEST = None
try:
from apex.contrib.transducer import TransducerLoss
from apex.contrib.transducer import _transducer_ref as transducer_ref
except ImportError as e:
SKIP_TEST = e
@unittest.skipIf(SKIP_TEST, f"{SKIP_TEST}")
class TransducerLossTest(unittest.TestCase):
... | GeneSplice-main | GeneSplice/apex/apex/contrib/test/transducer/test_transducer_loss.py |
GeneSplice-main | GeneSplice/apex/apex/contrib/test/transducer/__init__.py | |
import unittest
import torch
from torch.testing._internal import common_utils
SKIP_TEST = None
from apex.transformer.testing.distributed_test_base import NcclDistributedTestBase
try:
from apex.contrib.peer_memory import PeerMemoryPool, PeerHaloExchanger1d
except ImportError as e:
SKIP_TEST = e
# How to run:
... | GeneSplice-main | GeneSplice/apex/apex/contrib/test/peer_memory/test_peer_halo_exchange_module.py |
GeneSplice-main | GeneSplice/apex/apex/contrib/test/peer_memory/__init__.py | |
###############################################################################
# Copyright (c) 2011-2021, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistribution... | GeneSplice-main | GeneSplice/apex/apex/contrib/test/fmha/test_fmha.py |
GeneSplice-main | GeneSplice/apex/apex/contrib/test/fmha/__init__.py | |
try:
import torch
import focal_loss_cuda
from .focal_loss import focal_loss
del torch
del focal_loss_cuda
del focal_loss
except ImportError as err:
print("apex was installed without --focal_loss flag, apex.contrib.focal_loss is not available")
| GeneSplice-main | GeneSplice/apex/apex/contrib/focal_loss/__init__.py |
import torch
import focal_loss_cuda
class FocalLoss(torch.autograd.Function):
@staticmethod
def forward(
ctx,
cls_output,
cls_targets_at_level,
num_positives_sum,
num_real_classes,
alpha,
gamma,
label_smoothing=0.0,
):
loss, partial_... | GeneSplice-main | GeneSplice/apex/apex/contrib/focal_loss/focal_loss.py |
import torch
import xentropy_cuda
class SoftmaxCrossEntropyLoss(torch.autograd.Function):
@staticmethod
def forward(ctx, logits, labels, smoothing=0.0, padding_idx=0, half_to_float=False):
losses, max_log_sum_exp = xentropy_cuda.forward(
logits, labels, smoothing, half_to_float)
l... | GeneSplice-main | GeneSplice/apex/apex/contrib/xentropy/softmax_xentropy.py |
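The forward signature visible above (logits, labels, smoothing, padding_idx, half_to_float) shows how the fused cross-entropy kernel is invoked. Below is a minimal sketch of calling the autograd Function directly, assuming the xentropy extension is built and a CUDA device is available; the tensor shapes are illustrative only.

```python
import torch
from apex.contrib.xentropy import SoftmaxCrossEntropyLoss

# Hypothetical shapes: 128 tokens, 32k-entry vocabulary.
logits = torch.randn(128, 32000, device="cuda", requires_grad=True)
labels = torch.randint(0, 32000, (128,), device="cuda")

# Positional arguments mirror the forward() signature shown above:
# (logits, labels, smoothing, padding_idx, half_to_float).
losses = SoftmaxCrossEntropyLoss.apply(logits, labels, 0.1, 0, False)
losses.sum().backward()
```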
from .softmax_xentropy import SoftmaxCrossEntropyLoss
__all__ = [
"SoftmaxCrossEntropyLoss",
]
| GeneSplice-main | GeneSplice/apex/apex/contrib/xentropy/__init__.py |
from .layer_norm import FastLayerNorm
| GeneSplice-main | GeneSplice/apex/apex/contrib/layer_norm/__init__.py |
import torch
from torch.nn import init
from apex._autocast_utils import _cast_if_autocast_enabled
import fast_layer_norm
class FastLayerNormFN(torch.autograd.Function):
@staticmethod
def forward(ctx, x, gamma, beta, epsilon):
x = x.contiguous()
gamma = gamma.contiguous()
beta = beta.c... | GeneSplice-main | GeneSplice/apex/apex/contrib/layer_norm/layer_norm.py |
import types
import torch
import importlib
from apex.multi_tensor_apply import multi_tensor_applier
class FusedAdam(torch.optim.Optimizer):
"""Implements Adam algorithm. Currently GPU-only. Requires Apex to be installed via
``python setup.py install --cuda_ext --cpp_ext``.
It has been proposed in `Adam:... | GeneSplice-main | GeneSplice/apex/apex/contrib/optimizers/fused_adam.py |
from .fp16_optimizer import FP16_Optimizer
from .fused_adam import FusedAdam
from .fused_lamb import FusedLAMB
| GeneSplice-main | GeneSplice/apex/apex/contrib/optimizers/__init__.py |
import collections
import contextlib
import enum
import inspect
import io
import itertools
import threading
from typing import List, Optional
import torch
from torch.distributed.distributed_c10d import _get_default_group
from apex.multi_tensor_apply import multi_tensor_applier
import amp_C
import distributed_adam_cuda... | GeneSplice-main | GeneSplice/apex/apex/contrib/optimizers/distributed_fused_adam.py |
import torch
from apex.multi_tensor_apply import multi_tensor_applier
class FP16_Optimizer(object):
"""
    :class:`FP16_Optimizer` is a cutdown version of apex.fp16_utils.FP16_Optimizer.
Designed only to wrap apex.contrib.optimizers.FusedAdam, FusedSGD.
Refer to apex.fp16_utils documents for more information... | GeneSplice-main | GeneSplice/apex/apex/contrib/optimizers/fp16_optimizer.py |
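The docstring above states that this cutdown FP16_Optimizer is designed only to wrap apex.contrib.optimizers.FusedAdam or FusedSGD. A hedged sketch of that wiring follows; the loss-scale keyword and backward() convention are assumed from the apex.fp16_utils API the docstring refers to, and may differ between Apex versions.

```python
import torch
from apex.contrib.optimizers import FusedAdam, FP16_Optimizer

model = torch.nn.Linear(1024, 1024).cuda().half()

# Build the fused optimizer first, then hand it to the FP16 wrapper.
optimizer = FusedAdam(model.parameters(), lr=1e-3)
optimizer = FP16_Optimizer(optimizer, static_loss_scale=128.0)

out = model(torch.randn(8, 1024, device="cuda", dtype=torch.half))
loss = out.float().pow(2).mean()

optimizer.zero_grad()
# The wrapper exposes backward() so it can apply the loss scale itself,
# following the apex.fp16_utils convention mentioned in the docstring.
optimizer.backward(loss)
optimizer.step()
```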
import torch
import importlib
import math
from apex.multi_tensor_apply import multi_tensor_applier
class FusedLAMB(torch.optim.Optimizer):
"""Implements LAMB algorithm.
Currently GPU-only. Requires Apex to be installed via
``pip install -v --no-cache-dir --global-option="--cpp_ext" --global-option="--cu... | GeneSplice-main | GeneSplice/apex/apex/contrib/optimizers/fused_lamb.py |
import types
import torch
from torch.optim.optimizer import Optimizer, required
from apex.multi_tensor_apply import multi_tensor_applier
class FusedSGD(Optimizer):
r"""Implements stochastic gradient descent (optionally with momentum).
This version of fused SGD implements 2 fusions.
* Fusion of the SGD ... | GeneSplice-main | GeneSplice/apex/apex/contrib/optimizers/fused_sgd.py |
import os
import math
import inspect
import torch
import importlib
import amp_C
from apex.multi_tensor_apply import multi_tensor_applier
import torch.distributed.distributed_c10d as c10d
# Fallback to private fields if using older PyTorch version
try:
import torch.distributed.distributed_c10d.get_process_group_ra... | GeneSplice-main | GeneSplice/apex/apex/contrib/optimizers/distributed_fused_lamb.py |
import torch
import torch.distributed as dist
from torch import nn
import nccl_p2p_cuda as inc
import peer_memory_cuda as pm
# Communication-free halo exchanger.
# NB! This halo exchanger does not exchange halos with neighbors as it should; it merely swaps the inputs.
# NB! This is only useful for performance testing.
... | GeneSplice-main | GeneSplice/apex/apex/contrib/bottleneck/halo_exchangers.py |
from .bottleneck import Bottleneck, SpatialBottleneck
from .halo_exchangers import HaloExchangerNoComm, HaloExchangerAllGather, HaloExchangerSendRecv, HaloExchangerPeer
| GeneSplice-main | GeneSplice/apex/apex/contrib/bottleneck/__init__.py |
import torch
from bottleneck import Bottleneck
torch.manual_seed(23337)
# use True to print layerwise sum for all outputs in reference code path
DEBUG = False  # True
for stride, o_channel in [(1,32), (1,128), (2,32)]:
print("testing stride ==", stride, ", in_channel == 32 , out_channel ==", o_channel)
a_ = torc... | GeneSplice-main | GeneSplice/apex/apex/contrib/bottleneck/test.py |
import functools as func
import torch
import torch.distributed as dist
from torch import nn
from apex import check_cudnn_version_and_warn
import fast_bottleneck
import nccl_p2p_cuda as inc
assert check_cudnn_version_and_warn(__name__, 8400)
def kaiming_uniform_(tensor, a=0, mode='fan_in', nonlinearity='leaky_relu... | GeneSplice-main | GeneSplice/apex/apex/contrib/bottleneck/bottleneck.py |
import pdb
import torch
from torch.autograd import gradcheck
from apex import check_cudnn_version_and_warn
import fused_conv_bias_relu
check_cudnn_version_and_warn(__name__, 8400)
class ConvBiasReLU_(torch.autograd.Function):
@staticmethod
@torch.cuda.amp.custom_fwd(cast_inputs=torch.half)
def forward(... | GeneSplice-main | GeneSplice/apex/apex/contrib/conv_bias_relu/conv_bias_relu.py |
from .conv_bias_relu import ConvBiasReLU, ConvBias, ConvBiasMaskReLU, ConvFrozenScaleBiasReLU
| GeneSplice-main | GeneSplice/apex/apex/contrib/conv_bias_relu/__init__.py |
import torch
import fast_multihead_attn
class FastEncdecAttnFunc(torch.autograd.Function):
@staticmethod
def forward(
ctx,
use_time_mask,
is_training,
heads,
inputs_q,
inputs_kv,
input_weights_q,
input_weights_kv,
output_weights,
... | GeneSplice-main | GeneSplice/apex/apex/contrib/multihead_attn/fast_encdec_multihead_attn_func.py |
import torch
import fast_multihead_attn
class MaskSoftmaxDropout(torch.autograd.Function):
@staticmethod
def forward(ctx, is_training, heads, inputs, pad_mask, mask_additive, dropout_prob):
heads_t = torch.tensor([heads])
dropout_prob_t = torch.tensor([dropout_prob])
null_tensor = tor... | GeneSplice-main | GeneSplice/apex/apex/contrib/multihead_attn/mask_softmax_dropout_func.py |
import torch
import torch.nn.functional as F
class SelfAttnFunc(torch.autograd.Function):
@staticmethod
def forward(
ctx,
use_time_mask,
is_training,
heads,
scale,
inputs,
input_weights,
output_weights,
input_biases,
output_biases... | GeneSplice-main | GeneSplice/apex/apex/contrib/multihead_attn/self_multihead_attn_func.py |
from .self_multihead_attn import SelfMultiheadAttn
from .encdec_multihead_attn import EncdecMultiheadAttn
from .mask_softmax_dropout_func import fast_mask_softmax_dropout_func
| GeneSplice-main | GeneSplice/apex/apex/contrib/multihead_attn/__init__.py |
import math
import torch
from torch import nn
from torch.nn import Parameter
import torch.nn.functional as F
from .self_multihead_attn_func import self_attn_func
from .fast_self_multihead_attn_func import fast_self_attn_func
from .fast_self_multihead_attn_norm_add_func import fast_self_attn_norm_add_func
from apex.no... | GeneSplice-main | GeneSplice/apex/apex/contrib/multihead_attn/self_multihead_attn.py |
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
import torch
import fast_multihea... | GeneSplice-main | GeneSplice/apex/apex/contrib/multihead_attn/fast_encdec_multihead_attn_norm_add_func.py |
import torch
import torch.nn.functional as F
class EncdecAttnFunc(torch.autograd.Function):
@staticmethod
def forward(
ctx,
use_time_mask,
is_training,
heads,
scale,
inputs_q,
inputs_kv,
input_weights_q,
input_weights_kv,
output_w... | GeneSplice-main | GeneSplice/apex/apex/contrib/multihead_attn/encdec_multihead_attn_func.py |
import math
import torch
from torch import nn
from torch.nn import Parameter
import torch.nn.functional as F
from .encdec_multihead_attn_func import encdec_attn_func
from .fast_encdec_multihead_attn_func import fast_encdec_attn_func
from .fast_encdec_multihead_attn_norm_add_func import fast_encdec_attn_norm_add_func
... | GeneSplice-main | GeneSplice/apex/apex/contrib/multihead_attn/encdec_multihead_attn.py |
import torch
import fast_multihead_attn
class FastSelfAttnNormAddFunc(torch.autograd.Function):
@staticmethod
def forward(
ctx,
use_time_mask,
is_training,
heads,
inputs,
lyr_nrm_gamma_weights,
lyr_nrm_beta_weights,
input_weights,
output... | GeneSplice-main | GeneSplice/apex/apex/contrib/multihead_attn/fast_self_multihead_attn_norm_add_func.py |
import torch
import fast_multihead_attn
class FastSelfAttnFunc(torch.autograd.Function):
@staticmethod
def forward(
ctx,
use_time_mask,
is_training,
heads,
inputs,
input_weights,
output_weights,
input_biases,
output_biases,
pad_m... | GeneSplice-main | GeneSplice/apex/apex/contrib/multihead_attn/fast_self_multihead_attn_func.py |
import torch
import torch.nn.functional as F
import argparse
from apex.contrib.multihead_attn import SelfMultiheadAttn
from apex.contrib.multihead_attn import EncdecMultiheadAttn
parser = argparse.ArgumentParser(description='Multihead Attention Standalone Test')
parser.add_argument('--seq-length', default=64, type=in... | GeneSplice-main | GeneSplice/apex/apex/contrib/examples/multihead_attn/perf_test_multihead_attn.py |
import torch
import torch.nn.functional as F
import argparse
from apex.contrib.multihead_attn import SelfMultiheadAttn
from apex.contrib.multihead_attn import EncdecMultiheadAttn
parser = argparse.ArgumentParser(description='Multihead Attention Standalone Test')
parser.add_argument('--seq-length', default=64, type=in... | GeneSplice-main | GeneSplice/apex/apex/contrib/examples/multihead_attn/func_test_multihead_attn.py |
from .clip_grad import clip_grad_norm_
| GeneSplice-main | GeneSplice/apex/apex/contrib/clip_grad/__init__.py |
from typing import Union, Iterable
import torch
_kernel_import_succeeded = False
try:
import amp_C
from apex.multi_tensor_apply import multi_tensor_applier
_kernel_import_succeeded = True
except ImportError:
_kernel_import_succeeded = False
_tensor_or_tensors = Union[torch.Tensor, Iterable[torch.Tens... | GeneSplice-main | GeneSplice/apex/apex/contrib/clip_grad/clip_grad.py |
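apex.contrib.clip_grad.clip_grad_norm_ is intended as a multi-tensor-fused counterpart of torch.nn.utils.clip_grad_norm_, falling back to plain PyTorch when amp_C cannot be imported (the _kernel_import_succeeded flag above). A minimal sketch, assuming the torch-compatible call signature:

```python
import torch
from apex.contrib.clip_grad import clip_grad_norm_

model = torch.nn.Linear(32, 32).cuda()
loss = model(torch.randn(4, 32, device="cuda")).sum()
loss.backward()

# Same call shape as torch.nn.utils.clip_grad_norm_:
# clip the global L2 norm of all gradients to 1.0.
total_norm = clip_grad_norm_(model.parameters(), max_norm=1.0, norm_type=2.0)
print(float(total_norm))
```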
import torch
import transducer_loss_cuda
import transducer_joint_cuda
class TransducerJoint(torch.nn.Module):
"""Transducer joint
Detail of this loss function can be found in: Sequence Transduction with Recurrent Neural
Networks
Arguments:
pack_output (bool, optional): whether to pack the out... | GeneSplice-main | GeneSplice/apex/apex/contrib/transducer/transducer.py |
from .transducer import TransducerJoint
from .transducer import TransducerLoss
from . import _transducer_ref
| GeneSplice-main | GeneSplice/apex/apex/contrib/transducer/__init__.py |
import torch
def transducer_loss_reference(x, label, f_len, y_len, blank_idx, loss_grad):
def log_sum_exp(a, b):
if (a >= b):
return a + torch.log(1 + torch.exp(b-a))
else:
return b + torch.log(1 + torch.exp(a-b))
def forward_alpha(x, label, f_len, y_len, blank_idx):
... | GeneSplice-main | GeneSplice/apex/apex/contrib/transducer/_transducer_ref.py |
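The reference implementation's log_sum_exp(a, b) is the standard numerically stable form, max(a, b) + log(1 + exp(-|a - b|)), which avoids overflow for large-magnitude log-probabilities. A small self-contained check against torch.logaddexp (scalar tensors, as in the reference code above); log1p is used here purely for the sketch:

```python
import torch

def log_sum_exp(a, b):
    # Stable log(exp(a) + exp(b)): pull the larger term out of the logarithm.
    if a >= b:
        return a + torch.log1p(torch.exp(b - a))
    return b + torch.log1p(torch.exp(a - b))

a, b = torch.tensor(-1000.0), torch.tensor(-1001.0)
print(log_sum_exp(a, b))        # finite, roughly -999.69
print(torch.logaddexp(a, b))    # built-in equivalent for comparison
```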
import torch
from apex.contrib.peer_memory import PeerMemoryPool
import peer_memory_cuda as pm
class PeerHaloExchanger1d:
def __init__(self, ranks, rank_in_group, peer_pool, half_halo):
self.peer_group_size = len(ranks)
self.ranks = ranks
self.peer_rank = rank_in_group
self.low_neig... | GeneSplice-main | GeneSplice/apex/apex/contrib/peer_memory/peer_halo_exchanger_1d.py |
from .peer_memory import PeerMemoryPool
from .peer_halo_exchanger_1d import PeerHaloExchanger1d
| GeneSplice-main | GeneSplice/apex/apex/contrib/peer_memory/__init__.py |
import torch
import numpy as np
import peer_memory_cuda as pm
class PeerMemoryPool(object):
def __init__(self, static_size, dynamic_size, peer_ranks=None):
rank = torch.distributed.get_rank()
world_size = torch.distributed.get_world_size()
ngpus = min(torch.cuda.device_count(), world_size)... | GeneSplice-main | GeneSplice/apex/apex/contrib/peer_memory/peer_memory.py |
###############################################################################
# Copyright (c) 2011-2021, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributio... | GeneSplice-main | GeneSplice/apex/apex/contrib/fmha/fmha.py |
from .fmha import FMHAFun
| GeneSplice-main | GeneSplice/apex/apex/contrib/fmha/__init__.py |
import torch
from apex.multi_tensor_apply import multi_tensor_applier
class FusedNovoGrad(torch.optim.Optimizer):
"""Implements NovoGrad algorithm.
Currently GPU-only. Requires Apex to be installed via
``pip install -v --no-cache-dir --global-option="--cpp_ext" --global-option="--cuda_ext" ./``.
Th... | GeneSplice-main | GeneSplice/apex/apex/optimizers/fused_novograd.py |
import torch
from apex.multi_tensor_apply import multi_tensor_applier
class FusedAdam(torch.optim.Optimizer):
"""Implements Adam algorithm.
Currently GPU-only. Requires Apex to be installed via
``pip install -v --no-cache-dir --global-option="--cpp_ext" --global-option="--cuda_ext" ./``.
This versi... | GeneSplice-main | GeneSplice/apex/apex/optimizers/fused_adam.py |
from .fused_sgd import FusedSGD
from .fused_adam import FusedAdam
from .fused_novograd import FusedNovoGrad
from .fused_lamb import FusedLAMB
from .fused_adagrad import FusedAdagrad
from .fused_mixed_precision_lamb import FusedMixedPrecisionLamb
| GeneSplice-main | GeneSplice/apex/apex/optimizers/__init__.py |
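The fused optimizers exported here are meant as drop-in replacements for their torch.optim counterparts once the CUDA/C++ extensions are built (per the install command quoted in their docstrings). A minimal sketch using only the common constructor arguments; hyperparameter values are illustrative:

```python
import torch
from apex.optimizers import FusedAdam

model = torch.nn.Linear(512, 512).cuda()

# Drop-in for torch.optim.Adam; the fused kernel batches the per-parameter
# update into a single multi_tensor_applier launch.
optimizer = FusedAdam(model.parameters(), lr=1e-3, weight_decay=0.01)

loss = model(torch.randn(16, 512, device="cuda")).sum()
loss.backward()
optimizer.step()
optimizer.zero_grad()
```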
import torch
from apex.multi_tensor_apply import multi_tensor_applier
class FusedAdagrad(torch.optim.Optimizer):
"""Implements Adagrad algorithm.
Currently GPU-only. Requires Apex to be installed via
``pip install -v --no-cache-dir --global-option="--cpp_ext" --global-option="--cuda_ext" ./``.
This... | GeneSplice-main | GeneSplice/apex/apex/optimizers/fused_adagrad.py |
import torch
from copy import deepcopy
from itertools import chain
from collections import defaultdict, abc as container_abcs
from apex.multi_tensor_apply import multi_tensor_applier
class FusedMixedPrecisionLamb(torch.optim.Optimizer):
def __init__(self, params, lr=1e-3, step=0, bias_correction=True,
... | GeneSplice-main | GeneSplice/apex/apex/optimizers/fused_mixed_precision_lamb.py |
import torch
from apex.multi_tensor_apply import multi_tensor_applier
class FusedLAMB(torch.optim.Optimizer):
"""Implements LAMB algorithm.
Currently GPU-only. Requires Apex to be installed via
``pip install -v --no-cache-dir --global-option="--cpp_ext" --global-option="--cuda_ext" ./``.
This versi... | GeneSplice-main | GeneSplice/apex/apex/optimizers/fused_lamb.py |
import torch
from torch.optim.optimizer import Optimizer, required
from apex.multi_tensor_apply import multi_tensor_applier
class FusedSGD(Optimizer):
r"""Implements stochastic gradient descent (optionally with momentum).
Currently GPU-only. Requires Apex to be installed via
``pip install -v --no-cache-... | GeneSplice-main | GeneSplice/apex/apex/optimizers/fused_sgd.py |
import types
from ..fp16_utils import master_params_to_model_params
from ..multi_tensor_apply import multi_tensor_applier
from ._amp_state import maybe_print
import torch
from ..optimizers import FusedSGD
class AmpOptimizerState(object):
def __init__(self):
pass
def _master_params_to_model_params(self):... | GeneSplice-main | GeneSplice/apex/apex/amp/_process_optimizer.py |
import torch
# True for post-0.4, when Variables/Tensors merged.
def variable_is_tensor():
v = torch.autograd.Variable()
return isinstance(v, torch.Tensor)
def tensor_is_variable():
x = torch.Tensor()
return type(x) == torch.autograd.Variable
# False for post-0.4
def tensor_is_float_tensor():
x =... | GeneSplice-main | GeneSplice/apex/apex/amp/compat.py |
import contextlib
import warnings
import sys
import torch
from . import utils
from .opt import OptimWrapper
from .scaler import LossScaler
from ._amp_state import _amp_state, master_params, maybe_print
if torch.distributed.is_available():
from ..parallel.LARC import LARC
# There's no reason to expose the notion... | GeneSplice-main | GeneSplice/apex/apex/amp/handle.py |
import collections.abc as container_abcs
from types import MethodType
import functools
import sys
import warnings
import numpy as np
import torch
from ._amp_state import _amp_state, warn_or_err
from .handle import disable_casts
from .scaler import LossScaler
from ._process_optimizer import _process_optimizer
from ape... | GeneSplice-main | GeneSplice/apex/apex/amp/_initialize.py |
import functools
import itertools
import torch
from . import compat, rnn_compat, utils, wrap
from .handle import AmpHandle, NoOpHandle
from .lists import functional_overrides, torch_overrides, tensor_overrides
from ._amp_state import _amp_state
from .frontend import *
_DECORATOR_HANDLE = None
_USER_CAST_REGISTRY = ... | GeneSplice-main | GeneSplice/apex/apex/amp/amp.py |
from collections import OrderedDict
import torch
from ._initialize import _initialize
from ._amp_state import _amp_state, warn_or_err, maybe_print
class Properties(object):
"""
This class has two purposes: to establish a set of default properties,
and to route setting of these attributes through __setat... | GeneSplice-main | GeneSplice/apex/apex/amp/frontend.py |
from .amp import init, half_function, float_function, promote_function,\
register_half_function, register_float_function, register_promote_function
from .handle import scale_loss, disable_casts
from .frontend import initialize, state_dict, load_state_dict
from ._amp_state import master_params, _amp_state
| GeneSplice-main | GeneSplice/apex/apex/amp/__init__.py |
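The names exported here (initialize, scale_loss, the register_* helpers, etc.) form the documented amp front end. A condensed sketch of the canonical usage; opt_level "O1" is the commonly documented mixed-precision setting, and the model/data here are placeholders:

```python
import torch
from apex import amp

model = torch.nn.Linear(64, 64).cuda()
optimizer = torch.optim.SGD(model.parameters(), lr=1e-2)

# Patch the model/optimizer pair for mixed precision.
model, optimizer = amp.initialize(model, optimizer, opt_level="O1")

loss = model(torch.randn(8, 64, device="cuda")).sum()

# scale_loss applies (and later unscales) the loss scale around backward().
with amp.scale_loss(loss, optimizer) as scaled_loss:
    scaled_loss.backward()
optimizer.step()
```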
import torch
from ..multi_tensor_apply import multi_tensor_applier
from ._amp_state import _amp_state, master_params, maybe_print
from itertools import product
def scale_check_overflow_python(model_grad, master_grad, scale, check_overflow=False):
# Exception handling for 18.04 compatibility
if check_overflow:
... | GeneSplice-main | GeneSplice/apex/apex/amp/scaler.py |
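From the signature above, scale_check_overflow_python copies a model gradient into a master-weight gradient buffer while applying the loss scale, optionally reporting overflow first so the caller can skip the step. A rough pure-PyTorch sketch of that behaviour; the real function may differ in details:

```python
import torch

def scale_check_overflow_sketch(model_grad, master_grad, scale, check_overflow=False):
    # Optionally report inf/nan before touching the master gradient, so the
    # caller can skip the optimizer step and adjust the loss scale.
    if check_overflow:
        cpu_sum = float(model_grad.float().sum())
        if cpu_sum in (float("inf"), -float("inf")) or cpu_sum != cpu_sum:
            return True

    # Copy into the (possibly fp32) master buffer and apply the scale in place.
    if master_grad is not model_grad:
        master_grad.copy_(model_grad)
    if scale != 1.0:
        master_grad.mul_(scale)
    return False
```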
VERSION = (0, 1, 0)
__version__ = '.'.join(map(str, VERSION))
| GeneSplice-main | GeneSplice/apex/apex/amp/__version__.py |
import contextlib
import warnings
from .scaler import LossScaler, master_params
from ._amp_state import maybe_print
import numpy as np
class OptimWrapper(object):
def __init__(self, optimizer, amp_handle, num_loss):
self._optimizer = optimizer
self._amp_handle = amp_handle
self._num_loss ... | GeneSplice-main | GeneSplice/apex/apex/amp/opt.py |
# This is a "header object" that allows different amp modules to communicate.
# I'm a C++ guy, not a python guy. I decided this approach because it seemed most C++-like.
# But apparently it's ok:
# http://effbot.org/pyfaq/how-do-i-share-global-variables-across-modules.htm
import torch
class AmpState(object):
def... | GeneSplice-main | GeneSplice/apex/apex/amp/_amp_state.py |
from . import compat
import functools
import itertools
import torch
def is_cuda_enabled():
return torch.version.cuda is not None
def get_cuda_version():
return tuple(int(x) for x in torch.version.cuda.split('.'))
def is_fp_tensor(x):
if is_nested(x):
# Fast-fail version of all(is_fp_tensor)
... | GeneSplice-main | GeneSplice/apex/apex/amp/utils.py |
from . import compat
from . import utils
from ._amp_state import _amp_state
from . import rnn_compat
import functools
import torch
def make_cast_wrapper(orig_fn, cast_fn, handle,
try_caching=False):
@functools.wraps(orig_fn)
def wrapper(*args, **kwargs):
if not handle.is_active(... | GeneSplice-main | GeneSplice/apex/apex/amp/wrap.py |
from . import utils, wrap
import torch
_VF = torch._C._VariableFunctions
RNN_NAMES = ['rnn_relu', 'rnn_tanh', 'gru', 'lstm']
def _gen_VF_wrapper(name):
def wrapper(*args, **kwargs):
return getattr(_VF, name)(*args, **kwargs)
return wrapper
# Some python magic to generate an object that has the rnn ce... | GeneSplice-main | GeneSplice/apex/apex/amp/rnn_compat.py |
GeneSplice-main | GeneSplice/apex/apex/amp/lists/__init__.py | |
import torch
from .. import utils
MODULE = torch
FP16_FUNCS = [
# Low level functions wrapped by torch.nn layers.
# The wrapper layers contain the weights which are then passed in as a parameter
# to these functions.
'conv1d',
'conv2d',
'conv3d',
'conv_transpose1d',
'conv_transpose2d'... | GeneSplice-main | GeneSplice/apex/apex/amp/lists/torch_overrides.py |
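These lists drive which torch functions amp wraps to run in fp16. User code can hook into the same machinery through the register_* helpers exported from apex.amp (visible in the package __init__ above). A minimal sketch with a hypothetical user-defined container standing in for a real module:

```python
from apex import amp

# Hypothetical user library whose heavy matmul-style op should run in fp16.
class mylib:
    @staticmethod
    def fused_projection(x, weight):
        return x @ weight

# Must be called before amp.initialize(); amp will then wrap the attribute
# with the same cast-to-half logic used for the FP16_FUNCS listed above.
amp.register_half_function(mylib, "fused_projection")
```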
# TODO: think about the following two. They do weird things.
# - torch.nn.utils.clip_grad (but it should always be fp32 anyway)
# - torch.nn.utils.weight_norm
# Notes:
# F.instance_norm uses batch_norm internally. Which correctly handles
# fp16 in/out with fp32 weights. So we shouldn't do anything for
# either of... | GeneSplice-main | GeneSplice/apex/apex/amp/lists/functional_overrides.py |
from .. import compat
from . import torch_overrides
import importlib
import torch
# if compat.variable_is_tensor() and not compat.tensor_is_variable():
MODULE = torch.Tensor
# else:
# MODULE = torch.autograd.Variable
FP16_FUNCS = compat.filter_attrs(MODULE, [
'__matmul__',
])
FP32_FUNCS = compat.filter_at... | GeneSplice-main | GeneSplice/apex/apex/amp/lists/tensor_overrides.py |
import torch
import torch.nn as nn
from torch.autograd import Variable
import torch.nn.functional as F
import math
def is_iterable(maybe_iterable):
return isinstance(maybe_iterable, list) or isinstance(maybe_iterable, tuple)
def flatten_list(tens_list):
"""
flatten_list
"""
if not is_iterable(... | GeneSplice-main | GeneSplice/apex/apex/RNN/RNNBackend.py |
import torch
from torch.nn._functions.rnn import LSTMCell, RNNReLUCell, RNNTanhCell, GRUCell
from apex import deprecated_warning
from .RNNBackend import bidirectionalRNN, stackedRNN, RNNCell
from .cells import mLSTMRNNCell, mLSTMCell
def toRNNBackend(inputRNN, num_layers, bidirectional=False, dropout = 0):
"""
... | GeneSplice-main | GeneSplice/apex/apex/RNN/models.py |
from .models import LSTM, GRU, ReLU, Tanh, mLSTM
__all__ = ['models']
| GeneSplice-main | GeneSplice/apex/apex/RNN/__init__.py |
import torch
import torch.nn as nn
import torch.nn.functional as F
from .RNNBackend import RNNCell
from torch.nn._functions.thnn import rnnFusedPointwise as fusedBackend
import math
class mLSTMRNNCell(RNNCell):
"""
mLSTMRNNCell
"""
def __init__(self, input_size, hidden_size, bias = False, output_... | GeneSplice-main | GeneSplice/apex/apex/RNN/cells.py |
from .mlp import *
| GeneSplice-main | GeneSplice/apex/apex/mlp/__init__.py |
from copy import copy
import math
import torch
from torch import nn
from apex._autocast_utils import _cast_if_autocast_enabled
import mlp_cuda
class MlpFunction(torch.autograd.Function):
@staticmethod
def forward(ctx, bias, activation, *args):
output = mlp_cuda.forward(bias, activation, args)
... | GeneSplice-main | GeneSplice/apex/apex/mlp/mlp.py |
import torch
import torch.distributed as dist
from torch.nn import Parameter
from torch.nn import Module
from apex.parallel import DistributedDataParallel as DDP
import argparse
import os
parser = argparse.ArgumentParser(description='allreduce hook example')
parser.add_argument("--local_rank", default=0, type=int)
ar... | GeneSplice-main | GeneSplice/apex/tests/distributed/DDP/ddp_race_condition_test.py |
import torch
import argparse
import os
from apex import amp
# FOR DISTRIBUTED: (can also use torch.nn.parallel.DistributedDataParallel instead)
from apex.parallel import DistributedDataParallel
parser = argparse.ArgumentParser()
# FOR DISTRIBUTED: Parse for the local_rank argument, which will be supplied
# automatica... | GeneSplice-main | GeneSplice/apex/tests/distributed/amp_master_params/amp_master_params.py |
import torch
model_params_rank0 = torch.load("rank0model.pth",
map_location = lambda storage, loc: storage.cuda(0))
model_params_rank1 = torch.load("rank1model.pth",
map_location = lambda storage, loc: storage.cuda(0))
master_params_rank0 = torch.load("rank0m... | GeneSplice-main | GeneSplice/apex/tests/distributed/amp_master_params/compare.py |
import torch
import apex
model = apex.parallel.SyncBatchNorm(4).cuda()
model.weight.data.uniform_()
model.bias.data.uniform_()
data = torch.rand((8,4)).cuda()
model_ref = torch.nn.BatchNorm1d(4).cuda()
model_ref.load_state_dict(model.state_dict())
data_ref = data.clone()
output = model(data)
output_ref = model_ref(d... | GeneSplice-main | GeneSplice/apex/tests/distributed/synced_batchnorm/test_batchnorm1d.py |
import torch
import numpy as np
import apex
import syncbn
import os
import argparse
import torch.optim as optim
def compare(desc, inp1, inp2, error):
a = inp1.clone().detach().cpu().numpy()
b = inp2.clone().detach().cpu().numpy()
close = np.allclose(a,b, error, error)
if not close:
print(desc, ... | GeneSplice-main | GeneSplice/apex/tests/distributed/synced_batchnorm/two_gpu_unit_test.py |
import torch
import torch.nn as nn
from torch.nn.parallel import DistributedDataParallel as DDP
from apex.parallel import SyncBatchNorm as ApexSyncBatchNorm
import argparse
import os
import numpy as np
var_batch = 16
def compare(desc, inp1, inp2, error= 1e-5):
a = inp1.clone().detach().cpu().numpy()
b = inp2... | GeneSplice-main | GeneSplice/apex/tests/distributed/synced_batchnorm/two_gpu_test_different_batch_size.py |
import torch
import numpy as np
import apex
if True:
print("using setup tools")
import syncbn
else:
print("using jit")
from torch.utils.cpp_extension import load
syncbn = load(name='syncbn', sources=['../../csrc/syncbn.cpp', '../../csrc/welford.cu'])
def compare(desc, inp1, inp2, error):
a = in... | GeneSplice-main | GeneSplice/apex/tests/distributed/synced_batchnorm/single_gpu_unit_test.py |
import torch
import numpy as np
import apex
import syncbn
import os
import argparse
import torch.optim as optim
def compare(desc, inp1, inp2, error):
a = inp1.clone().detach().cpu().numpy()
b = inp2.clone().detach().cpu().numpy()
close = np.allclose(a,b, error, error)
if not close:
print(desc, ... | GeneSplice-main | GeneSplice/apex/tests/distributed/synced_batchnorm/test_groups.py |
import torch
import numpy as np
import apex
def compare(desc, inp1, inp2, error):
a = inp1.clone().detach().cpu().numpy()
b = inp2.clone().detach().cpu().numpy()
close = np.allclose(a,b, error, error)
if not close:
print(desc, close)
z = a - b
index = (np.abs(z) >= error + error... | GeneSplice-main | GeneSplice/apex/tests/distributed/synced_batchnorm/python_single_gpu_unit_test.py |
"""L0 Tests Runner.
How to run this script?
1. Run all the tests: `python /path/to/apex/tests/L0/run_test.py` If you want an xml report,
pass `--xml-report`, i.e. `python /path/to/apex/tests/L0/run_test.py --xml-report` and
the file is created in `/path/to/apex/tests/L0`.
2. Run one of the tests (e.g. fused l... | GeneSplice-main | GeneSplice/apex/tests/L0/run_test.py |
import torch
from apex.normalization import FusedLayerNorm
from apex.normalization import FusedRMSNorm
from apex.normalization import MixedFusedLayerNorm
from apex.normalization import MixedFusedRMSNorm
from torch.testing._internal import common_utils
from torch.testing._internal.common_device_type import instantiate_... | GeneSplice-main | GeneSplice/apex/tests/L0/run_fused_layer_norm/test_fused_layer_norm.py |
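The normalization modules imported by this test are drop-in replacements for torch.nn.LayerNorm (plus an RMSNorm variant) backed by fused CUDA kernels. A minimal usage sketch, assuming the extensions are built and a CUDA device is available:

```python
import torch
from apex.normalization import FusedLayerNorm, FusedRMSNorm

hidden = 1024
x = torch.randn(8, 128, hidden, device="cuda", dtype=torch.float16)

# Same constructor arguments as torch.nn.LayerNorm.
ln = FusedLayerNorm(hidden).cuda().half()
rms = FusedRMSNorm(hidden).cuda().half()

print(ln(x).shape, rms(x).shape)
```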
import unittest
import os
import torch
from torch.optim import Optimizer
import apex
from apex.multi_tensor_apply import multi_tensor_applier
from itertools import product
class RefLAMB(Optimizer):
r"""Implements Lamb algorithm.
It has been proposed in `Large Batch Optimization for Deep Learning: Training BE... | GeneSplice-main | GeneSplice/apex/tests/L0/run_optimizers/test_lamb.py |
GeneSplice-main | GeneSplice/apex/tests/L0/run_optimizers/__init__.py | |
import copy
import math
import random
import unittest
import torch
import torch.nn.functional as F
from torch import nn
try:
import apex
except ImportError as e:
HAS_APEX = False
else:
HAS_APEX = True
class Model(torch.nn.Module):
def __init__(self):
super(Model, self).__init__()
sel... | GeneSplice-main | GeneSplice/apex/tests/L0/run_optimizers/test_adam.py |
from itertools import product
import random
import unittest
import torch
import apex
class TestFusedOptimizer(unittest.TestCase):
def setUp(self, max_abs_diff=1e-3, max_rel_diff=1, iters=7):
self.max_abs_diff = max_abs_diff
self.max_rel_diff = max_rel_diff
self.iters = iters
torc... | GeneSplice-main | GeneSplice/apex/tests/L0/run_optimizers/test_fused_optimizer.py |
import torch
from torch.optim import Optimizer
import math
import apex
import unittest
from test_fused_optimizer import TestFusedOptimizer
from itertools import product
class Novograd(Optimizer):
"""
Implements Novograd algorithm.
Args:
params (iterable): iterable of parameters to optimize or dic... | GeneSplice-main | GeneSplice/apex/tests/L0/run_optimizers/test_fused_novograd.py |
import logging
import unittest
import torch
from torch.testing._internal import common_utils
logging.getLogger("torch").setLevel(logging.WARNING)
from apex.transformer import parallel_state
from apex.transformer.pipeline_parallel import p2p_communication
from apex.transformer.testing.distributed_test_base import Ncc... | GeneSplice-main | GeneSplice/apex/tests/L0/run_transformer/test_p2p_comm.py |
import subprocess
import os
from apex.transformer.testing.commons import TEST_SUCCESS_MESSAGE
def run_gpt(cmd):
args = list(cmd.split(" "))
p = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
outs, errs = p.communicate()
outs = list(str((outs).decode("utf-8")).splitlines())
... | GeneSplice-main | GeneSplice/apex/tests/L0/run_transformer/gpt_scaling_test.py |
from typing import Tuple, List
import torch
import unittest
from apex.transformer import parallel_state
from apex.transformer.pipeline_parallel.utils import get_num_microbatches
from apex.transformer.pipeline_parallel.schedules.common import (
_get_params_for_weight_decay_optimization, build_model
)
from apex.tra... | GeneSplice-main | GeneSplice/apex/tests/L0/run_transformer/test_dynamic_batchsize.py |
"""Test for fused softmax functions.
Ref: https://github.com/NVIDIA/Megatron-LM/blob/40becfc96c4144985458ac0e0fae45dbb111fbd2/megatron/fused_kernels/tests/test_fused_kernels.py
""" # NOQA
import itertools
import torch
from torch.testing._internal import common_utils
from apex.transformer import AttnMaskType
from ap... | GeneSplice-main | GeneSplice/apex/tests/L0/run_transformer/test_fused_softmax.py |
import logging
from typing import Tuple
import torch
import torch.nn.functional as F
from torch.testing._internal import common_utils
logging.getLogger("torch").setLevel(logging.WARNING)
from apex.transformer import parallel_state
from apex.transformer import tensor_parallel
from apex.transformer.tensor_parallel imp... | GeneSplice-main | GeneSplice/apex/tests/L0/run_transformer/test_cross_entropy.py |