Dataset preview. Columns:
  python_code: string, length 0 to 992k characters
  repo_name: string, length 8 to 46 characters
  file_path: string, length 5 to 162 characters

Each record below lists its (truncated) python_code value first, followed by the repo_name and file_path that code belongs to; records whose python_code is empty (for example, many __init__.py files) show only repo_name and file_path.
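A minimal sketch of iterating rows with this schema, assuming the data is exposed as a Hugging Face `datasets` dataset; the Hub identifier `org/python-code-dataset` below is a hypothetical placeholder, since the actual dataset name is not stated in this preview.

```python
# Minimal sketch, not the dataset's official loading code.
# "org/python-code-dataset" is a placeholder identifier, not a real dataset name.
from datasets import load_dataset

ds = load_dataset("org/python-code-dataset", split="train")

# Each row has three string fields: python_code, repo_name, file_path.
for row in ds.select(range(3)):
    print(row["repo_name"], row["file_path"])
    # python_code may be empty (e.g. an __init__.py) or up to ~992k characters,
    # so only a short prefix is printed here.
    print(row["python_code"][:200])
```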
FLASHATTENION-LION-OPTIMIZE-main
training/src/callbacks/__init__.py
# Adapted from https://github.com/Lightning-AI/lightning/blob/master/src/pytorch_lightning/callbacks/fault_tolerance.py from typing import Any from pathlib import Path import pytorch_lightning as pl class ModelCheckpointMine(pl.callbacks.model_checkpoint.ModelCheckpoint): def __init__(self, *args, fault_toleran...
FLASHATTENION-LION-OPTIMIZE-main
training/src/callbacks/model_checkpoint.py
from typing import Any from pytorch_lightning import Callback, Trainer, LightningModule from pytorch_lightning.utilities import rank_zero_only from pytorch_lightning.utilities.parsing import AttributeDict class ParamsLog(Callback): """Log the number of parameters of the model """ def __init__(self, total...
FLASHATTENION-LION-OPTIMIZE-main
training/src/callbacks/params_log.py
# Adapted from https://github.com/Lightning-AI/lightning/blob/master/src/pytorch_lightning/callbacks/lr_monitor.py. from typing import Any from pytorch_lightning import Callback, Trainer from pytorch_lightning.utilities import rank_zero_only from pytorch_lightning.strategies import DeepSpeedStrategy class LossScaleM...
FLASHATTENION-LION-OPTIMIZE-main
training/src/callbacks/loss_scale_monitor.py
# Adapted from https://github.com/huggingface/transformers/blob/master/examples/pytorch/language-modeling/run_clm.py from itertools import chain from pathlib import Path import pickle from typing import Any, List, Union import subprocess import mmap from multiprocessing.shared_memory import SharedMemory import numpy ...
FLASHATTENION-LION-OPTIMIZE-main
training/src/datamodules/language_modeling_hf.py
# Adapted from https://github.com/PyTorchLightning/lightning-bolts/blob/master/pl_bolts/datamodules/imagenet_datamodule.py import os from pathlib import Path from typing import Any, List, Union, Callable, Optional import torch from torch.utils.data import Dataset, DataLoader, SequentialSampler from torch.utils.data.da...
FLASHATTENION-LION-OPTIMIZE-main
training/src/datamodules/imagenet.py
import torch from timm.data import Mixup from timm.data.mixup import mixup_target class TimmMixup(Mixup): """ Wrap timm.data.Mixup that avoids the assert that batch size must be even. """ def __call__(self, x, target): if self.mode == 'elem': lam = self._mix_elem(x) elif self....
FLASHATTENION-LION-OPTIMIZE-main
training/src/datamodules/timm_mixup.py
# Adapted from https://github.com/Lightning-AI/lightning/blob/2845e7565dbe6b765ae32870e7d2bc456529c30a/tests/tests_pytorch/utilities/test_auto_restart.py#L1397 from typing import Iterator import math import torch from torch.utils.data import RandomSampler, DistributedSampler class RandomFaultTolerantSampler(RandomSa...
FLASHATTENION-LION-OPTIMIZE-main
training/src/datamodules/fault_tolerant_sampler.py
# Copied from https://github.com/stanford-crfm/mistral/blob/main/src/corpora/detokenization.py # Which was originally from https://github.com/NVIDIA/Megatron-LM/blob/aed2f75e209e525c842aec7c044af7acae2a4614/tasks/zeroshot_gpt/detokenizer.py """ Handle detokenization for different dataset for zero-shot LM evaluation. "...
FLASHATTENION-LION-OPTIMIZE-main
training/src/datamodules/datasets/detokenizer.py
# Inspired by https://github.com/NVIDIA/Megatron-LM/blob/main/tasks/zeroshot_gpt/datasets.py # Except we don't pad the last block and don't use overlapping eval # And we return both the input and the target import math import numpy as np import torch class LMDataset(torch.utils.data.Dataset): def __init__(self,...
FLASHATTENION-LION-OPTIMIZE-main
training/src/datamodules/datasets/lm_dataset.py
import inspect import torch.nn as nn import hydra try: from apex.contrib.layer_norm import FastLayerNorm except ImportError: FastLayerNorm = None from src.models.modules.seq_common import PositionalEncoding def group_parameters_for_optimizer(model, optimizer_cfg, bias_weight_decay=False, ...
FLASHATTENION-LION-OPTIMIZE-main
training/src/optim/param_grouping.py
import torch from torch.optim import Optimizer from timm.scheduler import CosineLRScheduler # We need to subclass torch.optim.lr_scheduler._LRScheduler, or Pytorch-lightning will complain class TimmCosineLRScheduler(CosineLRScheduler, torch.optim.lr_scheduler._LRScheduler): """ Wrap timm.scheduler.CosineLRSchedu...
FLASHATTENION-LION-OPTIMIZE-main
training/src/optim/timm_lr_scheduler.py
# Meant to work with Apex's DistributeFusedAdam from typing import Any, Callable, Dict, List, Optional, Union from pathlib import Path import types import torch from torch.optim.optimizer import Optimizer from torch.optim import LBFGS from apex.contrib.optimizers.distributed_fused_adam import DistributedFusedAdam f...
FLASHATTENION-LION-OPTIMIZE-main
training/src/utils/ddp_zero2.py
import collections import math import os import pathlib import re import pynvml pynvml.nvmlInit() def systemGetDriverVersion(): return pynvml.nvmlSystemGetDriverVersion() def deviceGetCount(): return pynvml.nvmlDeviceGetCount() class device: # assume nvml returns list of 64 bit ints _nvml_affini...
FLASHATTENION-LION-OPTIMIZE-main
training/src/utils/gpu_affinity.py
import re from pathlib import Path import torch import math from einops import rearrange def load_checkpoint(path, device='cpu'): path = Path(path).expanduser() is_deepspeed = False if path.is_dir(): # DeepSpeed checkpoint is_deepspeed = True latest_path = path / 'latest' if lates...
FLASHATTENION-LION-OPTIMIZE-main
training/src/utils/checkpoint.py
# Copied from https://github.com/fadel/pytorch_ema/blob/master/torch_ema/ema.py from __future__ import division from __future__ import unicode_literals from typing import Iterable, Optional import weakref import copy import contextlib import torch def to_float_maybe(x): return x.float() if x.dtype in [torch.flo...
FLASHATTENION-LION-OPTIMIZE-main
training/src/utils/ema.py
# Adapted from https://github.com/rwightman/pytorch-image-models/blob/master/benchmark.py import torch try: from deepspeed.profiling.flops_profiler import get_model_profile has_deepspeed_profiling = True except ImportError as e: has_deepspeed_profiling = False try: from fvcore.nn import FlopCountAnaly...
FLASHATTENION-LION-OPTIMIZE-main
training/src/utils/flops.py
# Copied from https://github.com/NVIDIA/DeepLearningExamples/blob/master/PyTorch/LanguageModeling/Transformer-XL/pytorch/utils/distributed.py # Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in com...
FLASHATTENION-LION-OPTIMIZE-main
training/src/utils/distributed.py
import logging import warnings from typing import List, Sequence import pytorch_lightning as pl import rich.syntax import rich.tree from omegaconf import DictConfig, OmegaConf from pytorch_lightning.utilities import rank_zero_only # Copied from https://docs.python.org/3/howto/logging-cookbook.html#using-a-context-ma...
FLASHATTENION-LION-OPTIMIZE-main
training/src/utils/utils.py
# Meant to work with Pytorch's ZeroRedundancyOptimizer from typing import Any, Callable, Dict, List, Optional, Union from pathlib import Path import torch from torch.optim.optimizer import Optimizer from torch.distributed.optim import ZeroRedundancyOptimizer from pytorch_lightning.strategies.ddp import DDPStrategy f...
FLASHATTENION-LION-OPTIMIZE-main
training/src/utils/ddp_zero1.py
import math from functools import partial from collections import namedtuple import torch import torch.nn as nn import torch.nn.functional as F from torch.nn.modules.utils import _pair import hydra from einops import reduce, rearrange def pooling(x, pooling_mode='CLS', key_padding_mask=None, batch_first=True): ...
FLASHATTENION-LION-OPTIMIZE-main
training/src/models/modules/seq_common.py
import math import torch import torch.nn.functional as F import pytest from einops import rearrange from flash_attn.layers.rotary import apply_rotary_emb_func, apply_rotary_emb_torch is_sm8x = torch.cuda.get_device_capability('cuda') >= (8, 0) @pytest.mark.parametrize('dtype', ([torch.float16] if not is_sm8x else...
FLASHATTENION-LION-OPTIMIZE-main
tests/test_rotary.py
import math from functools import partial import torch import torch.nn.functional as F import pytest from einops import rearrange, repeat from flash_attn.flash_attn_interface import flash_attn_func, flash_attn_unpadded_qkvpacked_func, _get_block_size, flash_attn_unpadded_kvpacked_func, flash_attn_unpadded_func from...
FLASHATTENION-LION-OPTIMIZE-main
tests/test_flash_attn.py
import math import torch import torch.nn.functional as F import pytest from einops import rearrange from flash_attn.losses.cross_entropy import CrossEntropyLossApex is_sm8x = torch.cuda.get_device_capability('cuda')[0] >= 8 @pytest.mark.parametrize('dtype', [torch.float16, torch.float32] + ([torch.bfloat16] if is...
FLASHATTENION-LION-OPTIMIZE-main
tests/losses/test_cross_entropy.py
# Run test with: # torchrun --no_python --nproc_per_node=8 pytest -q -s tests/losses/test_cross_entropy_parallel.py import math import torch import torch.nn.functional as F import pytest from apex.transformer import parallel_state from apex.transformer import tensor_parallel from flash_attn.losses.cross_entropy imp...
FLASHATTENION-LION-OPTIMIZE-main
tests/losses/test_cross_entropy_parallel.py
import re import torch import pytest from transformers import OPTConfig from transformers.models.opt.modeling_opt import OPTForCausalLM from flash_attn.models.gpt import GPTLMHeadModel from flash_attn.models.opt import remap_state_dict_opt, opt_config_to_gpt2_config from flash_attn.utils.pretrained import state_dict...
FLASHATTENION-LION-OPTIMIZE-main
tests/models/test_opt.py
import os import re import time import torch import pytest from einops import rearrange from transformers import GPT2Config, GPT2Tokenizer, OPTConfig, AutoTokenizer from transformers.models.gpt2.modeling_gpt2 import GPT2LMHeadModel as GPT2LMHeadModelHF from transformers.models.opt.modeling_opt import OPTForCausalLM ...
FLASHATTENION-LION-OPTIMIZE-main
tests/models/test_gpt_generation.py
# Run test with: # torchrun --no_python --nproc_per_node=8 pytest -q -s tests/models/test_gpt_generation_parallel.py -k "parallel" import os import re import torch import pytest from einops import rearrange from transformers import GPT2Config, GPT2Tokenizer from transformers.models.gpt2.modeling_gpt2 import GPT2LMHe...
FLASHATTENION-LION-OPTIMIZE-main
tests/models/test_gpt_generation_parallel.py
import re import torch import pytest from transformers import GPT2Config from transformers.models.gpt2.modeling_gpt2 import GPT2LMHeadModel as GPT2LMHeadModelHF from flash_attn.models.gpt import GPTLMHeadModel from flash_attn.models.gpt import remap_state_dict_gpt2 from flash_attn.utils.pretrained import state_dict_...
FLASHATTENION-LION-OPTIMIZE-main
tests/models/test_gpt.py
import re import torch import pytest from timm.models.vision_transformer import vit_base_patch16_224 from flash_attn.models.vit import vit_base_patch16_224 as flash_vit_base_patch16_224 @pytest.mark.parametrize('fused_mlp', [False, True]) # @pytest.mark.parametrize('fused_mlp', [False]) @pytest.mark.parametrize('o...
FLASHATTENION-LION-OPTIMIZE-main
tests/models/test_vit.py
import re from collections import OrderedDict import torch import torch.nn.functional as F import pytest from einops import rearrange from transformers import BertConfig from transformers.models.bert.modeling_bert import BertModel as BertModelHF from transformers.models.bert.modeling_bert import BertForPreTraining a...
FLASHATTENION-LION-OPTIMIZE-main
tests/models/test_bert.py
# Run test with: # torchrun --no_python --nproc_per_node=8 pytest -q -s tests/models/test_gpt_parallel.py import math import torch import torch.nn as nn import torch.nn.functional as F import pytest from einops import rearrange from transformers import GPT2Config from apex.transformer import parallel_state from f...
FLASHATTENION-LION-OPTIMIZE-main
tests/models/test_gpt_parallel.py
import math import torch import torch.nn.functional as F import pytest from einops import rearrange, repeat from flash_attn.ops.layer_norm import DropoutAddLayerNorm, dropout_add_layer_norm from flash_attn.ops.layer_norm import dropout_add_layer_norm_subset from flash_attn.ops.rms_norm import DropoutAddRMSNorm, drop...
FLASHATTENION-LION-OPTIMIZE-main
tests/ops/test_dropout_layer_norm.py
import math from functools import partial import torch import torch.nn.functional as F import pytest from einops import rearrange from flash_attn.ops.fused_dense import FusedDense, FusedMLP @pytest.mark.parametrize('dtype', [torch.float16, torch.bfloat16]) @pytest.mark.parametrize('return_residual', [False, True])...
FLASHATTENION-LION-OPTIMIZE-main
tests/ops/test_fused_dense.py
# Run test with: # torchrun --no_python --nproc_per_node=8 pytest -q -s tests/ops/test_fused_dense_parallel.py import math import torch import torch.nn.functional as F import pytest from apex.transformer import parallel_state from apex.transformer import tensor_parallel from flash_attn.ops.fused_dense import FusedD...
FLASHATTENION-LION-OPTIMIZE-main
tests/ops/test_fused_dense_parallel.py
# Run test with: # torchrun --no_python --nproc_per_node=8 pytest -q -s tests/modules/test_embedding_parallel.py import torch import torch.nn as nn import torch.nn.functional as F import pytest from einops import rearrange from apex.transformer import parallel_state from flash_attn.modules.embedding import GPT2Embe...
FLASHATTENION-LION-OPTIMIZE-main
tests/modules/test_embedding_parallel.py
# Run test with: # torchrun --no_python --nproc_per_node=8 pytest -q -s tests/modules/test_block_parallel.py import math from functools import partial import torch import torch.nn as nn import torch.nn.functional as F import pytest from einops import rearrange from apex.transformer import parallel_state from apex.t...
FLASHATTENION-LION-OPTIMIZE-main
tests/modules/test_block_parallel.py
# Run test with: # torchrun --no_python --nproc_per_node=8 pytest -q -s tests/modules/test_mha_parallel.py import math import torch import torch.nn.functional as F import pytest from einops import rearrange from apex.transformer import parallel_state from apex.transformer import tensor_parallel from flash_attn.mod...
FLASHATTENION-LION-OPTIMIZE-main
tests/modules/test_mha_parallel.py
from functools import partial import math import torch import torch.nn as nn import torch.nn.functional as F from einops import rearrange, repeat from flash_attn.utils.benchmark import benchmark_forward, benchmark_all, pytorch_profiler from flash_attn.flash_attn_interface import flash_attn_unpadded_qkvpacked_func # f...
FLASHATTENION-LION-OPTIMIZE-main
benchmarks/benchmark_causal.py
from functools import partial import math import torch import torch.nn as nn import torch.nn.functional as F from einops import rearrange, repeat from flash_attn.utils.benchmark import benchmark_all, benchmark_forward, benchmark_backward, benchmark_combined from flash_attn.bert_padding import unpad_input, pad_input f...
FLASHATTENION-LION-OPTIMIZE-main
benchmarks/benchmark_flash_attention.py
# [2022-10-23] Copied from https://github.com/NVIDIA/apex/blob/master/apex/transformer/functional/fused_softmax.py # for benchmarking. # We added support for seqlen=2k and seqlen=4k # coding=utf-8 # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "L...
FLASHATTENION-LION-OPTIMIZE-main
flash_attn/fused_softmax.py
# Adapted from https://github.com/mlcommons/training_results_v1.1/blob/main/NVIDIA/benchmarks/bert/implementations/pytorch/fmha.py import torch import torch.nn as nn import flash_attn_cuda def convert_blockmask(blockmask, causal): """Convert from the 0-1 format to the format used by the CUDA code. 0 means th...
FLASHATTENION-LION-OPTIMIZE-main
flash_attn/flash_blocksparse_attn_interface.py
import math import torch import torch.nn as nn from einops import rearrange import hydra from flash_attn.flash_blocksparse_attn_interface import flash_blocksparse_attn_func from flash_attn.flash_blocksparse_attn_interface import convert_blockmask from flash_attn.bert_padding import unpad_input, pad_input, index_firs...
FLASHATTENION-LION-OPTIMIZE-main
flash_attn/flash_blocksparse_attention.py
# Adapted from https://github.com/mlcommons/training_results_v1.1/blob/main/NVIDIA/benchmarks/bert/implementations/pytorch/padding.py import torch import torch.nn.functional as F from einops import rearrange, repeat class IndexFirstAxis(torch.autograd.Function): @staticmethod def forward(ctx, input, indice...
FLASHATTENION-LION-OPTIMIZE-main
flash_attn/bert_padding.py
FLASHATTENION-LION-OPTIMIZE-main
flash_attn/__init__.py
# [2022-10-23] Downloaded from https://github.com/openai/triton/blob/master/python/tutorials/06-fused-attention.py # for benchmarking. # We fixed a few dtype cast to make it work for bf16 """ Fused Attention =============== This is a Triton implementation of the Flash Attention algorithm (see: Dao et al., https://arxi...
FLASHATTENION-LION-OPTIMIZE-main
flash_attn/flash_attn_triton_og.py
import math import torch import torch.nn as nn from einops import rearrange from flash_attn.flash_attn_interface import flash_attn_unpadded_qkvpacked_func from flash_attn.bert_padding import unpad_input, pad_input class FlashAttention(nn.Module): """Implement the scaled dot product attention with softmax. A...
FLASHATTENION-LION-OPTIMIZE-main
flash_attn/flash_attention.py
""" *Experimental* implementation of FlashAttention in Triton. We use the FlashAttention implementation from Phil Tillet a starting point. https://github.com/openai/triton/blob/master/python/tutorials/06-fused-attention.py Changes: - Implement both causal and non-causal attention. - Implement both self-attention and ...
FLASHATTENION-LION-OPTIMIZE-main
flash_attn/flash_attn_triton.py
import torch import torch.nn as nn import torch.nn.functional as F import flash_attn_cuda def _get_block_size(device, head_dim, is_dropout): assert head_dim % 8 == 0 and head_dim <= 128 return 256 if head_dim <= 64 else 128 def _flash_attn_forward(q, k, v, out, cu_seqlens_q, cu_seqlens_k, max_seqlen_q, max...
FLASHATTENION-LION-OPTIMIZE-main
flash_attn/flash_attn_interface.py
# Inspired by https://github.com/NVIDIA/apex/blob/master/apex/transformer/tensor_parallel/cross_entropy.py # But we make it much faster: we compute the local loss and the LSE, and by exchanging the LSE and # the losses we can get the global loss. There's no need to do it step by step # (compute local max, exchange, com...
FLASHATTENION-LION-OPTIMIZE-main
flash_attn/losses/cross_entropy.py
FLASHATTENION-LION-OPTIMIZE-main
flash_attn/losses/__init__.py
FLASHATTENION-LION-OPTIMIZE-main
flash_attn/layers/__init__.py
# We use the same API as https://github.com/rwightman/pytorch-image-models/blob/v0.6.11/timm/models/layers/patch_embed.py # But we use nn.Linear instead of Conv2d and it's about 8x faster. from functools import partial import torch.nn as nn from torch import _assert from torch.nn.modules.utils import _pair from eino...
FLASHATTENION-LION-OPTIMIZE-main
flash_attn/layers/patch_embed.py
# Inspired by https://github.com/facebookresearch/xformers/blob/main/xformers/components/positional_embedding/rotary.py from typing import Tuple import math import torch from einops import rearrange, repeat import rotary_emb def rotate_half(x): x1, x2 = x.chunk(2, dim=-1) return torch.cat((-x2, x1), dim=-...
FLASHATTENION-LION-OPTIMIZE-main
flash_attn/layers/rotary.py
import torch from transformers.utils import WEIGHTS_NAME, WEIGHTS_INDEX_NAME from transformers.utils import is_remote_url from transformers.modeling_utils import load_state_dict from transformers.utils.hub import cached_file, get_checkpoint_shard_files def state_dict_from_pretrained(model_name, device=None, dtype=No...
FLASHATTENION-LION-OPTIMIZE-main
flash_attn/utils/pretrained.py
# Copyright (c) 2023, Tri Dao. # Adapted from https://github.com/NVIDIA/Megatron-LM/blob/0bb597b42c53355a567aba2a1357cc34b9d99ddd/megatron/text_generation/forward_step.py#L31 from typing import Optional, Union, Sequence, Callable import gc import time from dataclasses import dataclass, field from collections import na...
FLASHATTENION-LION-OPTIMIZE-main
flash_attn/utils/generation.py
# Copyright (c) 2022, Tri Dao. """ Useful functions for writing test code. """ import torch import torch.utils.benchmark as benchmark def benchmark_forward(fn, *inputs, repeats=10, desc='', verbose=True, amp=False, amp_dtype=torch.float16, **kwinputs): """ Use Pytorch Benchmark on the forwa...
FLASHATTENION-LION-OPTIMIZE-main
flash_attn/utils/benchmark.py
FLASHATTENION-LION-OPTIMIZE-main
flash_attn/utils/__init__.py
from typing import Optional import torch from torch import Tensor from torch.distributed import ProcessGroup # `all_gather_into_tensor` and `reduce_scatter_tensor` are new placeholders for # `_all_gather_base` and `_reduce_scatter_base`. They require the most recent # version of PyTorch. The following 4 lines are for...
FLASHATTENION-LION-OPTIMIZE-main
flash_attn/utils/distributed.py
FLASHATTENION-LION-OPTIMIZE-main
flash_attn/models/__init__.py
# Copyright (c) 2023, Tri Dao. import math import re from collections import OrderedDict import torch import torch.nn.functional as F from transformers import GPT2Config, OPTConfig def remap_state_dict_opt(state_dict, config): def key_mapping_model(key): key = re.sub(r'^model.decoder.', 'transformer.'...
FLASHATTENION-LION-OPTIMIZE-main
flash_attn/models/opt.py
# Copyright (c) 2022, Tri Dao. # Inspired by / adapted from https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/vision_transformer.py import math import re from functools import partial from copy import deepcopy from collections import OrderedDict import torch import torch.nn as nn import torch....
FLASHATTENION-LION-OPTIMIZE-main
flash_attn/models/vit.py
# Copyright (c) 2022, Tri Dao. # This BERT implementation is based on our MLPerf 2.0 and MLPerf 2.1 BERT implementation. # https://github.com/mlcommons/training_results_v2.0/blob/main/HazyResearch/benchmarks/bert/implementations/pytorch/modeling.py # https://github.com/mlcommons/training_results_v2.1/blob/main/Azure-Ha...
FLASHATTENION-LION-OPTIMIZE-main
flash_attn/models/bert.py
# Copyright (c) 2023, Tri Dao. import logging import math import re from functools import partial from collections import namedtuple, OrderedDict from collections.abc import Sequence import torch import torch.nn as nn import torch.nn.functional as F from transformers import GPT2Config from einops import rearrange ...
FLASHATTENION-LION-OPTIMIZE-main
flash_attn/models/gpt.py
FLASHATTENION-LION-OPTIMIZE-main
flash_attn/ops/__init__.py
# Copyright (c) 2023, Tri Dao. # Inspired by https://github.com/NVIDIA/apex/blob/master/apex/fused_dense/fused_dense.py # We make it work with pytorch amp and with bfloat16. # The TensorParallel linear modules are inspired by https://github.com/NVIDIA/apex/blob/master/apex/transformer/tensor_parallel/layers.py from typ...
FLASHATTENION-LION-OPTIMIZE-main
flash_attn/ops/fused_dense.py
# Copyright (c) 2022, Tri Dao. # Adapted from https://github.com/NVIDIA/apex/blob/master/apex/contrib/layer_norm/layer_norm.py import torch from torch.nn import init from flash_attn.ops.layer_norm import DropoutAddLayerNormFn, DropoutAddLayerNormSubsetFn def rms_norm(x, weight, epsilon): return DropoutAddLayerN...
FLASHATTENION-LION-OPTIMIZE-main
flash_attn/ops/rms_norm.py
# Copyright (c) 2022, Tri Dao. # Adapted from https://github.com/NVIDIA/apex/blob/master/apex/contrib/layer_norm/layer_norm.py import torch from torch.nn import init import dropout_layer_norm def _dropout_add_layer_norm_forward(x0, residual, gamma, beta, rowscale, colscale, dropout_p, ...
FLASHATTENION-LION-OPTIMIZE-main
flash_attn/ops/layer_norm.py
# Copied from https://github.com/mlcommons/training_results_v1.1/blob/main/NVIDIA/benchmarks/bert/implementations/pytorch/model/layers/activations.py import math import torch from torch import nn # 1/sqrt(2*pi)-> 0.3989423 # 1/sqrt(2) -> 0.70710678 # sqrt(2/pi) -> 0.79788456 # this function is tanh approximation...
FLASHATTENION-LION-OPTIMIZE-main
flash_attn/ops/gelu_activation.py
# Adapted on https://github.com/ELS-RD/kernl/blob/main/src/kernl/implementations/linear_layer.py # and https://github.com/openai/triton/blob/master/python/triton/ops/matmul.py from typing import Optional import torch import triton import triton.language as tl from torch.autograd.function import FunctionCtx from torch....
FLASHATTENION-LION-OPTIMIZE-main
flash_attn/ops/triton/linear.py
# Adapted from https://github.com/facebookresearch/xformers/blob/main/xformers/triton/k_activations.py # Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. # # This source code is licensed under the BSD license found in the # LICENSE file in the root directory of this source tree. import math from e...
FLASHATTENION-LION-OPTIMIZE-main
flash_attn/ops/triton/k_activations.py
# The triton fused matmul + sqrelu is faster for fp16 but slower for bf16, compared # to naive implementation. import torch import torch.nn as nn import torch.nn.functional as F from torch.cuda.amp import custom_bwd, custom_fwd import fused_dense_lib as fused_dense_cuda from flash_attn.ops.triton.linear import triton...
FLASHATTENION-LION-OPTIMIZE-main
flash_attn/ops/triton/mlp.py
# Copyright (c) 2022, Tri Dao. import torch import torch.nn as nn from torch import Tensor from einops import rearrange from flash_attn.utils.distributed import reduce_scatter, all_reduce class GPT2Embeddings(nn.Module): def __init__(self, embed_dim, vocab_size, max_position_embeddings, padding_idx=None, ...
FLASHATTENION-LION-OPTIMIZE-main
flash_attn/modules/embedding.py
FLASHATTENION-LION-OPTIMIZE-main
flash_attn/modules/__init__.py
# Copyright (c) 2022, Tri Dao. import torch import torch.nn as nn import torch.nn.functional as F try: from flash_attn.ops.fused_dense import FusedMLP, ParallelFusedMLP except ImportError: FusedMLP, ParallelFusedMLP = None, None class Mlp(nn.Module): def __init__(self, in_features, hidden_features=None...
FLASHATTENION-LION-OPTIMIZE-main
flash_attn/modules/mlp.py
# Copyright (c) 2022, Tri Dao. from typing import Optional from functools import partial import torch import torch.nn as nn import torch.nn.functional as F from torch import Tensor from torchvision.ops import StochasticDepth from flash_attn.modules.mha import MHA from flash_attn.modules.mlp import Mlp try: fro...
FLASHATTENION-LION-OPTIMIZE-main
flash_attn/modules/block.py
# Copyright (c) 2022, Tri Dao. import math from functools import partial import torch import torch.nn as nn import torch.nn.functional as F from einops import rearrange try: from flash_attn.flash_attn_interface import flash_attn_unpadded_qkvpacked_func from flash_attn.flash_attn_interface import flash_attn_...
FLASHATTENION-LION-OPTIMIZE-main
flash_attn/modules/mha.py
#!/usr/bin/env python """The setup script.""" import io from os import path as op from setuptools import setup, find_packages with open("README.md") as readme_file: readme = readme_file.read() here = op.abspath(op.dirname(__file__)) # get the dependencies and installs with io.open(op.join(here, "requirements.t...
segment-geospatial-main
setup.py
#!/usr/bin/env python """Tests for `samgeo` package.""" import os import unittest from samgeo import samgeo class TestCommon(unittest.TestCase): """Tests for the common.py module.""" def setUp(self): """Set up test fixtures, if any.""" def tearDown(self): """Tear down test fixtures, i...
segment-geospatial-main
tests/test_common.py
"""Unit test package for samgeo."""
segment-geospatial-main
tests/__init__.py
#!/usr/bin/env python """Tests for `samgeo` package.""" import os import unittest from samgeo import samgeo class TestSamgeo(unittest.TestCase): """Tests for `samgeo` package.""" def setUp(self): """Set up test fixtures, if any.""" bbox = [-122.1497, 37.6311, -122.1203, 37.6458] im...
segment-geospatial-main
tests/test_samgeo.py
"""Top-level package for segment-geospatial.""" __author__ = """Qiusheng Wu""" __email__ = 'giswqs@gmail.com' __version__ = '0.8.0' from .samgeo import *
segment-geospatial-main
samgeo/__init__.py
""" The source code is adapted from https://github.com/aliaksandr960/segment-anything-eo. Credit to the author Aliaksandr Hancharenka. """ import os import tempfile import cv2 import numpy as np from tqdm import tqdm import shapely import pyproj import rasterio import geopandas as gpd import matplotlib.pyplot as plt ...
segment-geospatial-main
samgeo/common.py
"""The LangSAM model for segmenting objects from satellite images using text prompts. The source code is adapted from the https://github.com/luca-medeiros/lang-segment-anything repository. Credits to Luca Medeiros for the original implementation. """ import os import warnings import argparse import numpy as np import...
segment-geospatial-main
samgeo/text_sam.py
""" The source code is adapted from https://github.com/aliaksandr960/segment-anything-eo. Credit to the author Aliaksandr Hancharenka. """ import os import cv2 import torch import numpy as np from segment_anything import sam_model_registry, SamAutomaticMaskGenerator, SamPredictor from .common import * class SamGeo:...
segment-geospatial-main
samgeo/samgeo.py
import torch from infinite.main import LMInfinite d_model = 512 seq_len = 100 n_global = 100 l_pretrain = 50 #sample q = torch.randn(1, seq_len, d_model) k = torch.randn(1, seq_len, d_model) v = torch.randn(1, seq_len, d_model) #llm infinite mode model = LMInfinite( d_model, n_global, l_pretrain ) #fo...
LM-Infinite-main
example.py
from infinite.main import LMInfinite
LM-Infinite-main
infinite/__init__.py
import torch from torch import nn import math import torch.nn.functional as F class LMInfinite(nn.Module): def __init__( self, d_model, n_global=100, l_pretrain=2048 ): super(LMInfinite, self).__init__() self.d_model = d_model self.n_global = n_globa...
LM-Infinite-main
infinite/main.py
import argparse def parse_args(): parser = argparse.ArgumentParser(description='MOSS-RLHF @Fudan NLP Group') # Path parser.add_argument('--model_save_path', type=str, default='', help='checkpoint path, used for save model and training') parser.add_argument('--policy_model_path', type=str, default='', ...
MOSS-RLHF-main
config.py
import time import math import random import logging from typing import List import numpy as np import torch import torch.nn as nn from config import parse_args from ppo.ppo_trainer import PPOTrainer from ppo.ppo_datahelper import get_tokenizer from utils import * from transformers.models.llama.modeling_llama import Ll...
MOSS-RLHF-main
train_ppo.py
MOSS-RLHF-main
__init__.py
# Copyright 2023 Rohan Taori, Ishaan Gulrajani, Tianyi Zhang, Yann Dubois, Xuechen Li # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LIC...
MOSS-RLHF-main
merge_weight_zh.py
from typing import List, Optional, Any, Dict import math from accelerate import Accelerator import torch from torch.utils.tensorboard import SummaryWriter class Metric: def __init__(self): pass def add(self, val): raise NotImplementedError def val(self) -> float: raise NotImplemen...
MOSS-RLHF-main
metric.py
import torch import torch.nn.functional as F import logging from accelerate import Accelerator from accelerate.state import AcceleratorState from typing import Tuple accelerator = None def setup_accelerator(): global accelerator if accelerator is None: accelerator = Accelerator(split_batches=True) ...
MOSS-RLHF-main
utils.py
MOSS-RLHF-main
ppo/__init__.py
import time, math, os import torch import torch.nn as nn import torch.optim as optim from typing import Dict, Any, Tuple, List from torch.utils.data import DataLoader from .ppo_datahelper import * from utils import * from metric import MeanMetric, PPLMetric, SumMetric, RealtimeMetric from accelerate import Accelerator ...
MOSS-RLHF-main
ppo/ppo_trainer.py
import os import random import logging import torch import json import copy from typing import List, Dict, Any, Tuple from transformers.models.llama.tokenization_llama import LlamaTokenizer from torch.utils.data import get_worker_info, IterableDataset from utils import print_rank_0, pad_sequences human_prompt = "<|Hum...
MOSS-RLHF-main
ppo/ppo_datahelper.py
import tqdm.auto as tqdm import torch.nn.functional as F from typing import Optional, Dict, Sequence from typing import List, Optional, Tuple, Union import transformers from dataclasses import dataclass, field from Model.RadFM.multimodality_model import MultiLLaMAForCausalLM import torch from transformers import AutoMo...
RadFM-main
Quick_demo/test.py
import torch.nn as nn import torch.nn.functional as F import torch from .helpers import PerceiverResampler from .utils import get_visual_encoder from einops import rearrange, repeat from einops_exts import rearrange_many import torchvision from .vit_3d import ViT from einops.layers.torch import Rearrange from .tra...
RadFM-main
Quick_demo/Model/RadFM/my_embedding_layer.py
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved """ Various positional encodings for the transformer. """ import math import torch from torch import nn from einops.layers.torch import Rearrange from einops import rearrange, repeat class PositionEmbeddingSine(nn.Module): """ This is a mor...
RadFM-main
Quick_demo/Model/RadFM/position_encoding.py