| python_code | repo_name | file_path |
|---|---|---|
| Pegasus-master | pegasus/ImageBind/models/__init__.py |
#!/usr/bin/env python3
# Portions Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import os
from functools import partial
from types import SimpleNamespace
import torch
i... | Pegasus-master | pegasus/ImageBind/models/imagebind_model.py |
#!/usr/bin/env python3
# Portions Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
# Code modified from
# https://github.com/rwightman/pytorch-image-models/blob/master/timm/... | Pegasus-master | pegasus/ImageBind/models/transformer.py |
#!/usr/bin/env python3
# Portions Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import gzip
import html
import io
import math
from functools import lru_cache
from typing ... | Pegasus-master | pegasus/ImageBind/models/multimodal_preprocessors.py |
#!/usr/bin/env python3
# Portions Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import einops
import numpy as np
import torch
import torch.nn as nn
class Normalize(nn... | Pegasus-master | pegasus/ImageBind/models/helpers.py |
from setuptools import setup, find_packages
setup(
name = 'the-compiler',
packages = find_packages(exclude=[]),
version = '0.0.6',
license='MIT',
description = 'The Compiler ',
author = 'Kye Gomez',
author_email = 'kye@apac.ai',
long_description_content_type = 'text/markdown',
url = 'https://github.c... | the-compiler-main | setup.py |
from the_compiler import TheCompiler
api_key = "" # Your OpenAI API key
create = "a simple calculator program"
compiler = TheCompiler(api_key)
code = compiler.run(create)
print("Generated Code:\n", code)
| the-compiler-main | example.py |
| the-compiler-main | the_compiler/__init__.py |
from swarms import Swarms
class Architect:
def __init__(self, create, api_key):
self.create = create
self.boss_node = boss_node(openai_api_key=api_key)
def generate_architecture(self):
objective = f"""
Create an architectural analysis specification in markdown in the most opti... | the-compiler-main | the_compiler/main.py |
import gzip
import random
import numpy as np
import torch
import torch.optim as optim
import tqdm
from torch.utils.data import DataLoader, Dataset
from Andromeda.model import Andromeda
from Andromeda.core.transformer import Decoder, Transformer
from Andromeda.core.autoregressive_wrapper import AutoregressiveWrapper
... | Andromeda-master | train_simple.py |
import torch
from Andromeda.model import Andromeda
model = Andromeda().cuda()
x = torch.randint(0, 256, (1, 1024)).cuda()
model(x) # (1, 1024, 20000) | Andromeda-master | example.py |
import math
import multiprocessing
import os
from datetime import timedelta
from functools import partial
from itertools import chain
import torch
# import bitsandbytes as bnb
from torch.distributed.fsdp import (
FullyShardedDataParallel,
MixedPrecision,
BackwardPrefetch,
ShardingStrategy,
)
from acce... | Andromeda-master | train.py |
from Andromeda.model import Andromeda
Andromeda1Billion = Andromeda(
num_tokens=25000,
max_seq_len=4192,
dim=2048,
depth=16,
dim_head=128,
heads=8,
use_abs_pos_emb=False,
alibi_pos_bias=True,
alibi_num_heads=4,
rotary_xpos=True,
attn_flash=True,
attn_kv_heads = 2, ... | Andromeda-master | Andromeda/configs.py |
# from Andromeda.train import Train
from Andromeda.model import AndromedaTokenizer, Andromeda
from Andromeda.train import Train, train | Andromeda-master | Andromeda/__init__.py |
from torch.nn import Module
from transformers import AutoTokenizer
from Andromeda.core.transformer import (
Decoder,
Transformer,
)
from Andromeda.core.autoregressive_wrapper import AutoregressiveWrapper
class AndromedaTokenizer:
def __init__(self):
self.tokenizer= AutoTokenizer.from_pretrained(... | Andromeda-master | Andromeda/model.py |
import math
import multiprocessing
import os
from datetime import timedelta
from functools import partial
from itertools import chain
import torch
########### SETUP CONFIG
import torch.distributed as dist
from accelerate import Accelerator
from accelerate.logging import get_logger
from accelerate.state import Acceler... | Andromeda-master | Andromeda/train.py |
import torch
from transformers import AutoTokenizer
from einops._torch_specific import allow_ops_in_compiled_graph
import argparse
# class AndromedaEval:
# def __init__(self, path, seed=42, device=None):
# self.path = path
# self.seed = seed
# self.device = device
# if self.devic... | Andromeda-master | Andromeda/inference.py |
from math import ceil
import torch
from torch import nn
import torch.nn.functional as F
from einops import rearrange, pack, unpack
def exists(val):
return val is not None
def eval_decorator(fn):
def inner(self, *args, **kwargs):
was_training = self.training
self.eval()
out = fn(self, ... | Andromeda-master | Andromeda/core/autoregressive_wrapper.py |
import torch
from packaging import version
if version.parse(torch.__version__) >= version.parse('2.0.0'):
from einops._torch_specific import allow_ops_in_compiled_graph
allow_ops_in_compiled_graph()
| Andromeda-master | Andromeda/core/__init__.py |
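The version guard above turns on einops support inside compiled graphs on PyTorch 2.x. A minimal sketch of the scenario it targets (the module name and shapes are illustrative, not taken from the repo):

```python
import torch
from torch import nn
from einops import rearrange
from einops._torch_specific import allow_ops_in_compiled_graph

allow_ops_in_compiled_graph()  # let rearrange() trace cleanly under torch.compile

class SplitHeads(nn.Module):
    def forward(self, x):
        return rearrange(x, 'b (h d) -> b h d', h=8)

compiled = torch.compile(SplitHeads())
out = compiled(torch.randn(4, 64))  # -> shape (4, 8, 8)
```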
from functools import partial
from typing import Optional
import torch
from torch import nn, einsum, Tensor
import torch.nn.functional as F
from collections import namedtuple
from functools import wraps
from packaging import version
from dataclasses import dataclass
from einops import rearrange, repeat
# constants
... | Andromeda-master | Andromeda/core/attend.py |
import math
from random import random
import torch
from torch import nn, einsum, Tensor
import torch.nn.functional as F
from functools import partial, wraps
from inspect import isfunction
from dataclasses import dataclass
from typing import Callable, List, Optional
from einops import rearrange, repeat, reduce
from ... | Andromeda-master | Andromeda/core/transformer.py |
import torch
import triton
import triton.language as tl
@triton.jit
def max_fn(x, y):
return tl.math.max(x, y)
@triton.jit
def _fwd_kernel(
Q, K, V, sm_scale,
L,
Out,
stride_qz, stride_qh, stride_qm, stride_qk,
stride_kz, stride_kh, stride_kn, stride_kk,
stride_vz, stride_vh, stride_vk,... | Andromeda-master | Andromeda/core/flash.py |
import torch
# This is the unfused version of StableAdamW. It is slower than the fused version (coming).
class StableAdamWUnfused(torch.optim.Optimizer):
def __init__(
self,
params,
lr=0.002,
weight_decay=0.2,
betas=(0.9, 0.99),
eps=1e-8,
clip_thresh=1.0,
... | Andromeda-master | Andromeda/utils/stable_adamw.py |
| Andromeda-master | Andromeda/utils/__init__.py |
import torch
# from palm_rlhf_pytorch.palm import LayerNorm
from torch.nn import LayerNorm
from torch.optim import AdamW
# from palm.utils import print_main
from Andromeda.utils.helpers import print_main
from Andromeda.utils.stable_adamw import StableAdamWUnfused
# optimizers
def decoupled_optimizer(
model: tor... | Andromeda-master | Andromeda/utils/decoupled_optimizer.py |
import math
import torch
from torch import einsum, _nnpack_available
import torch.nn.functional as F
from torch import nn
from einops import rearrange
import copy
from pathlib import PurePath
from tqdm import tqdm_gui
from beartype import beartype
from beartype.typing import Tuple, Optional
from einops import rearra... | Andromeda-master | Andromeda/utils/rf_utils.py |
import torch.distributed as dist # Add this line
def print_num_params(model):
n_params = sum(p.numel() for p in model.parameters() if p.requires_grad)
    if dist.is_available() and dist.is_initialized():
if dist.get_rank() == 0:
print(f"Number of parameters in model: {n_params}")
else:
print(f"Number of p... | Andromeda-master | Andromeda/utils/helpers.py |
import unittest
from Andromeda.dataset_builder import DatasetBuilder
class TestDatasetBuilder(unittest.TestCase):
def setUp(self):
self.builder = DatasetBuilder(dataset_name="tiiuae/falcon-refinedweb")
def test_initialization(self):
self.assertEqual(self.builder.dataset_name, "tiiuae/falcon-r... | Andromeda-master | testing/dataset_builder.py |
import torch
import unittest
from Andromeda.model import Andromeda
class TestAndromeda(unittest.TestCase):
def setUp(self):
self.model = Andromeda()
def test_initialization(self):
self.assertIsNotNone(self.model.andromeda, "Transformer is not initialized.")
self.assertIsNotNone(self.m... | Andromeda-master | testing/model.py |
import unittest
from Andromeda.model import AndromedaTokenizer
class TestAndromedaTokenizer(unittest.TestCase):
def setUp(self):
self.tokenizer = AndromedaTokenizer()
def test_initialization(self):
self.assertIsNotNone(self.tokenizer.tokenizer, "Tokenizer is not initialized.")
self.as... | Andromeda-master | testing/tokenizer.py |
import matplotlib.pyplot as plt
import time
import torch
from torch.utils.data import DataLoader
from torchvision import datasets, transforms
import numpy as np
import tracemalloc
# from Andromeda.model import Andromeda
from Andromeda.model import Andromeda
from Andromeda.utils.stable_adamw import StableAdamWUnfused
... | Andromeda-master | testing/accuracy.py |
import matplotlib.pyplot as plt
import time
import torch
from torch.utils.data import DataLoader
from torchvision import datasets, transforms
import numpy as np
import tracemalloc
# from Andromeda.model import Andromeda
from Andromeda.model import Andromeda
from Andromeda.utils.stable_adamw import StableAdamWUnfused
... | Andromeda-master | testing/benchmarking.py |
# Copyright 2022 MosaicML LLM Foundry authors
# SPDX-License-Identifier: Apache-2.0
"""Run pytest using MCP."""
import argparse
import time
from mcli.sdk import (RunConfig, RunStatus, create_run, follow_run_logs,
stop_run, wait_for_run_status)
if __name__ == '__main__':
parser = argparse.... | Andromeda-master | .github/mcp/mcp_pytest.py |
# Copyright (c) 2022 Agora
# Licensed under The MIT License [see LICENSE for details]
from io import open
from setuptools import find_packages, setup
setup(
name="zetascale",
version="0.0.3",
author="Zeta Team",
author_email="kye@apac.ai",
description="Transformers at zeta scales",
long_descr... | zeta-main | setup.py |
import torch
from zeta import FlashAttention
q = torch.randn(2, 4, 6, 8)
k = torch.randn(2, 4, 10, 8)
v = torch.randn(2, 4, 10, 8)
attention = FlashAttention(causal=False, dropout=0.1, flash=False)
output = attention(q, k, v)
print(output.shape) | zeta-main | example.py |
#architecture
from zeta.models import *
from zeta.models.andromeda import Andromeda
#models
from zeta.models.gpt4 import GPT4, GPT4MultiModal
from zeta.models.palme import PalmE
#######
from zeta.nn import *
from zeta.nn.architecture.transformer import (
AttentionLayers,
Decoder,
Encoder,
Transformer... | zeta-main | zeta/__init__.py |
import os
from logging import getLogger
from typing import List, Optional
from sentencepiece import SentencePieceProcessor
logger = getLogger()
class SentencePieceTokenizer:
"""
A SentencePieceTokenizer is a tokenizer that uses a pretrained SentencePiece model to convert text into tokens and vice versa.
... | zeta-main | zeta/tokenizers/sentence_piece.py |
from zeta.tokenizers.language_tokenizer import LanguageTokenizerGPTX
from zeta.tokenizers.multi_modal_tokenizer import MultiModalTokenizer
from zeta.tokenizers.sentence_piece import SentencePieceTokenizer | zeta-main | zeta/tokenizers/__init__.py |
from transformers import AutoTokenizer
class LanguageTokenizerGPTX:
def __init__(self):
self.tokenizer= AutoTokenizer.from_pretrained(
"EleutherAI/gpt-neox-20b",
eos_token="<eos>",
pad_token="<pad>",
extra_ids=0,
model_max_length=8192
)
... | zeta-main | zeta/tokenizers/language_tokenizer.py |
import logging
import torch
from transformers import CLIPProcessor, AutoTokenizer
logging.basicConfig(level=logging.DEBUG, format='%(asctime)s - %(levelname)s - %(message)s')
class MultiModalTokenizer:
"""
A tokenizer class for the kosmos model
Attributes:
processor(CLIPProcessor): The processor ... | zeta-main | zeta/tokenizers/multi_modal_tokenizer.py |
# Copyright (c) 2022 Agora
# Licensed under The MIT License [see LICENSE for details]
# attention
from zeta.nn.architecture.auto_regressive_wrapper import AutoregressiveWrapper
from zeta.nn.architecture.local_transformer import LocalTransformer
from zeta.nn.architecture.parallel_transformer import ParallelTransformer... | zeta-main | zeta/nn/__init__.py |
import math
import torch
import torch.nn.functional as F
from torch import nn
try:
from apex.normalization import FusedLayerNorm as LayerNorm
except ModuleNotFoundError:
from torch.nn import LayerNorm
from zeta.nn.attention.base import BaseAttention
from zeta.nn.embeddings.multiway_network import MultiwayWra... | zeta-main | zeta/nn/attention/multihead_attention.py |
import torch
from torch import nn
import torch.nn.functional as F
from einops import rearrange
from zeta.nn.attention.local_attention import LocalAttention
from zeta.utils.main import default, exists, l2norm
class LocalMHA(nn.Module):
def __init__(
self,
*,
dim,
window_size,
... | zeta-main | zeta/nn/attention/local_attention_mha.py |
import math
import warnings
from typing import Dict, Optional, Type
import torch
import torch.nn as nn
from einops import rearrange
from packaging import version
from zeta.nn.attention.base import BaseAttention
def _cast_if_autocast_enabled(tensor):
if torch.is_autocast_enabled():
if tensor.device.type... | zeta-main | zeta/nn/attention/multiquery_attention.py |
import torch
from torch import nn
from typing import Optional, Any
from zeta.nn.attention.attend import Attend
class MultiGroupQueryAttention(nn.Module):
def __init__(
self,
dim,
heads: int = None,
softmax_scale: Optional[float] = None,
attn_pdrop: float... | zeta-main | zeta/nn/attention/multi_group_attention.py |
"""Zeta Halo"""
#attentions
from zeta.nn.attention.dilated_attention import DilatedAttention
from zeta.nn.attention.flash_attention import FlashAttention
from zeta.nn.attention.flash_attention2 import FlashAttentionTwo
# from zeta.nn.attention.cross_attention import CrossAttend
from zeta.nn.attention.multihead_attent... | zeta-main | zeta/nn/attention/__init__.py |
import math
import torch
from einops import rearrange
from torch import einsum, nn
from torch.autograd.function import Function
from torch.cuda.amp import GradScaler, autocast
from torch.nn import DataParallel
from zeta.nn.attention.base import BaseAttention
# constants
EPSILON = 1e-10
# helper functions
def exists... | zeta-main | zeta/nn/attention/flash_attention2.py |
from collections import namedtuple
from dataclasses import dataclass
from functools import partial, wraps
from typing import Optional
import torch
import torch.nn.functional as F
from einops import rearrange, repeat
from packaging import version
from torch import Tensor, einsum, nn
# constants
EfficientAttentionConf... | zeta-main | zeta/nn/attention/attend.py |
from typing import Optional, Sequence, Tuple, Union
import torch
import torch.nn.functional as F
from einops import rearrange
from torch import Tensor, nn
from zeta.nn.attention.flash_attention import FlashAttention
from zeta.nn.biases.relative_position_bias import RelativePositionBias
from zeta.nn.embeddings.xpos_re... | zeta-main | zeta/nn/attention/dilated_attention.py |
# import torch
# import torch.nn as nn
# from einops import rearrange
# from einops_exts import check_shape, rearrange_many
# class SpatialLinearAttention(nn.Module):
# def __init__(self,
# dim: int = None,
# heads: int = 4,
# dim_head: int = 32):
# sup... | zeta-main | zeta/nn/attention/spatial_linear_attention.py |
from collections import namedtuple
from dataclasses import dataclass
from functools import wraps
import torch
import torch.nn.functional as F
from einops import rearrange
from packaging import version
from torch import Tensor, einsum, nn
from zeta.nn.attention.base import BaseAttention
# constants
EfficientAttenti... | zeta-main | zeta/nn/attention/flash_attention.py |
from zeta.nn.architecture.transformer import AttentionLayers
class CrossAttend(AttentionLayers):
def __init__(self, **kwargs):
super().__init__(cross_attend=True,
only_cross=True,
**kwargs) | zeta-main | zeta/nn/attention/cross_attention.py |
import torch
from torch import nn, einsum
from einops import rearrange, repeat, pack, unpack
from zeta.nn.embeddings.sinusoidal import SinusoidalEmbeddings, apply_rotary_pos_emb
from zeta.utils.main import exists, default, pad_to_multiple, l2norm, look_around, max_neg_values
#constant
TOKEN_SELF_ATTN_VALUE = -5e4
c... | zeta-main | zeta/nn/attention/local_attention.py |
import math
import torch
import torch.nn.functional as F
from torch import Tensor, nn, einsum
from typing import Tuple, Optional
from einops import rearrange, repeat, reduce, pack, unpack
from zeta.models.vit import exists
from zeta.nn.architecture.attn_layers import RMSNorm, apply_rotary_pos_emb
from zeta.nn.attenti... | zeta-main | zeta/nn/attention/mixture_attention.py |
from abc import abstractmethod
import torch.nn as nn
class BaseAttention(nn.Module):
@abstractmethod
def __init__(self):
super().__init__()
@abstractmethod
def forward(self, x, context=None, mask=None):
pass | zeta-main | zeta/nn/attention/base.py |
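BaseAttention is the abstract interface that the concrete attention classes in this repo (FlashAttention, MultiheadAttention, MultiQueryAttention, and others) inherit from. A minimal sketch of a subclass, using a hypothetical ScaledDotProductAttention that is not part of the actual codebase:

```python
import torch
from zeta.nn.attention.base import BaseAttention

class ScaledDotProductAttention(BaseAttention):  # illustrative only
    def __init__(self):
        super().__init__()

    def forward(self, x, context=None, mask=None):
        # self-attention when no context is passed, cross-attention otherwise
        context = x if context is None else context
        scores = torch.einsum('b i d, b j d -> b i j', x, context) * x.shape[-1] ** -0.5
        if mask is not None:
            scores = scores.masked_fill(~mask, float('-inf'))
        return torch.einsum('b i j, b j d -> b i d', scores.softmax(dim=-1), context)
```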
import torch
import torch.nn as nn
def fixed_pos_embedding(x):
"""
Generates fixed positional embeddings for the input tensor.
Args:
- x: Input tensor of shape (seq_len, dim)
Returns:
- sin: Sine positional embeddings of shape (seq_len, dim)
- cos: Cosine positional embeddings of ... | zeta-main | zeta/nn/embeddings/xpos_relative_position.py |
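The docstring above promises sine and cosine tables of shape (seq_len, dim) for an input of that shape. A standalone sketch of the standard fixed sinusoidal formulation it refers to, written from the usual 1/10000^(2i/dim) rule rather than from the truncated source (the repo's actual implementation may differ in detail):

```python
import torch

def fixed_pos_embedding_sketch(x: torch.Tensor):
    # x: (seq_len, dim) with dim even; returns sin and cos tables of the same shape
    seq_len, dim = x.shape
    inv_freq = 1.0 / (10000 ** (torch.arange(0, dim, 2, dtype=torch.float32) / dim))
    angles = torch.einsum('i,j->ij', torch.arange(seq_len, dtype=torch.float32), inv_freq)
    # duplicate each frequency so the tables cover the full feature dimension
    sin = torch.repeat_interleave(torch.sin(angles), 2, dim=-1)
    cos = torch.repeat_interleave(torch.cos(angles), 2, dim=-1)
    return sin, cos
```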
# Copyright (c) 2022 Agora
# Licensed under The MIT License [see LICENSE for details]
from torch import nn
from zeta.nn.embeddings.base import BaseEmbedding
#Other embedding
class NominalEmbedding(BaseEmbedding):
def forward(self,
num_tokens: int,
dim: int) -> nn.Module:
... | zeta-main | zeta/nn/embeddings/nominal_embeddings.py |
import torch
from torch import nn
from zeta.utils.main import exists, l2norm
class AbsolutePositionalEmbedding(nn.Module):
def __init__(self, dim, max_seq_len, l2norm_embed=False):
super().__init__()
self.scale = dim ** -0.5 if not l2norm_embed else 1.
self.max_seq_len = max_seq_len
... | zeta-main | zeta/nn/embeddings/abc_pos_emb.py |
# Copyright (c) 2022 Agora
# Licensed under The MIT License [see LICENSE for details]
import copy
import torch
import torch.nn as nn
def MultiwayWrapper(args, module, dim=1):
if args.multiway:
return MultiwayNetwork(module, dim=dim)
return module
def set_split_position(position):
def apply_fn(... | zeta-main | zeta/nn/embeddings/multiway_network.py |
# Copyright (c) 2022 Agora
# Licensed under The MIT License [see LICENSE for details]
import torch.nn as nn
from abc import ABC, abstractmethod
import bitsandbytes as bnb
class BaseEmbedding(ABC):
@abstractmethod
def forward(self, num_tokens: int, dim: int) -> nn.Module:
#custom embedding function
... | zeta-main | zeta/nn/embeddings/embedding.py |
# embeddings
from zeta.nn.embeddings.rope import RotaryEmbedding
from zeta.nn.embeddings.xpos_relative_position import XPOS, rotate_every_two, apply_rotary_pos_emb
from zeta.nn.embeddings.multiway_network import MultiwayEmbedding, MultiwayNetwork, MultiwayWrapper
from zeta.nn.embeddings.bnb_embedding import BnBEmbeddi... | zeta-main | zeta/nn/embeddings/__init__.py |
import torch
from torch import nn
class VisionEmbedding(nn.Module):
"""Image to Patch Embedding"""
def __init__(
self,
img_size=224,
patch_size=16,
in_chans=3,
embed_dim=768,
contain_mask_token=False,
prepend_cls_token=False,
):
super().__ini... | zeta-main | zeta/nn/embeddings/vision_emb.py |
# from paper: https://arxiv.org/pdf/2308.10882.pdf
import torch
from torch import nn
class TruncatedRotaryEmbedding(nn.Module):
def __init__(
self,
dim,
a,
b,
rho
):
super().__init__()
self.dim = dim
self.a = a
self.b... | zeta-main | zeta/nn/embeddings/truncated_rope.py |
# Copyright (c) 2022 Agora
# Licensed under The MIT License [see LICENSE for details]
import bitsandbytes as bnb
from zeta.nn.embeddings.base import BaseEmbedding
class BnBEmbedding(BaseEmbedding):
def forward(self,
num_tokens: int,
dim: int,
padding_idx) -> bnb.... | zeta-main | zeta/nn/embeddings/bnb_embedding.py |
import torch
from torch import nn, einsum
from einops import rearrange
def exists(val):
return val is not None
class SinusoidalEmbeddings(nn.Module):
def __init__(
self,
dim,
scale_base = None,
use_xpos = False
):
super().__init__()
inv_freq = 1. / (10000 *... | zeta-main | zeta/nn/embeddings/sinusoidal.py |
#prompts to jquesnelle
# https://github.com/jquesnelle/yarn/blob/master/scaled_rope/LlamaDynamicYaRNScaledRotaryEmbedding.py
import torch
from torch import nn
import math
#helpers
#inverse dim formula to find dim based on number of rotations
def find_correction_dim(
num_rotations,
dim,
base=... | zeta-main | zeta/nn/embeddings/yarn.py |
# from paper: https://arxiv.org/pdf/2308.10882.pdf
import torch
from torch import nn
from einops import rearrange
def exists(val):
return val is not None
class RotaryEmbedding(nn.Module):
def __init__(
self,
dim,
use_xpos=False,
scale_base=512,
interpolation_factor=1.,... | zeta-main | zeta/nn/embeddings/rope.py |
from torch import nn
from abc import ABC, abstractmethod
class BaseEmbedding(ABC):
@abstractmethod
def forward(self, num_tokens: int, dim: int) -> nn.Module:
#custom embedding function
embedding = ...
return embedding | zeta-main | zeta/nn/embeddings/base.py |
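This BaseEmbedding mirrors the abstract class in embedding.py above, minus the bitsandbytes import. A minimal concrete subclass in the same style as the NominalEmbedding shown earlier (the class name here is illustrative, not part of the repo):

```python
from torch import nn
from zeta.nn.embeddings.base import BaseEmbedding

class PlainEmbedding(BaseEmbedding):  # illustrative subclass
    def forward(self, num_tokens: int, dim: int) -> nn.Module:
        return nn.Embedding(num_tokens, dim)

embedding_layer = PlainEmbedding().forward(num_tokens=32000, dim=512)
```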
import torch
from torch import nn
class VisionLanguageEmbedding(nn.Module):
def __init__(self, text_embed, vision_embed):
super().__init__()
self.text_embed = text_embed
self.vision_embed = vision_embed
def forward(self, textual_tokens, visual_tokens, **kwargs):
if textual_toke... | zeta-main | zeta/nn/embeddings/vis_lang_emb.py |
import torch
from torch import nn
import torch.nn.functional as F
class PositionalEmbedding(nn.Embedding):
def forward(
self,
x,
positions=None,
**kwargs,
):
if positions is None:
# being consistent with Fairseq, which starts from 2.
positions = (... | zeta-main | zeta/nn/embeddings/positional.py |
from zeta.nn.architecture.attn_layers import AttentionLayers
class Decoder(AttentionLayers):
def __init__(self, **kwargs):
assert 'causal' not in kwargs, 'cannot set causality on decoder'
super().__init__(causal=True, **kwargs)
| zeta-main | zeta/nn/architecture/decoder.py |
import math
from collections import namedtuple
from dataclasses import dataclass
from functools import partial, wraps
from inspect import isfunction
from random import random
from typing import Callable, List, Optional
import torch
import torch.nn.functional as F
from einops import rearrange, reduce, repeat
from torch... | zeta-main | zeta/nn/architecture/attn_layers.py |
# Copyright (c) 2022 Agora
# Licensed under The MIT License [see LICENSE for details]
import torch.nn as nn
from zeta.nn.architecture.decoder import Decoder
from zeta.nn.architecture.encoder import Encoder
class EncoderDecoder(nn.Module):
def __init__(
self,
args,
encoder_embed_tokens=No... | zeta-main | zeta/nn/architecture/encoder_decoder.py |
import torch
from torch import nn
import torch.nn.functional as F
from einops import rearrange
from zeta.nn.attention.attend import Attend as Attention
# functions and decorators
def exists(val):
return val is not None
def default(val, d):
return val if exists(val) else d
def identity(t, *args, **kwargs):... | zeta-main | zeta/nn/architecture/parallel_transformer.py |
| zeta-main | zeta/nn/architecture/__init__.py |
from zeta.nn.architecture.attn_layers import AttentionLayers
class Encoder(AttentionLayers):
def __init__(self,
**kwargs):
assert 'causal' not in kwargs, 'cannot set causality on encoder'
super().__init__(causal=False, **kwargs)
| zeta-main | zeta/nn/architecture/encoder.py |
from einops import rearrange
from torch import nn
import torch
import torch.nn.functional as F
from zeta.nn.attention.local_attention_mha import LocalMHA
from zeta.nn.biases.dynamic_position_bias import DynamicPositionBias
from zeta.nn.modules import feedforward_network
from zeta.utils.main import eval_decorator, exist... | zeta-main | zeta/nn/architecture/local_transformer.py |
from inspect import isfunction
import math
from abc import ABC, abstractmethod
from collections import namedtuple
from dataclasses import dataclass
from functools import partial, wraps
from random import random
from typing import Callable, List, Optional
import torch
import torch.nn.functional as F
from einops import ... | zeta-main | zeta/nn/architecture/transformer.py |
import math
from functools import partial
from itertools import zip_longest
from typing import Tuple
import torch
import torch.nn.functional as F
from einops import rearrange, repeat
from einops.layers.torch import Rearrange
from torch import einsum, nn
from vector_quantize_pytorch import RandomProjectionQuantizer
fr... | zeta-main | zeta/nn/architecture/hierarchical_transformer.py |
from zeta.nn.architecture.attn_layers import AttentionLayers
class CrossAttender(AttentionLayers):
def __init__(self, **kwargs):
super().__init__(cross_attend=True, only_cross=True, **kwargs)
| zeta-main | zeta/nn/architecture/cross_attender.py |
import torch
import torch.nn.functional as F
from einops import pack, rearrange, unpack
from torch import nn
from zeta.utils.main import ( # noqa: E402
eval_decorator,
exists,
once, # noqa: F401
)
from zeta.utils.main import top_a, top_k, top_p
class AutoregressiveWrapper(nn.Module):
def __init_... | zeta-main | zeta/nn/architecture/auto_regressive_wrapper.py |
# Copyright (c) 2022 Agora
# Licensed under The MIT License [see LICENSE for details]
import math
import torch
import torch.nn as nn
from zeta.nn.biases.base import BaseBias
class RelativePositionBias(BaseBias):
def __init__(
self,
        bidirectional: bool = True,
        num_buckets: int = 32,
... | zeta-main | zeta/nn/biases/relative_position_bias.py |
| zeta-main | zeta/nn/biases/__init__.py |
import torch
from torch import nn
from einops import rearrange
class DynamicPositionBias(nn.Module):
def __init__(
self,
dim,
heads
):
super().__init__()
self.mlp = nn.Sequential(
nn.Linear(1, dim),
nn.SiLU(),
nn.Linear(dim, dim),
... | zeta-main | zeta/nn/biases/dynamic_position_bias.py |
from abc import abstractmethod
import torch.nn as nn
class BaseBias(nn.Module):
@abstractmethod
def __init__(self,
num_heads):
super().__init__()
        self.num_heads = num_heads
@abstractmethod
def forward(self):
pass
| zeta-main | zeta/nn/biases/base.py |
import math
import torch
from torch import nn, Tensor
import torch.nn.functional as F
from zeta.nn.biases.base import BaseBias
from einops import rearrange
######## Helpers
def exists(val):
return val is not None
def pad_at_dim(t, pad, dim=-1, value=0.):
dims_from_right = (- dim - 1) if dim < 0 else (t... | zeta-main | zeta/nn/biases/alibi.py |
#from lucirains rt-1
from torch import nn
from einops import pack, unpack, repeat, reduce, rearrange
#helpers
def pack_one(x, pattern):
return pack([x], pattern)
def unpack_one(x, ps, pattern):
return unpack(x, ps, pattern)[0]
#main
class TokenLearner(nn.Module):
def __init__(
self,
... | zeta-main | zeta/nn/modules/token_learner.py |
from torch import nn
class Residual(nn.Module):
def __init__(self, fn):
super().__init__()
self.fn = fn
def forward(self, x):
return self.fn(x) + x
| zeta-main | zeta/nn/modules/residual.py |
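Residual simply adds a sub-layer's output back onto its input. A short usage sketch with an arbitrary feedforward block (shapes chosen for illustration):

```python
import torch
from torch import nn
from zeta.nn.modules.residual import Residual

# wrap any shape-preserving sub-layer to get a skip connection around it
block = Residual(nn.Sequential(nn.Linear(512, 2048), nn.GELU(), nn.Linear(2048, 512)))
x = torch.randn(2, 128, 512)
assert block(x).shape == x.shape  # fn(x) + x keeps the input shape
```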
import torch.nn as nn
class AdaptiveParameterList(nn.ParameterList):
"""
A container that allows for parameters to adapt their values
based on the learning process
Example:
```
def adaptation_function(param):
return param * 0.9
adaptive_params = AdaptiveParamet... | zeta-main | zeta/nn/modules/adaptive_parameter_list.py |
import torch
from torch import nn
class LN(nn.Module):
    def __init__(self,
                 dim=None,
                 eps=1e-5
                 ):
        super().__init__()
        # hold a real LayerNorm instance instead of rebuilding one per call
        self.norm = nn.LayerNorm(dim, eps)
    def forward(self, x):
        return self.norm(x)
def subln(x):
    # pre-norm residual: normalize over the feature dim, then add the input back
    return x + LN(x.shape[-1])(x) | zeta-main | zeta/nn/modules/sublayer.py |
import torch
from torch import nn
class DynamicModule(nn.Module):
"""
A container that allows for dynamic addition, removal, and modification
of modules
examples
````
dynamic_module = DynamicModule()
dynamic_module.add('linear', nn.Linear(10, 10))
dynamic_module.add('relu', nn.ReLU())
... | zeta-main | zeta/nn/modules/dynamic_module.py |
# Copyright (c) 2022 Agora
# Licensed under The MIT License [see LICENSE for details]
import torch.nn as nn
from timm.models.layers import drop_path
class DropPath(nn.Module):
"""Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks)."""
def __init__(self, drop_prob=None):
... | zeta-main | zeta/nn/modules/droppath.py |
# modules
from zeta.nn.modules.lora import Lora
from zeta.nn.modules.feedforward_network import FeedForwardNetwork
from zeta.nn.modules.droppath import DropPath
from zeta.nn.modules.token_learner import TokenLearner
| zeta-main | zeta/nn/modules/__init__.py |
import torch
from torch import nn
from einops.layers.torch import Reduce, Rearrange
class DropSample(nn.Module):
def __init__(self, prob=0):
super().__init__()
self.prob = prob
def forward(self, x):
device = x.device
if self.prob == 0. or (not self.training):
return x
... | zeta-main | zeta/nn/modules/mbconv.py |
import torch
from torch import nn
class Lora(nn.Module):
def __init__(
self,
dim,
dim_out,
r=8,
alpha=None
):
super().__init__()
        self.scale = alpha / r if alpha is not None else 1.
self.A = nn.Parameter(torch.randn(dim, r))
self.B = nn.Parameter(torch.randn(r, dim_... | zeta-main | zeta/nn/modules/lora.py |
import torch
import torch.nn.functional as F
def token_shift(t):
    # split the feature dimension, then shift one half along the sequence
    t, t_shift = t.chunk(2, dim=-1)
t_shift = F.pad(t_shift, (0, 0, 1, -1))
return torch.cat((t, t_shift), dim=-1)
| zeta-main | zeta/nn/modules/token_shift.py |
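token_shift halves the feature dimension and shifts one half forward by a single position along the sequence, letting each timestep mix in features from its predecessor. A brief usage sketch (batch, sequence, and feature sizes are arbitrary):

```python
import torch
from zeta.nn.modules.token_shift import token_shift

x = torch.randn(2, 16, 64)   # (batch, seq_len, dim)
out = token_shift(x)
assert out.shape == x.shape  # the shift preserves the overall shape
```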
# Copyright (c) 2022 Agora
# Licensed under The MIT License [see LICENSE for details]
import torch
import torch.nn as nn
import torch.nn.functional as F
try:
from apex.normalization import FusedLayerNorm as LayerNorm
except ModuleNotFoundError:
from torch.nn import LayerNorm
from .xmoe.global_groups import g... | zeta-main | zeta/nn/modules/feedforward_network.py |
import torch
from torch import nn
import torch.nn.functional as F
class RMSNorm(nn.Module):
def __init__(
self,
dim,
groups=1
):
super().__init__()
self.scale = dim ** -0.5
self.gamma = nn.Parameter(torch.ones(groups, dim, 1))
def forward(self, x):
... | zeta-main | zeta/nn/modules/rms_norm.py |
import torch.distributed as dist
def _find_my_group_index(grouped_ranks):
my_rank = dist.get_rank()
for i, group in enumerate(grouped_ranks):
if my_rank in group:
return i
raise RuntimeError
def get_moe_group(moe_expert_count=None):
if dist.is_initialized():
if not hasattr... | zeta-main | zeta/nn/modules/xmoe/global_groups.py |
# Copyright (c) 2022 Agora
# Licensed under The MIT License [see LICENSE for details]
| zeta-main | zeta/nn/modules/xmoe/__init__.py |