| python_code (string, lengths 0-992k) | repo_name (string, lengths 8-46) | file_path (string, lengths 5-162) |
|---|---|---|
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, ... | CyberTron-master | cybertron/models/robotics_transformer/film_efficientnet/film_efficientnet_encoder.py |
from setuptools import setup
setup(name='gato', packages=['gato'])
| CyberTron-master | cybertron/models/gato2/setup.py |
import argparse
import random
import os
import wandb
import torch
from gato.utils.utils import DotDict
from gato.policy.gato_policy import GatoPolicy
from gato.envs.setup_env import load_envs
from gato.training.trainer import Trainer
from gato.tasks.control_task import ControlTask
def main(args):
exp_id = random... | CyberTron-master | cybertron/models/gato2/train.py |
import argparse
import os
import json
import time
import torch
from gato.utils.utils import DotDict
from gato.policy.gato_policy import GatoPolicy
from gato.envs.setup_env import load_envs
from gato.tasks.control_task import ControlTask
def main(args):
# load checkpoint
gato_checkpoint = torch.load(args.mod... | CyberTron-master | cybertron/models/gato2/eval.py |
| CyberTron-master | cybertron/models/gato2/gato/__init__.py |
from abc import ABC
class Task(ABC):
def __init__(self):
pass
def sample_batch(self, vanilla_batch_size, prompted_batch_size, device, max_tokens=1024):
pass
def evaluate(self, model, n_iterations):
pass
| CyberTron-master | cybertron/models/gato2/gato/tasks/task.py |
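The `task.py` row above defines the abstract interface every gato2 task implements. As a quick illustration, here is a hypothetical toy subclass; the class name, batch format, and metric are our assumptions, not part of the repo:

```python
import torch
from abc import ABC

class Task(ABC):  # interface as defined in gato/tasks/task.py above
    def __init__(self):
        pass
    def sample_batch(self, vanilla_batch_size, prompted_batch_size, device, max_tokens=1024):
        pass
    def evaluate(self, model, n_iterations):
        pass

class RandomTokenTask(Task):
    """Hypothetical smoke-test task that serves random token batches."""
    def __init__(self, vocab_size=1024):
        super().__init__()
        self.vocab_size = vocab_size

    def sample_batch(self, vanilla_batch_size, prompted_batch_size, device, max_tokens=1024):
        n = vanilla_batch_size + prompted_batch_size
        return torch.randint(0, self.vocab_size, (n, max_tokens), device=device)

    def evaluate(self, model, n_iterations):
        return {'dummy_loss': 0.0}  # a real task would roll out the model

batch = RandomTokenTask().sample_batch(4, 4, device='cpu')
print(batch.shape)  # torch.Size([8, 1024])
```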
import gymnasium as gym
import numpy as np
import torch
import minari
from minari.dataset.minari_dataset import EpisodeData
from gato.tasks.task import Task
supported_spaces = [
gym.spaces.Box,
gym.spaces.Discrete,
]
def tokens_per_space(space):
if type(space) == gym.spaces.Box:
return space.shap... | CyberTron-master | cybertron/models/gato2/gato/tasks/control_task.py |
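`tokens_per_space` is cut off mid-return. A plausible completion, consistent with the visible `space.shap...` prefix and the two supported spaces, is sketched below; the `Box` branch assumes flat observation/action spaces, and the body is our guess rather than the repo's verified code:

```python
import gymnasium as gym

def tokens_per_space(space):
    # Assumed completion of the truncated function above.
    if type(space) == gym.spaces.Box:
        return space.shape[0]  # one token per (flat) Box dimension
    elif type(space) == gym.spaces.Discrete:
        return 1               # a Discrete value is a single token
    else:
        raise NotImplementedError(f'Unsupported space: {space}')

print(tokens_per_space(gym.spaces.Box(low=-1, high=1, shape=(17,))))  # 17
print(tokens_per_space(gym.spaces.Discrete(18)))                      # 1
```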
| CyberTron-master | cybertron/models/gato2/gato/tasks/__init__.py |
| CyberTron-master | cybertron/models/gato2/gato/training/__init__.py |
import time
import os
import wandb
import numpy as np
import torch
from gato.utils.utils import save_model
class Trainer:
def __init__(
self,
model,
optimizer,
tasks,
exp_name,
args
):
self.model = model
self.optimizer = optimizer
self.t... | CyberTron-master | cybertron/models/gato2/gato/training/trainer.py |
import os
import json
import torch
class DotDict(dict):
"""dot.notation access to dictionary attributes"""
__getattr__ = dict.get
__setattr__ = dict.__setitem__
__delattr__ = dict.__delitem__
def save_model(model, save_dir, save_name, args):
# create save dir if not exists
if not os.path.ex... | CyberTron-master | cybertron/models/gato2/gato/utils/utils.py |
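`DotDict` is small but worth a usage note: because `__getattr__` is bound to `dict.get`, missing keys silently return `None` instead of raising `AttributeError`. A self-contained demo:

```python
class DotDict(dict):
    """dot.notation access to dictionary attributes (as defined above)."""
    __getattr__ = dict.get
    __setattr__ = dict.__setitem__
    __delattr__ = dict.__delitem__

args = DotDict({'lr': 3e-4, 'device': 'cuda'})
print(args.lr)             # 0.0003, via dict.get
args.batch_size = 64       # routed through dict.__setitem__
print(args['batch_size'])  # 64
print(args.missing)        # None, not AttributeError -- beware typos
```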
# coding=utf-8
# Copyright 2018 The OpenAI Team Authors and HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License... | CyberTron-master | cybertron/models/gato2/gato/transformers/trajectory_gpt2.py |
| CyberTron-master | cybertron/models/gato2/gato/transformers/__init__.py |
from __future__ import annotations
from typing import Optional, Union, Tuple
import math
import torch
import torch.nn as nn
from transformers.models.openai import OpenAIGPTConfig, OpenAIGPTPreTrainedModel
from transformers.models.openai.modeling_openai import (
Attention as _Attention,
BaseModelOutput,
Conv... | CyberTron-master | cybertron/models/gato2/gato/transformers/gpt.py |
import gymnasium as gym
from gymnasium.wrappers import AtariPreprocessing, TransformReward
import numpy as np
def load_atari_env(env_name: str, load_kwargs: dict):
assert 'v5' in env_name
repeat_action_probability = 0 # 0.25
clip_rewards = True
repeat_action_probability = load_kwargs.get('repeat_ac... | CyberTron-master | cybertron/models/gato2/gato/envs/atari.py |
import minari
import gymnasium as gym
from gato.envs.atari import load_atari_env
custom_env_loaders = {
'ALE/': load_atari_env
}
def load_envs(dataset_names: list, load_kwargs: dict = {}):
envs = []
datasets = []
for dataset_name in dataset_names:
env, dataset = load_env_dataset(dataset_name... | CyberTron-master | cybertron/models/gato2/gato/envs/setup_env.py |
| CyberTron-master | cybertron/models/gato2/gato/data/__init__.py |
import os
import gdown
datasets = {
'd4rl_halfcheetah-expert-v2': 'https://drive.google.com/drive/folders/1GqE2c3oqutBYLOvP-l6cSZ1F7mqs7DOS?usp=drive_link',
'd4rl_hopper-expert-v2': 'https://drive.google.com/drive/folders/1vl4GsvHDE6Pm7UAzDE1YxC8AIaGknMrp?usp=drive_link',
'd4rl_walker2d-expert-v2': 'https:... | CyberTron-master | cybertron/models/gato2/gato/data/download_custom_datasets.py |
import torch
import math
def mu_law(tensor, mu=100, M=256):
    return torch.sign(tensor) * torch.log(1 + mu * torch.abs(tensor)) / math.log(1 + mu * M)
class ContinuousTokenizer:
def __init__(self, use_mu_law=True, mu=100, M=256, n_bins=1024, offset=None):
self.use_mu_law = use_mu_la... | CyberTron-master | cybertron/models/gato2/gato/policy/input_tokenizers.py |
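The mu-law transform above compands continuous values so that any input with |x| <= M lands in [-1, 1], which the tokenizer can then bin uniformly (n_bins=1024 by default). A quick worked check with the defaults mu=100, M=256:

```python
import math
import torch

def mu_law(tensor, mu=100, M=256):  # as defined in input_tokenizers.py above
    return torch.sign(tensor) * torch.log(1 + mu * torch.abs(tensor)) / math.log(1 + mu * M)

x = torch.tensor([-10.0, -1.0, -0.01, 0.0, 0.01, 1.0, 10.0])
print(mu_law(x))
# ~ [-0.6806, -0.4547, -0.0683, 0.0000, 0.0683, 0.4547, 0.6806]
# |x| = M = 256 maps exactly to 1, so the whole working range is squashed
# into [-1, 1] with the finest resolution near zero.
```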
| CyberTron-master | cybertron/models/gato2/gato/policy/__init__.py |
import torch
import torch.nn as nn
import gymnasium as gym
import transformers
# import gato
from gato.transformers import GPT2Model
from gato.policy.embeddings import ImageEmbedding
from gato.policy.input_tokenizers import ContinuousTokenizer
from gato.tasks.control_task import ControlTask
class GatoPolicy(nn.Modul... | CyberTron-master | cybertron/models/gato2/gato/policy/gato_policy.py |
import torch
import torch.nn as nn
from einops import rearrange
import math
class ImageEmbedding(nn.Module):
def __init__(
self,
embed_dim=768,
patch_size=16,
resid_mid_channels=128,
num_groups=32,
position_vocab_size=128,
use_po... | CyberTron-master | cybertron/models/gato2/gato/policy/embeddings.py |
# -*- coding: utf-8 -*-
"""HyenaDNA training & inference example (Public)
This code is adapted from the original Colab tutorial on HyenaDNA. Check that out for an easier entry point into the code.
We provide the code here as an example for those who want something outside Colab, with Hugging Face integration.
Origin... | hyena-dna-main | standalone_hyenadna.py |
#@title Huggingface Pretrained Wrapper
"""
This script is a simple HuggingFace wrapper around a HyenaDNA model, to enable a one-click example
of how to load the pretrained weights and get embeddings.
It will instantiate a HyenaDNA model (model class is in the `standalone_hyenadna.py`), and handle the downloading... | hyena-dna-main | huggingface.py |
import copy
import os
import random
import time
from functools import partial, wraps
from typing import Callable, List, Sequence
import hydra
import numpy as np
import pytorch_lightning as pl
import torch
import torch.nn as nn
import wandb
from hydra.utils import get_original_cwd
from omegaconf import DictConfig, Omeg... | hyena-dna-main | train.py |
import torch
import torch.nn.functional as F
from einops import rearrange
from fftconv import fftconv_fwd, fftconv_bwd
def fftconv_ref(u, k, D, dropout_mask):
seqlen = u.shape[-1]
fft_size = 2 * seqlen
k_f = torch.fft.rfft(k, n=fft_size) / fft_size
u_f = torch.fft.rfft(u.to(dtype=k.dtype), n=fft_siz... | hyena-dna-main | csrc/fftconv/launch_fftconv.py |
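`fftconv_ref` is truncated above. A plausible completion, modeled on the reference FFT convolution that appears again in `src/ops/fftconv.py` further down this table; the trailing gelu and dropout-mask handling are assumptions based on that file, and the shapes in the comments are assumed:

```python
import torch
import torch.nn.functional as F
from einops import rearrange

def fftconv_ref(u, k, D, dropout_mask):
    # u: (batch, H, seqlen) input, k: (H, seqlen) filter, D: (H,) skip term,
    # dropout_mask: (batch, H). Zero-padding to 2*seqlen makes the circular
    # FFT convolution equal to a causal (linear) convolution.
    seqlen = u.shape[-1]
    fft_size = 2 * seqlen
    k_f = torch.fft.rfft(k, n=fft_size) / fft_size
    u_f = torch.fft.rfft(u.to(dtype=k.dtype), n=fft_size)
    y = torch.fft.irfft(u_f * k_f, n=fft_size, norm='forward')[..., :seqlen]
    out = y + u * D.unsqueeze(-1)  # skip connection scaled by D
    return (F.gelu(out) * rearrange(dropout_mask, 'b h -> b h 1')).to(dtype=u.dtype)
```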
# Adapted from https://github.com/NVIDIA/apex/blob/master/setup.py
import torch
from torch.utils.cpp_extension import BuildExtension, CppExtension, CUDAExtension, CUDA_HOME
from setuptools import setup, find_packages
import subprocess
import sys
import warnings
import os
# ninja build does not work unless include_dir... | hyena-dna-main | csrc/fftconv/setup.py |
import math
import re
import numpy as np
# N = 8192
N = 16384
# The case of 0 / N is special: we want to simplify it to 0 / 2 instead of 0 / 1
numerator = np.arange(1, N // 8 + 1)
gcd = np.gcd(numerator, N)
num = numerator // gcd
denom = N // gcd
lut_vals = ['T_2_0'] + [f'T_{d}_{n}' for n, d in zip(num, denom)]
lut_... | hyena-dna-main | csrc/fftconv/lut_code_gen.py |
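The generator reduces each twiddle fraction k/N to lowest terms so that duplicate constants collapse to a single named table entry (and the special 0/N case becomes the prepended 'T_2_0'). A quick check of the naming:

```python
import numpy as np

N = 16384
numerator = np.arange(1, N // 8 + 1)
gcd = np.gcd(numerator, N)
num, denom = numerator // gcd, N // gcd

# k = 12: gcd(12, 16384) = 4, so 12/16384 reduces to 3/4096 and the
# generated constant is named 'T_4096_3'.
k = 12
print(f'T_{denom[k - 1]}_{num[k - 1]}')  # T_4096_3
```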
#!/usr/bin/env python3
import argparse
import yaml
from tqdm import tqdm
import typing as tp
import numpy as np
import pandas as pd
from copy import deepcopy
from collections import OrderedDict
import torch
torch.backends.cuda.matmul.allow_tf32 = True
torch.backends.cudnn.allow_tf32 = True
import torch.nn.functional ... | hyena-dna-main | evals/soft_prompting_genomics.py |
#!/usr/bin/env python3
import argparse
import yaml
from tqdm import tqdm
import typing as tp
import numpy as np
import pandas as pd
from copy import deepcopy
from collections import OrderedDict
import torch
torch.backends.cuda.matmul.allow_tf32 = True
torch.backends.cudnn.allow_tf32 = True
import torch.nn.functional ... | hyena-dna-main | evals/instruction_tuned_genomics.py |
import torch
import argparse
import os
import sys
import yaml
from tqdm import tqdm
import json
from src.models.sequence.long_conv_lm import DNAEmbeddingModel
from src.tasks.decoders import SequenceDecoder
from src.dataloaders import SequenceDataset
import numpy as np
from src.dataloaders.datasets.hg38_char_token... | hyena-dna-main | evals/hg38_inference_decoder.py |
import torch
import argparse
import os
import sys
import yaml
from tqdm import tqdm
import json
sys.path.append(os.environ.get("SAFARI_PATH", "."))
from src.models.sequence.long_conv_lm import ConvLMHeadModel
# from transformers import AutoTokenizer, GPT2LMHeadModel
# from spacy.lang.en.stop_words import STOP_WO... | hyena-dna-main | evals/hg38_inference.py |
import math
import torch
import torch.nn.functional as F
from sklearn.metrics import f1_score, roc_auc_score
from functools import partial
import torchmetrics.functional as tm_f
import torch.distributions as dist
from sklearn.metrics import f1_score, roc_auc_score, matthews_corrcoef
from torchmetrics import Metric
from... | hyena-dna-main | src/tasks/metrics.py |
# Inspired by https://github.com/NVIDIA/NeMo/blob/main/nemo/collections/common/metrics/perplexity.py
# But we compute the perplexity correctly: exp(average(nll)), not average(exp(nll))
# Also adapted from https://github.com/Lightning-AI/metrics/blob/master/src/torchmetrics/text/perplexity.py
# But we pass in the loss t... | hyena-dna-main | src/tasks/torchmetrics.py |
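The distinction the comment draws matters because exp is convex: exp(average(nll)) is the standard corpus perplexity, while average(exp(nll)) overweights hard tokens and, by Jensen's inequality, is always at least as large. A two-token check:

```python
import torch

nll = torch.tensor([0.5, 4.0])    # per-token negative log-likelihoods
correct = torch.exp(nll.mean())   # exp(average(nll)) = exp(2.25) ~ 9.49
wrong = torch.exp(nll).mean()     # average(exp(nll)) ~ (1.65 + 54.60) / 2 ~ 28.12
print(correct.item(), wrong.item())
```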
from typing import Optional, List, Tuple
import math
import functools
import collections
import torch
import torch.nn as nn
import torch.nn.functional as F
from einops import rearrange
from omegaconf import ListConfig
from src.models.nn.components import ReversibleInstanceNorm1dInput, ReversibleInstanceNorm1dOutput, \
... | hyena-dna-main | src/tasks/tasks.py |
import torch
import torch.nn as nn
import torch.nn.functional as F
from einops import rearrange, reduce
import src.models.nn.utils as U
import src.utils as utils
import src.utils.config
import src.utils.train
log = src.utils.train.get_logger(__name__)
class Decoder(nn.Module):
"""This class doesn't do much but ... | hyena-dna-main | src/tasks/decoders.py |
import datetime
import math
from typing import ForwardRef
import torch
from torch import nn
import torch.nn.functional as F
from einops import rearrange, repeat
import src.models.nn.utils as U
import src.utils as utils
import src.utils.config
from src.models.sequence.block import SequenceResidualBlock
from src.models... | hyena-dna-main | src/tasks/encoders.py |
from typing import Any
import pytorch_lightning as pl
from pytorch_lightning.utilities import rank_zero_only
from pytorch_lightning.utilities.parsing import AttributeDict
class ParamsLog(pl.Callback):
""" Log the number of parameters of the model """
def __init__(
self,
total: bool = True,
... | hyena-dna-main | src/callbacks/params.py |
import torch
from pytorch_lightning import Callback, Trainer, LightningModule
import logging
log = logging.getLogger(__name__) # We want a logger for each process, not just the rank 0
def l2_promote():
import ctypes
_libcudart = ctypes.CDLL('libcudart.so')
# Set device limit on the current device
... | hyena-dna-main | src/callbacks/gpu_affinity.py |
### https://github.com/HazyResearch/transformers/blob/master/src/callbacks/wandb_callbacks.py
import glob
import os
from typing import List
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sn
import torch
import wandb
from pytorch_lightning import Callback, Trainer
from pytorch_lightning.loggers ... | hyena-dna-main | src/callbacks/wandb.py |
### https://github.com/HazyResearch/transformers/blob/master/src/callbacks/speed_monitor.py
# Adapted from https://pytorch-lightning.readthedocs.io/en/latest/_modules/pytorch_lightning/callbacks/gpu_stats_monitor.html#GPUStatsMonitor
# We only need the speed monitoring, not the GPU monitoring
import time
from typing i... | hyena-dna-main | src/callbacks/timer.py |
r"""
Sequence Length Warmup by Reloading
====================
Change sequence lengths according to a stage schedule. The stage parameters set the sequence length
and batch size.
TODO (not yet supported):
If batch size is not provided for that stage, calculate the batch size based on the
sequence length reshaping in... | hyena-dna-main | src/callbacks/seqlen_warmup_reload.py |
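The stage schema is cut off above, so the following schedule is purely illustrative; the field names are assumptions drawn from the docstring, which says each stage sets a sequence length and batch size:

```python
# Hypothetical stage schedule for sequence-length warmup; the exact keys
# used by seqlen_warmup_reload.py are not visible above.
stage_params = [
    {'seq_len': 1024,  'batch_size': 256, 'epochs': 2},
    {'seq_len': 4096,  'batch_size': 64,  'epochs': 2},
    {'seq_len': 16384, 'batch_size': 16,  'epochs': 4},
]
# Tokens per step stay constant (seq_len * batch_size == 262144 throughout),
# the usual way to trade length against batch size as training warms up.
assert all(s['seq_len'] * s['batch_size'] == 262144 for s in stage_params)
```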
import pytorch_lightning as pl
from pytorch_lightning.utilities import rank_zero_only
from pytorch_lightning.utilities.parsing import AttributeDict
from omegaconf import OmegaConf
class TrackNorms(pl.Callback):
# TODO do callbacks happen before or after the method in the main LightningModule?
# @rank_zero_onl... | hyena-dna-main | src/callbacks/norms.py |
import numpy as np
from pytorch_lightning.callbacks import Callback
import src.utils as utils
from src.utils import registry
class ProgressiveResizing(Callback):
def __init__(self, stage_params: list):
"""
stage_params is a list of dicts
e.g. stage_params = [
{'resolution': 4... | hyena-dna-main | src/callbacks/progressive_resizing.py |
"""
ET Dataset from Informer Paper.
Dataset: https://github.com/zhouhaoyi/ETDataset
Dataloader: https://github.com/zhouhaoyi/Informer2020
"""
from typing import List
import os
import numpy as np
import pandas as pd
from pandas.tseries import offsets
from pandas.tseries.frequencies import to_offset
import torch
from to... | hyena-dna-main | src/dataloaders/et.py |
from . import et, genomics
from .base import SequenceDataset
| hyena-dna-main | src/dataloaders/__init__.py |
# Adapted from https://github.com/huggingface/transformers/blob/master/examples/pytorch/language-modeling/run_clm.py
# Adapted from https://github.com/HazyResearch/flash-attention/blob/main/training/src/datamodules/language_modeling_hf.py
from pathlib import Path
from typing import Any, List, Union
from torch.utils.dat... | hyena-dna-main | src/dataloaders/genomics.py |
# Adapted from https://github.com/Lightning-AI/lightning/blob/2845e7565dbe6b765ae32870e7d2bc456529c30a/tests/tests_pytorch/utilities/test_auto_restart.py#L1397
from typing import Iterator
import math
import torch
from torch.utils.data import RandomSampler, DistributedSampler
class RandomFaultTolerantSampler(RandomSa... | hyena-dna-main | src/dataloaders/fault_tolerant_sampler.py |
""" Datasets for core experimental results """
import os
import pickle
from functools import partial
from pathlib import Path
import numpy as np
import torch
import torchvision
from einops import rearrange
from einops.layers.torch import Rearrange
from src.utils import is_list, permutations
from torch.nn import funct... | hyena-dna-main | src/dataloaders/base.py |
import torch
import csv
import pandas as pd
import numpy as np
from tqdm import tqdm
import liftover
from pathlib import Path
from pyfaidx import Fasta
from random import randrange, random
def exists(val):
return val is not None
def coin_flip():
return random() > 0.5
string_complement_map = {'A': 'T', 'C': ... | hyena-dna-main | src/dataloaders/datasets/chromatin_profile_dataset.py |
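The complement map is truncated above; together with `coin_flip` it implements reverse-complement augmentation, a standard trick for DNA models since both strands encode the same biology. A sketch follows; the helper name `string_reverse_complement` matches the pattern of these dataset files, but this exact body is our reconstruction:

```python
from random import random

string_complement_map = {'A': 'T', 'C': 'G', 'G': 'C', 'T': 'A',
                        'a': 't', 'c': 'g', 'g': 'c', 't': 'a'}

def coin_flip():
    return random() > 0.5

def string_reverse_complement(seq):
    # Walk the sequence backwards, complementing known bases and passing
    # anything else (e.g. 'N') through unchanged.
    return ''.join(string_complement_map.get(base, base) for base in reversed(seq))

print(string_reverse_complement('ACGTN'))  # 'NACGT'

seq = 'ACGTN'
if coin_flip():                 # augmentation applied with probability 1/2
    seq = string_reverse_complement(seq)
```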
from pyfaidx import Fasta
import torch
from random import random
from pathlib import Path
from src.dataloaders.datasets.hg38_char_tokenizer import CharacterTokenizer
def coin_flip():
return random() > 0.5
# augmentations
string_complement_map = {'A': 'T', 'C': 'G', 'G': 'C', 'T': 'A', 'a': 't', 'c': 'g', 'g': ... | hyena-dna-main | src/dataloaders/datasets/nucleotide_transformer_dataset.py |
from itertools import islice
from functools import partial
import os
import functools
# import json
# from pathlib import Path
# from pyfaidx import Fasta
# import polars as pl
# import pandas as pd
import torch
from random import randrange, random
import numpy as np
from pathlib import Path
from src.dataloaders.data... | hyena-dna-main | src/dataloaders/datasets/genomic_bench_dataset.py |
"""
From: https://github.com/dariush-bahrami/character-tokenizer/blob/master/charactertokenizer/core.py
CharacterTokenzier for Hugging Face Transformers.
This is heavily inspired from CanineTokenizer in transformers package.
"""
import json
import os
from pathlib import Path
from typing import Dict, List, Optional, S... | hyena-dna-main | src/dataloaders/datasets/hg38_char_tokenizer.py |
from pathlib import Path
from pyfaidx import Fasta
import polars as pl
import pandas as pd
import torch
from random import randrange, random
import numpy as np
"""
Dataset for sampling arbitrary intervals from the human genome.
"""
# helper functions
def exists(val):
return val is not None
def coin_flip():... | hyena-dna-main | src/dataloaders/datasets/hg38_dataset.py |
# Inspired by https://github.com/NVIDIA/Megatron-LM/blob/main/tasks/zeroshot_gpt/datasets.py
# Except we don't pad the last block and don't use overlapping eval
# And we return both the input and the target
import math
import numpy as np
import torch
class LMDataset(torch.utils.data.Dataset):
def __init__(self,... | hyena-dna-main | src/dataloaders/datasets/lm_dataset.py |
import torch
from random import random, randint
import numpy as np
from pathlib import Path
from src.dataloaders.datasets.hg38_char_tokenizer import CharacterTokenizer
from genomic_benchmarks.loc2seq import download_dataset
from genomic_benchmarks.data_check import is_downloaded
"""
In-Context learning version of G... | hyena-dna-main | src/dataloaders/datasets/icl_genomics_dataset.py |
from itertools import islice
from functools import partial
# import tensorflow as tf
import os
import functools
import json
from pathlib import Path
from pyfaidx import Fasta
import polars as pl
import pandas as pd
import torch
from random import randrange, random, randint
import numpy as np
from src.dataloaders.datase... | hyena-dna-main | src/dataloaders/datasets/hg38_icl_dataset.py |
import os
from pathlib import Path
from pyfaidx import Fasta
import torch
import shutil
import gzip
import random
from typing import Optional, Union, Dict, List
from src.dataloaders.datasets.hg38_char_tokenizer import CharacterTokenizer
import collections
"""
Dataset that randomly samples sequences of length (X) from ... | hyena-dna-main | src/dataloaders/datasets/species_dataset.py |
from pathlib import Path
from pyfaidx import Fasta
import torch
"""
Just a fixed length dataset for 2 test chromosomes, to ensure the test set is the same.
"""
# helper functions
def exists(val):
return val is not None
class HG38FixedDataset(torch.utils.data.Dataset):
'''
Loop thru bed file, retrieve... | hyena-dna-main | src/dataloaders/datasets/hg38_fixed_dataset.py |
# Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by... | hyena-dna-main | src/dataloaders/utils/vocabulary.py |
"""Utilities for special optimizer hyperparameters.
group_parameters_for_optimizer is a modification of timm's optimizer logic, which is currently unused
add_optimizer_hooks is an improved version that uses this codebase's _optim dictionary
"""
import inspect
import torch.nn as nn
import hydra
def add_optimizer_h... | hyena-dna-main | src/utils/optim_groups.py |
""" Utilities for dealing with collection objects (lists, dicts) and configs """
from typing import Sequence, Mapping, Optional, Callable
import functools
import hydra
from omegaconf import ListConfig, DictConfig
# TODO this is usually used in a pattern where it's turned into a list, so can just do that here
def is_li... | hyena-dna-main | src/utils/config.py |
optimizer = {
"adam": "torch.optim.Adam",
"adamw": "torch.optim.AdamW",
"rmsprop": "torch.optim.RMSprop",
"sgd": "torch.optim.SGD",
"lamb": "src.utils.optim.lamb.JITLamb",
}
scheduler = {
"constant": "transformers.get_constant_schedule",
"plateau": "torch.optim.lr_scheduler.ReduceLROnPlatea... | hyena-dna-main | src/utils/registry.py |
from .config import is_list, is_dict, to_list, to_dict, get_class, instantiate
| hyena-dna-main | src/utils/__init__.py |
import math
import numpy as np
import torch
### Bit reversal permutation
def bitreversal_po2(n):
m = int(math.log(n)/math.log(2))
perm = np.arange(n).reshape(n,1)
for i in range(m):
n1 = perm.shape[0]//2
perm = np.hstack((perm[:n1],perm[n1:]))
return perm.squeeze(0)
def bitreversal_p... | hyena-dna-main | src/utils/permutations.py |
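`bitreversal_po2` builds the bit-reversal permutation by repeatedly interleaving the two halves of the index array; after log2(n) rounds, position i holds the index whose bits are those of i reversed. Quick check:

```python
import math
import numpy as np

def bitreversal_po2(n):  # as defined in src/utils/permutations.py above
    m = int(math.log(n) / math.log(2))
    perm = np.arange(n).reshape(n, 1)
    for i in range(m):
        n1 = perm.shape[0] // 2
        perm = np.hstack((perm[:n1], perm[n1:]))
    return perm.squeeze(0)

print(bitreversal_po2(8))  # [0 4 2 6 1 5 3 7]; e.g. 1 = 0b001 -> 0b100 = 4
```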
# Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by... | hyena-dna-main | src/utils/distributed.py |
""" Utils for the training loop. Copied from https://github.com/HazyResearch/transformers/blob/master/src/utils/utils.py """
import logging
import os
import warnings
from typing import List, Sequence
import torch.nn as nn
import pytorch_lightning as pl
import rich.syntax
import rich.tree
from omegaconf import DictConf... | hyena-dna-main | src/utils/train.py |
import torch
import torch.utils.benchmark as benchmark
def _get_gpu_mem(synchronize=True, empty_cache=True):
return torch.cuda.memory_allocated() / (
(2**20) * 1000
), torch.cuda.memory_cached() / ((2**20) * 1000)
def _generate_mem_hook(handle_ref, mem, idx, hook_type, exp):
def hook(self, *args... | hyena-dna-main | src/utils/profiling.py |
# Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by... | hyena-dna-main | src/utils/optim/lamb.py |
"""Custom learning rate schedulers"""
import math
import warnings
import torch
from timm.scheduler import CosineLRScheduler
# https://pytorch.org/docs/stable/_modules/torch/optim/lr_scheduler.html
class CosineWarmup(torch.optim.lr_scheduler.CosineAnnealingLR):
def __init__(self, optimizer, T_max, eta_min=0, wa... | hyena-dna-main | src/utils/optim/schedulers.py |
""" Implementations of different types of residual functions. """
import torch
from torch import nn
class Residual(nn.Module):
""" Residual connection with constant affine weights. Can simulate standard residual, no residual, and "constant gates". """
def __init__(self, i_layer, d_input, d_model, alpha=1.0, ... | hyena-dna-main | src/models/nn/residual.py |
# Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by... | hyena-dna-main | src/models/nn/adaptive_softmax.py |
from .components import LinearActivation, Activation, Normalization, DropoutNd
| hyena-dna-main | src/models/nn/__init__.py |
""" Utility wrappers around modules to let them handle Args and extra arguments """
import inspect
from functools import wraps
import torch
from torch import nn
def wrap_kwargs(f):
"""
Given a callable f that can consume some named arguments,
wrap it with a kwargs that passes back any unused args
EXA... | hyena-dna-main | src/models/nn/utils.py |
""" Defines flexible gating mechanisms based on ideas from LSSL paper and UR-LSTM paper https://arxiv.org/abs/1910.09890 """
import torch
import torch.nn as nn
class Gate(nn.Module):
""" Implements gating mechanisms. TODO update this with more detailed description with reference to LSSL paper when it's on arxiv
... | hyena-dna-main | src/models/nn/gate.py |
"""Implementations of several types of Discrete Sin/Cosine Transforms with various reductions to FFT.
Currently not used by S4
"""
import torch
import torch.nn as nn
import numpy as np
import scipy.fft
from einops import rearrange, repeat
class DCT(nn.Module):
""" Reductions adapted from https://dsp.stackexchang... | hyena-dna-main | src/models/nn/dxt.py |
""" Utility nn components, in particular handling activations, initializations, and normalization layers """
from functools import partial
import math
from typing import ForwardRef
import torch
import torch.nn as nn
import torch.nn.functional as F
from einops import rearrange
from opt_einsum import contract
def stoc... | hyena-dna-main | src/models/nn/components.py |
# Copyright (c) 2023, Tri Dao, Dan Fu.
# Simplified, mostly standalone version of LongConvLM for synthetics.
import math
from functools import partial
from collections import namedtuple
import torch
import torch.nn as nn
import torch.nn.functional as F
from torchvision.ops import StochasticDepth
from einops import ... | hyena-dna-main | src/models/sequence/simple_lm.py |
""" Implementation of FFN block in the style of Transformers """
from functools import partial
from torch import nn
from src.models.sequence.base import SequenceModule
from src.models.nn import LinearActivation, DropoutNd
class FF(SequenceModule):
def __init__(self, d_input, expand=2, d_output=None, transposed=Fa... | hyena-dna-main | src/models/sequence/ff.py |
'''PyTorch version of the block FFT convolution as described in the H3 paper.'''
import torch
from einops import rearrange
import math
from torch import nn
from src.models.nn import Activation
from src.utils.train import OptimModule
def ref_dft_matrix(N, H=1):
"""Compute the DFT matrix of size N x N.
Thi... | hyena-dna-main | src/models/sequence/block_fft.py |
from .base import SequenceModule, TransposedModule
from .model import SequenceModel
from .ff import FF
| hyena-dna-main | src/models/sequence/__init__.py |
from functools import partial
import torch
import torch.nn as nn
from flash_attn.utils.generation import GenerationMixin
from flash_attn.utils.distributed import sync_shared_params
try:
from flash_attn.ops.fused_dense import ColumnParallelLinear
except ImportError:
ColumnParallelLinear = None
# grab all func... | hyena-dna-main | src/models/sequence/dna_embedding.py |
import torch
import torch.nn as nn
import torch.nn.functional as F
from einops import rearrange
import opt_einsum as oe
optimized = True
if optimized:
contract = oe.contract
else:
contract = torch.einsum
from src.models.nn import LinearActivation, Activation, DropoutNd
from src.models.sequence.block_fft impo... | hyena-dna-main | src/models/sequence/long_conv.py |
import copy
import math
import re
from functools import partial
from collections import namedtuple, OrderedDict
from collections.abc import Sequence
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.checkpoint import checkpoint
from transformers.models.gpt2.configuration_gpt2 import... | hyena-dna-main | src/models/sequence/long_conv_lm.py |
""" Isotropic deep sequence model backbone, in the style of ResNets / Transformers.
The SequenceModel class implements a generic (batch, length, d_input) -> (batch, length, d_output) transformation
"""
from functools import partial
import torch
import torch.nn as nn
from einops import rearrange
from src.utils.confi... | hyena-dna-main | src/models/sequence/model.py |
import torch
import torch.nn as nn
import torch.nn.functional as F
from einops import repeat
from src.utils.train import OptimModule
class LongConvKernel(OptimModule):
def __init__(
self,
H,
L,
channels=1,
learning_rate=None,
lam=0.1,
causal=True,
... | hyena-dna-main | src/models/sequence/long_conv_kernel.py |
import math
import sys
from re import U
import torch
import torch.nn as nn
import torch.nn.functional as F
from functools import partial
from einops import rearrange, repeat
try:
from src.ops.fftconv import fftconv_ref, fftconv_func, fftconv_heads_ref
except ImportError:
fftconv_func = None
try:
from f... | hyena-dna-main | src/models/sequence/hyena.py |
""" Implements a full residual block around a black box layer
Configurable options include:
normalization position: prenorm or postnorm
normalization type: batchnorm, layernorm etc.
subsampling/pooling
residual options: feedforward, residual, affine scalars, depth-dependent scaling, etc.
"""
from torch import nn
fro... | hyena-dna-main | src/models/sequence/block.py |
"""Implements downsampling and upsampling on sequences."""
import torch
from torch import nn
import torch.nn.functional as F
from einops import rearrange, repeat, reduce
from src.models.sequence import SequenceModule
from src.models.nn import LinearActivation
""" Simple pooling functions that just downsample or repe... | hyena-dna-main | src/models/sequence/pool.py |
from torch import nn
import functools
class SequenceModule(nn.Module):
"""Abstract sequence model class. All models must adhere to this interface
A SequenceModule is generally a model that transforms an input of shape
(n_batch, l_sequence, d_model) to (n_batch, l_sequence, d_output)
REQUIRED methods ... | hyena-dna-main | src/models/sequence/base.py |
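Below is a minimal module honoring the (n_batch, l_sequence, d_model) -> (n_batch, l_sequence, d_output) contract from the docstring. The full list of required methods is cut off above, so this sketch assumes only `forward` plus `d_model`/`d_output` attributes and an optional recurrent state:

```python
import torch
from torch import nn

class LinearSequenceLayer(nn.Module):
    """Hypothetical minimal SequenceModule-style layer."""
    def __init__(self, d_model, d_output):
        super().__init__()
        self.d_model, self.d_output = d_model, d_output
        self.proj = nn.Linear(d_model, d_output)

    def forward(self, x, state=None):
        # Position-wise map; a real SequenceModule may also update `state`.
        return self.proj(x), state

y, _ = LinearSequenceLayer(64, 128)(torch.randn(2, 16, 64))
print(y.shape)  # torch.Size([2, 16, 128])
```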
""" Wrapper around nn.MultiheadAttention to adhere to SequenceModule interface. """
import torch
import torch.nn.functional as F
from torch import nn
import hydra
from src.models.sequence.base import SequenceModule, TransposedModule
import src.models.nn.utils as U
from einops import rearrange
@TransposedModule
class ... | hyena-dna-main | src/models/sequence/mha.py |
import math
import torch
import torch.nn.functional as F
from einops import rearrange
from fftconv import fftconv_fwd, fftconv_bwd
@torch.jit.script
def _mul_sum(y, q):
return (y * q).sum(dim=1)
# reference convolution with residual connection
def fftconv_ref(u, k, D, dropout_mask, gelu=True, k_rev=None):
... | hyena-dna-main | src/ops/fftconv.py |
"""pykeops implementations of the Vandermonde matrix multiplication kernel used in the S4D kernel."""
import math
import torch
from einops import rearrange, repeat
from opt_einsum import contract
import os
try:
import pykeops
from pykeops.torch import LazyTensor, Genred
except:
pass
try:
from cauchy... | hyena-dna-main | src/ops/vandermonde.py |
""" Old utilities for parallel scan implementation of Linear RNNs. """
# TODO this file could use much cleanup
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import math
from src.models.functional.toeplitz import triangular_toeplitz_multiply, triangular_toeplitz_multiply_padded
... | hyena-dna-main | src/ops/unroll.py |
""" Compute a Krylov function efficiently. (S4 renames the Krylov function to a "state space kernel")
A : (N, N)
b : (N,)
c : (N,)
Return: [c^T A^i b for i in [L]]
"""
import torch
import torch.nn.functional as F
from einops import rearrange, repeat
from src.ops.toeplitz import causal_convolution
def krylov_sequent... | hyena-dna-main | src/ops/krylov.py |
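The docstring defines the Krylov function [c^T A^i b for i in [L]]; alongside the efficient versions (krylov_sequential and friends, truncated above), the literal O(L N^2) definition makes a useful correctness oracle:

```python
import torch

def krylov_naive(L, A, b, c):
    # Literal evaluation of [c^T A^i b for i in range(L)]: keep x = A^i b
    # and take an inner product with c at each step.
    x, out = b, []
    for _ in range(L):
        out.append(c @ x)
        x = A @ x
    return torch.stack(out)

N, L = 4, 6
A, b, c = torch.randn(N, N), torch.randn(N), torch.randn(N)
print(krylov_naive(L, A, b, c))  # shape (L,)
```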
""" Utilities for computing convolutions.
There are 3 equivalent views:
1. causal convolution
2. multiplication of (lower) triangular Toeplitz matrices
3. polynomial multiplication (mod x^N)
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
def construct_toeplitz(v, f=0.0):
"""E... | hyena-dna-main | src/ops/toeplitz.py |
"""Training example.
Example usage:
python -u main.py \
--dataset=cc3m --val-dataset=cc3m \
--opt-version='facebook/opt-6.7b' --visual-model='openai/clip-vit-large-patch14' \
--exp_name='gill_exp' --log-base-dir='runs/' \
--batch-size=64 --val-batch-size=64 --precision='bf16'
Example ru... | gill-main | main.py |
import tempfile
from share_btn import community_icon_html, loading_icon_html, share_js, save_js
import huggingface_hub
import gradio as gr
from gill import utils
from gill import models
import matplotlib.pyplot as plt
from PIL import Image
import torch
import numpy as np
import os
os.environ["HF_HUB_ENABLE_HF_TRANSFER"... | gill-main | demo/app_gradio.py |
# Modified from https://huggingface.co/spaces/haoheliu/audioldm-text-to-audio-generation/blob/79681cd8cb235160a27cdd100673346eb1784e53/share_btn.py
community_icon_html = """<svg id="share-btn-share-icon" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" ... | gill-main | demo/share_btn.py |
from typing import List, Optional
from collections import namedtuple
from diffusers import StableDiffusionPipeline
import json
import numpy as np
import os
import glob
import torch
from torch import Tensor
import torch.nn as nn
import torch.nn.functional as F
import pickle as pkl
from PIL import Image, UnidentifiedImag... | gill-main | gill/models.py |