Dataset schema: three string columns per record.

    python_code : string, 0 to 992k characters (file contents; previews are truncated with "...")
    repo_name   : string, 8 to 46 characters
    file_path   : string, 5 to 162 characters

Records below are rendered as "[repo_name] file_path" followed by the python_code preview.
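A minimal sketch of reading records with this schema via the Hugging Face datasets library; the dataset id is a placeholder, not the real repository name:

    from datasets import load_dataset

    # Placeholder dataset id; substitute the actual repository.
    ds = load_dataset("some-org/python-code-corpus", split="train")

    row = ds[0]
    print(row["repo_name"], row["file_path"])
    print(row["python_code"][:200])  # first 200 characters of the file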
[jax-triton-main] 31 files (listed below); every python_code preview shows the same truncated Apache 2.0 header:

    # Copyright 2023 The jax_triton Authors.
    #
    # Licensed under the Apache License, Version 2.0 (the "License");
    # you may not use this file except in compliance with the License.
    # You may obtain a copy of the License at
    #
    #     http://www.apache.org/licenses/LICENSE-2.0
    #
    # Unless required by applicable law or agreed to ...

File paths:

    jax_triton/version.py
    jax_triton/__init__.py
    jax_triton/utils.py
    jax_triton/experimental/fusion/jaxpr_rewriter.py
    jax_triton/experimental/fusion/lowering.py
    jax_triton/experimental/fusion/__init__.py
    jax_triton/experimental/fusion/fusion.py
    jax_triton/pallas/registration.py
    jax_triton/pallas/primitives.py
    jax_triton/pallas/pallas_call.py
    jax_triton/pallas/__init__.py
    jax_triton/pallas/core.py
    jax_triton/pallas/triton_lowering.py
    jax_triton/pallas/utils.py
    jax_triton/pallas/ops/attention.py
    jax_triton/pallas/ops/__init__.py
    jax_triton/pallas/ops/softmax.py
    jax_triton/pallas/ops/rms_norm.py
    jax_triton/pallas/ops/layer_norm.py
    examples/matmul.py
    examples/add.py
    examples/block_map.py
    examples/fused_attention.py
    examples/softmax.py
    examples/fusion/nn.py
    examples/fusion/benchmark_matmul.py
    examples/pallas/blocksparse_matmul.py
    examples/pallas/templating.py
    examples/pallas/fused_attention.py
    examples/pallas/lstm.py
    examples/pallas/layer_norm.py
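For context on what these files do, a minimal sketch of the jax-triton calling pattern, modeled on the project's public add-kernel example (examples/add.py); the block size and grid here are illustrative choices:

    import jax
    import jax.numpy as jnp
    import triton
    import triton.language as tl
    import jax_triton as jt

    @triton.jit
    def add_kernel(x_ptr, y_ptr, out_ptr, block_size: tl.constexpr):
        # Each program instance handles one contiguous block of elements.
        pid = tl.program_id(axis=0)
        offsets = pid * block_size + tl.arange(0, block_size)
        x = tl.load(x_ptr + offsets)
        y = tl.load(y_ptr + offsets)
        tl.store(out_ptr + offsets, x + y)

    def add(x, y, block_size=8):
        out_shape = jax.ShapeDtypeStruct(shape=x.shape, dtype=x.dtype)
        # Metaparameters (block_size) are forwarded to the kernel as keywords.
        return jt.triton_call(
            x, y,
            kernel=add_kernel,
            out_shape=out_shape,
            grid=(triton.cdiv(x.size, block_size),),
            block_size=block_size)

    x = jnp.arange(8, dtype=jnp.float32)
    print(add(x, x))  # [ 0.  2.  4.  6.  8. 10. 12. 14.]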
[LeNet5-main] example.py

    import torch
    from lenet5 import LeNet5

    input_data = torch.randn(1, 3, 32, 32)  # .to(device=device)  # 3 channels for color image
    model = LeNet5()
    result = model(input_data)
    print(result)
    print(result.shape)
    print(result.dtype)

[LeNet5-main] lenet5/__init__.py

    from lenet5.model import LeNet5, device
    from lenet5.training import train

[LeNet5-main] lenet5/model.py

    import torch
    from torch import nn
    import torch.nn.functional as F

    class LeNet5(nn.Module):
        def __init__(self):
            super(LeNet5, self).__init__()
            self.conv1 = nn.Conv2d(3, 6, 5)   # 3 = in_channels -> 6 = out_channels, 5 = kernel_size
            self.conv2 = nn.Conv2d(6, 16, 5)  # 6 = in_channels -> 16 = out_channels, ...
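The model preview above cuts off after the two convolutions. For reference, a minimal sketch of the classic LeNet-5 forward pass consistent with the visible layers; the pooling and the 120/84/10 fully connected tail are the textbook architecture, assumed here rather than read from this repo:

    import torch
    from torch import nn
    import torch.nn.functional as F

    class LeNet5Sketch(nn.Module):
        def __init__(self):
            super().__init__()
            self.conv1 = nn.Conv2d(3, 6, 5)    # matches the preview
            self.conv2 = nn.Conv2d(6, 16, 5)   # matches the preview
            # Classic LeNet-5 tail; assumed, not shown in the preview.
            self.fc1 = nn.Linear(16 * 5 * 5, 120)
            self.fc2 = nn.Linear(120, 84)
            self.fc3 = nn.Linear(84, 10)

        def forward(self, x):
            x = F.max_pool2d(F.relu(self.conv1(x)), 2)  # 32x32 -> 28x28 -> 14x14
            x = F.max_pool2d(F.relu(self.conv2(x)), 2)  # 14x14 -> 10x10 -> 5x5
            x = torch.flatten(x, 1)                     # 16 * 5 * 5 = 400 features
            x = F.relu(self.fc1(x))
            x = F.relu(self.fc2(x))
            return self.fc3(x)

    print(LeNet5Sketch()(torch.randn(1, 3, 32, 32)).shape)  # torch.Size([1, 10])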
[LeNet5-main] lenet5/training.py

    from torch import optim
    from torch import nn
    from lenet5.model import LeNet5
    from lenet5.model import device

    model = LeNet5()
    loss = nn.CrossEntropyLoss()  # init cross entropy
    optim = optim.SGD(model.parameters(), lr=0.001, momentum=0.9)  # init stochastic gradient descent with the model parameters, don't forget to i...
[phi-1-master] setup.py

    from setuptools import setup, find_packages

    setup(
        name = 'orca_transformer',
        packages = find_packages(exclude=['examples']),
        version = '1.1.4',
        license='MIT',
        description = 'phi - Pytorch',
        author = 'Kye Gomez',
        author_email = 'kye@apac.ai',
        url = 'https://github.com/kyegomez/Phi',
        long_description_...

[phi-1-master] example.py

    import torch
    from PHI import phi2

    x = torch.randint(0, 256, (1, 1024)).cuda()
    phi2(x)  # (1, 1024, 20000)

[phi-1-master] PHI/__init__.py

    from old.traingv2 import TrainAndromeda
    from old.build_dataset import built_dataset

[phi-1-master] PHI/model.py

    from optimus_prime import TransformerWrapper, AutoregressiveWrapper, AndromedaEmbedding, Decoder

    Phi = TransformerWrapper(
        num_tokens=64007,
        max_seq_len=8192,
        use_abs_pos_emb=False,
        # tokenizer=tokenizer,
        embedding_provider=AndromedaEmbedding(),
        attn_layers = Decoder(
            dim=2560,  # 2048
            ...

[phi-1-master] PHI/train_distributed_accelerate.py

    import math
    import multiprocessing
    import os
    from datetime import timedelta
    from functools import partial
    from itertools import chain

    import torch
    from torch.distributed.fsdp import (
        FullyShardedDataParallel,
        MixedPrecision,
        BackwardPrefetch,
        ShardingStrategy,
    )
    from accelerate import Accelerator
    from...

[phi-1-master] PHI/inference.py

    import torch
    from transformers import AutoTokenizer
    from einops._torch_specific import allow_ops_in_compiled_graph
    import argparse

    def main():
        allow_ops_in_compiled_graph()
        torch.hub._validate_not_a_forked_repo = lambda a, b, c: True
        parser = argparse.ArgumentParser(description="Generate text using Phi ...

[phi-1-master] PHI/build_dataset.py

    import multiprocessing
    import argparse
    from itertools import chain
    from datasets import load_dataset
    from transformers import AutoTokenizer  # falcon tokenizer

    """
    Falcon dataset

    Data Fields
        content: the processed and cleaned text contained in the page;
        url: the url of the webpage crawled to produce the sample;
        timesta...

[phi-1-master] PHI/train_distributed.py

    (preview identical to PHI/train_distributed_accelerate.py above)

[phi-1-master] PHI/optimus_prime/xl_autoregressive_wrapper.py

    from math import ceil

    import torch
    from torch import nn
    import torch.nn.functional as F
    from einops import rearrange, pack, unpack

    from optimus_prime.autoregressive_wrapper import top_p, top_k, eval_decorator

    # helper functions

    def exists(val):
        return val is not None

    def divisible_by(numer, denom):
        return ...

[phi-1-master] PHI/optimus_prime/autoregressive_wrapper.py

    from math import ceil

    import torch
    from torch import nn
    import torch.nn.functional as F
    from einops import rearrange, pack, unpack

    def exists(val):
        return val is not None

    def eval_decorator(fn):
        def inner(self, *args, **kwargs):
            was_training = self.training
            self.eval()
            out = fn(self, ...

[phi-1-master] PHI/optimus_prime/x_transformers.py

    # add ability to choose your own tokenizer, and embedder, and ask what else can be done for production level training

    import math
    from random import random

    import torch
    from torch import nn, einsum, Tensor
    import torch.nn.functional as F

    from functools import partial, wraps
    from inspect import isfunction
    from collec...

[phi-1-master] PHI/optimus_prime/__init__.py

    import torch
    from packaging import version

    if version.parse(torch.__version__) >= version.parse('2.0.0'):
        from einops._torch_specific import allow_ops_in_compiled_graph
        allow_ops_in_compiled_graph()

    from optimus_prime.x_transformers import XTransformer, Encoder, Decoder, CrossAttender, Attention, TransformerW...

[phi-1-master] PHI/optimus_prime/continuous_autoregressive_wrapper.py

    import torch
    from torch import nn
    import torch.nn.functional as F

    def exists(val):
        return val is not None

    class ContinuousAutoregressiveWrapper(nn.Module):
        def __init__(self, net, ignore_index = -100, pad_value = 0):
            super().__init__()
            self.net = net
            self.max_seq_len = net.max_seq_len
            ...

[phi-1-master] PHI/optimus_prime/attend.py

    from functools import partial

    import torch
    from torch import nn, einsum, Tensor
    import torch.nn.functional as F

    from collections import namedtuple
    from functools import wraps
    from packaging import version
    from dataclasses import dataclass

    from einops import rearrange

    # constants

    EfficientAttentionConfig = namedtup...

[phi-1-master] PHI/optimus_prime/nonautoregressive_wrapper.py

    import math
    from random import random
    from contextlib import nullcontext
    from collections import namedtuple

    import torch
    import torch.nn.functional as F
    from torch import nn

    from einops import rearrange, repeat, pack, unpack

    from optimus_prime.x_transformers import TransformerWrapper
    from typing import Optional

    # ...

[phi-1-master] PHI/old/training_sophia.py

    import math
    import multiprocessing
    import os
    import collections
    from datetime import timedelta
    from functools import partial
    from itertools import chain

    import torch
    from accelerate import Accelerator
    from accelerate.utils import InitProcessGroupKwargs
    from datasets import concatenate_datasets, load_dataset
    from t...

[phi-1-master] PHI/old/sophia.py

    import math
    import torch
    from torch import Tensor
    from torch.optim.optimizer import Optimizer
    from typing import List, Optional

    class SophiaG(Optimizer):
        def __init__(self, params, lr=1e-4, betas=(0.965, 0.99), rho = 0.04,
                     weight_decay=1e-1, *, maximize: bool = False, capturable: bool = False):
            ...
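The SophiaG preview shows only the constructor signature; a hedged usage sketch with the defaults visible above. The linear model and data are placeholders, and the periodic Hessian-estimate update that Sophia-style optimizers normally schedule is omitted here:

    import torch
    from torch import nn
    # from old.sophia import SophiaG  # import path as laid out in this repo

    model = nn.Linear(128, 10)  # placeholder model
    opt = SophiaG(model.parameters(), lr=1e-4, betas=(0.965, 0.99),
                  rho=0.04, weight_decay=1e-1)

    x, y = torch.randn(32, 128), torch.randint(0, 10, (32,))
    loss = nn.functional.cross_entropy(model(x), y)
    loss.backward()
    opt.step()
    opt.zero_grad()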
[phi-1-master] PHI/old/training.py

    # quantization + parallelism

    import time

    import torch
    from accelerate.utils import set_seed
    from datasets import load_dataset
    from torch.nn import CrossEntropyLoss
    from torch.utils.data import DataLoader
    from transformers import default_data_collator, get_linear_schedule_with_warmup
    from accelerate import Accelerator
    f...

[phi-1-master] PHI/utils/stable_adamw.py

    import torch

    # This is the unfused version of StableAdamW. It is slower than the fused version (coming).

    class StableAdamWUnfused(torch.optim.Optimizer):
        def __init__(
            self,
            params,
            lr=0.002,
            weight_decay=0.2,
            betas=(0.9, 0.99),
            eps=1e-8,
            clip_thresh=1.0,
            ...
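Only the constructor defaults of StableAdamWUnfused are visible above; since the class subclasses torch.optim.Optimizer, usage should follow the standard PyTorch pattern. A sketch, with a placeholder model and data:

    import torch
    from torch import nn
    # from utils.stable_adamw import StableAdamWUnfused  # as laid out in this repo

    model = nn.Linear(64, 64)  # placeholder model
    optimizer = StableAdamWUnfused(
        model.parameters(),
        lr=0.002,            # defaults shown in the preview
        weight_decay=0.2,
        betas=(0.9, 0.99),
    )

    out = model(torch.randn(8, 64)).sum()
    out.backward()
    optimizer.step()
    optimizer.zero_grad()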
[phi-1-master] PHI/utils/decoupled_optimizer.py

    import torch
    # from palm_rlhf_pytorch.palm import LayerNorm
    from torch.nn import LayerNorm
    from torch.optim import AdamW
    # from palm.utils import print_main
    from utils.helpers import print_main
    from utils.stable_adamw import StableAdamWUnfused

    # optimizers

    def decoupled_optimizer(
        model: torch.nn.Module,
        le...

[phi-1-master] PHI/utils/rf_utils.py

    import math
    import torch
    from torch import einsum, _nnpack_available
    import torch.nn.functional as F
    from torch import nn
    from einops import rearrange
    import copy
    from pathlib import PurePath
    from tqdm import tqdm_gui
    from beartype import beartype
    from beartype.typing import Tuple, Optional
    from einops import rearra...

[phi-1-master] PHI/utils/helpers.py

    import torch.distributed as dist  # Add this line

    def print_num_params(model):
        n_params = sum(p.numel() for p in model.parameters() if p.requires_grad)
        if dist.is_available():
            if dist.get_rank() == 0:
                print(f"Number of parameters in model: {n_params}")
        else:
            print(f"Number of p...
[Minerva-main] Minerva/model_test.py

    import torch
    from model import PALME

    # Create a sample text token tensor
    text_tokens = torch.randint(0, 32002, (1, 50), dtype=torch.long)

    # Create a sample image tensor
    images = torch.randn(1, 3, 224, 224)

    # Instantiate the model
    model = PALME()

    # Pass the sample tensors to the model's forward function
    output = mod...

[Minerva-main] Minerva/embedding.py

    # Copyright (c) 2022 Microsoft
    # Licensed under The MIT License [see LICENSE for details]

    import torch
    import torch.nn as nn
    import torch.nn.functional as F

    class VisionLanguageEmbedding(nn.Module):
        def __init__(self, text_embed, vision_embed):
            super().__init__()
            self.text_embed = text_embed
            ...

[Minerva-main] Minerva/stable_adamw.py

    (preview identical to PHI/utils/stable_adamw.py above)

[Minerva-main] Minerva/model.py

    import torch
    import torch.nn as nn
    from palm_rlhf_pytorch import PaLM
    from transformer import AutoTokenizer
    import bitsandbytes as bnb

    from Minerva.embedding import PositionalEmbedding

    class MinervaTokenizer:
        def __init__(self):
            try:
                self.tokenizer = AutoTokenizer.from_pretrained(
                    ...

[Minerva-main] Minerva/utils.py

    (preview identical to PHI/utils/helpers.py above)

[Minerva-main] Minerva/build_dataset.py

    import multiprocessing
    import argparse
    from itertools import chain

    from datasets import load_dataset
    from model import PALME_Tokenizer
    import torch

    class CFG:
        SEED: int = 42
        SEQ_LEN: int = 8192
        NUM_CPU: int = multiprocessing.cpu_count()
        HF_ACCOUNT_REPO: str = "YOUR HF ACCOUNT"
        TOKENIZER: str = "Ele...

[Minerva-main] Minerva/train_distributed.py

    import math
    import multiprocessing
    import os
    from datetime import timedelta
    from functools import partial
    from itertools import chain

    import torch
    from torch.distributed.fsdp import (
        FullyShardedDataParallel,
        MixedPrecision,
        BackwardPrefetch,
        ShardingStrategy,
    )
    from accelerate.utils import InitProce...
[GATO-master] setup.py

    from setuptools import find_packages, setup

    setup(
        name='gato',
        version='0.0.1',
        description='Gato: A Generalist Agent',
        url='https://github.com/kyegomez/GATO',
        author='Kye Gomez',
        author_email='kye@apac.ai',
        long_description=open('README.md', 'r', encoding='utf-8').read(),
        long_descrip...

[GATO-master] example.py

    import torch
    from gato.model import Gato, GatoConfig

    # Create model instance
    config = GatoConfig.small()
    gato = Gato(config)

    # Fake inputs for Gato
    input_dim = config.input_dim
    input_ids = torch.cat(
        [torch.rand((1, 1, input_dim)) for _ in range(20)] +  # 20 image patches
        [torch.full((1, 1, input_dim), 0.25), ...

[GATO-master] gato/__init__.py

    from gato.model import Gato

[GATO-master] gato/model.py

    import copy
    from collections import namedtuple
    from dataclasses import dataclass
    from functools import wraps
    from typing import Any, Dict, Union

    import torch
    import torch.nn as nn
    import torch.nn.functional as F
    from einops import rearrange
    from packaging import version
    from torch import Tensor, einsum

    # constants

    E...

[GATO-master] datasets/control_env/ALE_Atari/atari_test_impala.py

    from ray.rllib.algorithms.impala import ImpalaConfig
    from ray.tune.logger import pretty_print

    import datetime
    import os
    import tempfile

    from ray.tune.logger.unified import UnifiedLogger  # noqa: E402

    def custom_log_creator(custom_path, custom_str):
        timestr = datetime.datetime.today().strftime("%Y-%m-%d_%H-%M-%...
[GPT4-main] example_language.py

    import torch
    from gpt4.gpt4 import GPT4

    x = torch.randint(0, 256, (1, 1024)).cuda()
    model = GPT4()
    model(x)

[GPT4-main] example_multimodal.py

    import torch
    from gpt4.gpt4 import GPT4MultiModal

    # usage
    img = torch.randn(1, 3, 256, 256)
    caption = torch.randint(0, 20000, (1, 1024))

    model = GPT4MultiModal()
    output = model(img, caption)
    print(output.shape)  # (1, 1024, 20000)

[GPT4-main] gpt4/__init__.py

    from gpt4.gpt4 import GPT4
    from gpt4.train import train

[GPT4-main] gpt4/model.py

    import math
    from dataclasses import dataclass
    from functools import partial, wraps
    from inspect import isfunction

    # constants

    from math import ceil
    from random import random
    from typing import Callable, List, Optional

    import torch
    import torch.nn.functional as F
    from einops import pack, rearrange, reduce, repeat, unp...

[GPT4-main] gpt4/attend.py

    from collections import namedtuple
    from dataclasses import dataclass
    from functools import partial, wraps
    from typing import Optional

    import torch
    import torch.nn.functional as F
    from einops import rearrange
    from packaging import version
    from torch import Tensor, einsum, nn

    # constants

    EfficientAttentionConfig = nam...

[GPT4-main] gpt4/train.py

    import math
    import multiprocessing
    import os
    from datetime import timedelta
    from functools import partial
    from itertools import chain

    import torch

    ########### SETUP CONFIG
    import torch.distributed as dist

    from accelerate import Accelerator
    from accelerate.logging import get_logger
    from accelerate.state import Acceler...

[GPT4-main] gpt4/gpt4.py

    import torch
    import torch.nn as nn

    from gpt4.model import (
        AutoregressiveWrapper,
        Decoder,
        Encoder,
        Transformer,
        ViTransformerWrapper,
    )

    class GPT4(nn.Module):
        """
        GPT4 is a transformer-based model architecture. It initializes with a Transformer
        and AutoregressiveWrapper with defaul...

[GPT4-main] gpt4/utils/__init__.py

    (python_code is empty: 0 characters)

[GPT4-main] gpt4/utils/stable_adam.py

    (preview identical to PHI/utils/stable_adamw.py above)
[Stable-Alignment-main] setup.py

    #!/usr/bin/env python3
    # -*- coding: utf-8 -*-

    import os
    from typing import List

    from setuptools import find_packages, setup

    def get_version() -> str:
        # https://packaging.python.org/guides/single-sourcing-package-version/
        init = open(os.path.join("stable_alignment", "__init__.py"), "r").read().split()
        r...

[Stable-Alignment-main] run_inference.py

    """Run inference on a trained model.

    Make sure you have downloaded the model in the `model_path` directory.

    Example:
        python stable_alignment/run_inference.py --model_path './models/socially-good-lm' --device 'cuda:0'
    """
    import json
    import os
    from typing import Any, Dict, List, Optional

    import torch
    import tran...

[Stable-Alignment-main] collect_data.py

    """The script to collect data for social simulations.

    Example:
        python collect_data.py --model_name 'gpt4' --world_ids "1, 2, 3, 4, 5"
    """
    import glob
    import math
    from typing import Any, Dict, Sequence

    import pandas as pd
    from absl import app, flags

    FLAGS = flags.FLAGS
    CACHE_DIR_PREFIX: str = "./data/cache"

    fla...

[Stable-Alignment-main] test/test_utils.py, test/test_agent.py, test/__init__.py,
stable_alignment/simulation.py, stable_alignment/alignment.py,
stable_alignment/__init__.py, stable_alignment/sandbox/world.py

    (each of these seven previews shows the same truncated header)

    #! /usr/bin/env python3
    # coding=utf-8
    # Ruibo Liu @Dartmouth College
    #
    # Licensed under the Apache License, Version 2.0 (the "License");
    # you may not use this file except in compliance with the License.
    # You may obtain a copy of the License at
    #
    #     http://www.apache.org/licenses/LICENSE-2.0
    #
    # Unless required by ap...

[Stable-Alignment-main] stable_alignment/sandbox/__init__.py

    """Sandbox Package."""

    from stable_alignment.sandbox.agent import Agent
    from stable_alignment.sandbox.utils import (
        call_gpt,
        finalize_answer,
        get_moral_score_cls,
        get_query_questions,
        load_initial_data,
        sample_init_data,
    )
    from stable_alignment.sandbox.world import World

    __all__ = [
        "Age...

[Stable-Alignment-main] stable_alignment/sandbox/utils.py, stable_alignment/sandbox/agent.py

    (same truncated header preview as the seven files above)
[KosmosX-API-main] api.py

    from fastapi import FastAPI, HTTPException, Depends
    from fastapi.security import APIKeyHeader
    from pydantic import BaseModel
    from typing import Optional
    from slowapi import Limiter, _rate_limit_exceeded_handler
    from slowapi.util import get_remote_address
    from slowapi.errors import RateLimitExceeded
    from kosmos_model ...
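The preview cuts off at the kosmos_model import. As a reference for the slowapi imports it shows, here is a minimal, self-contained sketch of the standard slowapi wiring for FastAPI; the /health endpoint and the 5/minute limit are illustrative assumptions, not taken from this API:

    from fastapi import FastAPI, Request
    from slowapi import Limiter, _rate_limit_exceeded_handler
    from slowapi.util import get_remote_address
    from slowapi.errors import RateLimitExceeded

    # One Limiter keyed on the client IP; slowapi looks it up on app.state.
    limiter = Limiter(key_func=get_remote_address)
    app = FastAPI()
    app.state.limiter = limiter
    app.add_exception_handler(RateLimitExceeded, _rate_limit_exceeded_handler)

    @app.get("/health")
    @limiter.limit("5/minute")  # returns 429 once a client exceeds the rate
    async def health(request: Request):  # slowapi requires the Request argument
        return {"status": "ok"}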
[KosmosX-API-main] kosmosX/preprocess.py

    from fairseq_cli.preprocess import cli_main

    if __name__ == "__main__":
        cli_main()

[KosmosX-API-main] kosmosX/generate.py

    from fairseq_cli.generate import cli_main

    if __name__ == "__main__":
        cli_main()

[KosmosX-API-main] kosmosX/validate.py

    #!/usr/bin/env python3 -u
    # Copyright (c) Facebook, Inc. and its affiliates.
    #
    # This source code is licensed under the MIT license found in the
    # LICENSE file in the root directory of this source tree.
    """
    Train a new model on one or across multiple GPUs.
    """
    import argparse
    import logging
    import math
    import os
    impor...

[KosmosX-API-main] kosmosX/interactive.py

    from fairseq_cli.interactive import cli_main

    if __name__ == "__main__":
        cli_main()

[KosmosX-API-main] kosmosX/train.py

    from fairseq_cli.train import cli_main

    if __name__ == "__main__":
        cli_main()

[KosmosX-API-main] kosmosX/demo/draw_box.py

    import os
    import textwrap
    import os

    import numpy as np
    import torch
    import torchvision.transforms as T
    from PIL import Image

    import matplotlib.pyplot as plt
    import matplotlib.pylab as pylab
    pylab.rcParams['figure.figsize'] = 20, 12

    import cv2
    from decode_string import decode_bbox_from_caption

    EOD_SYMBOL = "</doc>"...

[KosmosX-API-main] kosmosX/demo/gradio_app.py

    #!/usr/bin/env python3 -u
    # Copyright (c) Facebook, Inc. and its affiliates.
    #
    # This source code is licensed under the MIT license found in the
    # LICENSE file in the root directory of this source tree.
    """
    Translate raw text with a trained model. Batches data on-the-fly.
    """
    import sys
    sys.path.append( '.' )
    import ...
[KosmosX-API-main] kosmosX/demo/decode_string.py

    import re
    import numpy as np

    def find_patch_index_combinations(s):
        # The regular expression pattern for matching the required formats
        pattern = r'(?:(<phrase>([^<]+)</phrase>))?<object>((?:<patch_index_\d+><patch_index_\d+></delimiter_of_multi_objects/>)*<patch_index_\d+><patch_index_\d+>)</object>'
        ...
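To make the grouping behavior of that pattern concrete, a small self-contained demo; the regex is copied verbatim from the preview above, while the sample caption is a hypothetical Kosmos-style string:

    import re

    pattern = (r'(?:(<phrase>([^<]+)</phrase>))?<object>'
               r'((?:<patch_index_\d+><patch_index_\d+></delimiter_of_multi_objects/>)*'
               r'<patch_index_\d+><patch_index_\d+>)</object>')

    # Hypothetical caption following the grammar the regex expects.
    caption = "<phrase>a snowman</phrase><object><patch_index_0044><patch_index_0863></object>"

    # findall yields one 3-tuple per match: (tagged phrase, phrase text, patch-index run)
    for full_phrase, phrase_text, patch_indices in re.findall(pattern, caption):
        print(phrase_text, patch_indices)
    # prints: a snowman <patch_index_0044><patch_index_0863>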