This tiny model is intended for debugging. It is randomly initialized, using a configuration adapted from tencent/Hy3-preview.

File path          Size
model.safetensors  5.4MB

Example usage:

  • vLLM
# Multi-token prediction is supported
model_id=tiny-random/hy3
vllm serve $model_id \
  --tensor-parallel-size 2 \
  --speculative-config.method mtp \
  --speculative-config.num_speculative_tokens 1 \
  --tool-call-parser hy_v3 \
  --reasoning-parser hy_v3 \
  --enable-auto-tool-choice
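Once the server is up, vLLM exposes an OpenAI-compatible API on port 8000 by default. A minimal Python query sketch, assuming the default host and port (the random weights will produce gibberish, which is expected for a debugging checkpoint):

import requests

# Query the OpenAI-compatible chat endpoint (vLLM default port: 8000).
response = requests.post(
    "http://localhost:8000/v1/chat/completions",
    json={
        "model": "tiny-random/hy3",
        "messages": [{"role": "user", "content": "Hello!"}],
        "max_tokens": 32,
    },
)
print(response.json()["choices"][0]["message"]["content"])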
  • SGLang
# Multi-token prediction is supported
model_id=tiny-random/hy3
python3 -m sglang.launch_server \
  --model $model_id \
  --tp 2 \
  --tool-call-parser hunyuan \
  --reasoning-parser hunyuan \
  --speculative-num-steps 1 \
  --speculative-eagle-topk 1 \
  --speculative-num-draft-tokens 2 \
  --speculative-algorithm EAGLE
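SGLang serves the same OpenAI-compatible API, listening on port 30000 by default, so the query sketch above works unchanged after swapping the base URL to http://localhost:30000/v1/chat/completions.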
  • Transformers
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "tiny-random/hy3"

tokenizer = AutoTokenizer.from_pretrained(model_id, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", trust_remote_code=True)
messages = [
    {"role": "user", "content": "Write a short poem about AI."},
]
inputs = tokenizer.apply_chat_template(
    messages,
    tokenize=True,
    return_dict=True,  # return a BatchEncoding so it can be unpacked into generate()
    return_tensors="pt",
    add_generation_prompt=True,
    reasoning_effort="high",  # extra kwargs are forwarded to the chat template
)
print(inputs)
outputs = model.generate(**inputs.to(model.device), max_new_tokens=32)
output_text = tokenizer.decode(outputs[0])
print(output_text)
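Continuing from the snippet above: since the prompt tokens are echoed in outputs, a short variation decodes only the newly generated tokens:

# Strip the prompt tokens before decoding (inputs is the BatchEncoding from above).
prompt_len = inputs["input_ids"].shape[-1]
print(tokenizer.decode(outputs[0][prompt_len:], skip_special_tokens=True))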

Code used to create this repo:

import json
from copy import deepcopy
from pathlib import Path

import torch
import torch.nn as nn

from huggingface_hub import file_exists, hf_hub_download
from transformers import (
    AutoConfig,
    AutoModelForCausalLM,
    AutoTokenizer,
    GenerationConfig,
    set_seed,
)

source_model_id = "tencent/Hy3-preview"
save_folder = "/tmp/tiny-random/hy3"

tokenizer = AutoTokenizer.from_pretrained(source_model_id, trust_remote_code=True)
tokenizer.save_pretrained(save_folder)

with open(hf_hub_download(source_model_id, filename='config.json', repo_type='model'), 'r', encoding='utf-8') as f:
    config_json = json.load(f)
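# Shrink the architecture so the resulting checkpoint stays at a few MB.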
config_json.update({
    'expert_hidden_dim': 32,
    'moe_intermediate_size': 32,
    'head_dim': 32,
    'hidden_size': 8,
    'intermediate_size': 32,
    'num_attention_heads': 8,
    'num_hidden_layers': 4,
    'num_key_value_heads': 4,
})
with open(f"{save_folder}/config.json", "w", encoding='utf-8') as f:
    json.dump(config_json, f, indent=2)

config = AutoConfig.from_pretrained(
    save_folder,
    trust_remote_code=True,
)
print(config)
torch.set_default_dtype(torch.bfloat16)
set_seed(42)
model = AutoModelForCausalLM.from_config(config, trust_remote_code=True).eval().cpu()
if file_exists(filename="generation_config.json", repo_id=source_model_id, repo_type='model'):
    model.generation_config = GenerationConfig.from_pretrained(
        source_model_id, trust_remote_code=True,
    )
    model.generation_config.top_k = 40  # original value in source model is -1, which is invalid

# Append one extra decoder layer to act as the MTP (multi-token prediction) head
mtp = deepcopy(model.model.layers[-1])
mtp.eh_proj = nn.Linear(config.hidden_size * 2, config.hidden_size, bias=False)
mtp.enorm = nn.RMSNorm(config.hidden_size)
mtp.hnorm = nn.RMSNorm(config.hidden_size)
mtp.final_layernorm = nn.RMSNorm(config.hidden_size)
model.model.layers.append(mtp)
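# Inferred from the module names (a DeepSeek-V3-style MTP head): enorm normalizes
# the next token's embedding, hnorm normalizes the backbone's last hidden state,
# and eh_proj projects their 2 * hidden_size concatenation back to hidden_size
# before this decoder layer runs.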

# init weights
set_seed(42)
model = model.cpu().eval()
n_params = sum(p.numel() for p in model.parameters())
with torch.no_grad():
    for name, p in sorted(model.named_parameters()):
        torch.nn.init.normal_(p, 0, 0.2)
        print(name, p.shape, p.dtype, f'{p.numel() / n_params * 100: .2f}%')

# the expert score-correction bias is stored in float32, so re-initialize it separately at a small scale
for i in range(config.first_k_dense_replace, config.num_hidden_layers + 1, 1):
    model.model.layers[i].mlp.e_score_correction_bias = nn.Parameter(torch.randn_like(
        model.model.layers[i].mlp.e_score_correction_bias
    ).float() * 0.002)

model.save_pretrained(save_folder)
print(model)
torch.set_default_dtype(torch.float32)
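As a quick smoke test after saving, the checkpoint size and parameter count can be verified (a minimal sketch, reusing n_params and Path from the script above):

# The bfloat16 checkpoint should only be a few MB (cf. the 5.4MB file listed above).
print(f"{Path(save_folder, 'model.safetensors').stat().st_size / 1e6:.1f} MB")
print(f"{n_params:,} parameters")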

Printing the model:

HYV3ForCausalLM(
  (model): HYV3Model(
    (embed_tokens): Embedding(120832, 8, padding_idx=120002)
    (layers): ModuleList(
      (0): HYV3DecoderLayer(
        (self_attn): HYV3Attention(
          (q_proj): Linear(in_features=8, out_features=256, bias=False)
          (k_proj): Linear(in_features=8, out_features=128, bias=False)
          (v_proj): Linear(in_features=8, out_features=128, bias=False)
          (o_proj): Linear(in_features=256, out_features=8, bias=False)
          (q_norm): HYV3RMSNorm((32,), eps=1e-05)
          (k_norm): HYV3RMSNorm((32,), eps=1e-05)
        )
        (mlp): HYV3MLP(
          (gate_proj): Linear(in_features=8, out_features=32, bias=False)
          (up_proj): Linear(in_features=8, out_features=32, bias=False)
          (down_proj): Linear(in_features=32, out_features=8, bias=False)
          (act_fn): SiLUActivation()
        )
        (input_layernorm): HYV3RMSNorm((8,), eps=1e-05)
        (post_attention_layernorm): HYV3RMSNorm((8,), eps=1e-05)
      )
      (1-3): 3 x HYV3DecoderLayer(
        (self_attn): HYV3Attention(
          (q_proj): Linear(in_features=8, out_features=256, bias=False)
          (k_proj): Linear(in_features=8, out_features=128, bias=False)
          (v_proj): Linear(in_features=8, out_features=128, bias=False)
          (o_proj): Linear(in_features=256, out_features=8, bias=False)
          (q_norm): HYV3RMSNorm((32,), eps=1e-05)
          (k_norm): HYV3RMSNorm((32,), eps=1e-05)
        )
        (mlp): HYV3MoE(
          (gate): HYV3TopKRouter()
          (experts): HYV3Experts(
            (act_fn): SiLUActivation()
          )
          (shared_experts): HYV3MLP(
            (gate_proj): Linear(in_features=8, out_features=32, bias=False)
            (up_proj): Linear(in_features=8, out_features=32, bias=False)
            (down_proj): Linear(in_features=32, out_features=8, bias=False)
            (act_fn): SiLUActivation()
          )
        )
        (input_layernorm): HYV3RMSNorm((8,), eps=1e-05)
        (post_attention_layernorm): HYV3RMSNorm((8,), eps=1e-05)
      )
      (4): HYV3DecoderLayer(
        (self_attn): HYV3Attention(
          (q_proj): Linear(in_features=8, out_features=256, bias=False)
          (k_proj): Linear(in_features=8, out_features=128, bias=False)
          (v_proj): Linear(in_features=8, out_features=128, bias=False)
          (o_proj): Linear(in_features=256, out_features=8, bias=False)
          (q_norm): HYV3RMSNorm((32,), eps=1e-05)
          (k_norm): HYV3RMSNorm((32,), eps=1e-05)
        )
        (mlp): HYV3MoE(
          (gate): HYV3TopKRouter()
          (experts): HYV3Experts(
            (act_fn): SiLUActivation()
          )
          (shared_experts): HYV3MLP(
            (gate_proj): Linear(in_features=8, out_features=32, bias=False)
            (up_proj): Linear(in_features=8, out_features=32, bias=False)
            (down_proj): Linear(in_features=32, out_features=8, bias=False)
            (act_fn): SiLUActivation()
          )
        )
        (input_layernorm): HYV3RMSNorm((8,), eps=1e-05)
        (post_attention_layernorm): HYV3RMSNorm((8,), eps=1e-05)
        (eh_proj): Linear(in_features=16, out_features=8, bias=False)
        (enorm): RMSNorm((8,), eps=None, elementwise_affine=True)
        (hnorm): RMSNorm((8,), eps=None, elementwise_affine=True)
        (final_layernorm): RMSNorm((8,), eps=None, elementwise_affine=True)
      )
    )
    (norm): HYV3RMSNorm((8,), eps=1e-05)
    (rotary_emb): HYV3RotaryEmbedding()
  )
  (lm_head): Linear(in_features=8, out_features=120832, bias=False)
)

Test environment:

  • torch: 2.11.0+cu126
  • transformers: 5.7.0.dev0