---
library_name: transformers
pipeline_tag: text-generation
inference: true
widget:
- text: Hello!
  example_title: Hello world
  group: Python
base_model:
- LiquidAI/LFM2-8B-A1B
---

This tiny model is intended for debugging. It is randomly initialized using the configuration adapted from [LiquidAI/LFM2-8B-A1B](https://huggingface.co/LiquidAI/LFM2-8B-A1B).
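
As a quick sanity check that the checkpoint really is tiny, you can count its parameters (a minimal sketch; it loads the same repo id used in the usage example below):

```python
from transformers import AutoModelForCausalLM

# A randomly initialized debug model should have far fewer parameters than the 8B source
model = AutoModelForCausalLM.from_pretrained("tiny-random/lfm2-moe", trust_remote_code=True)
print(f"{sum(p.numel() for p in model.parameters()):,} parameters")
```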

### Example usage:

```python
from transformers import AutoModelForCausalLM, AutoTokenizer

# Load model and tokenizer
model_id = "tiny-random/lfm2-moe"
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    device_map="cuda",
    dtype="bfloat16",
    trust_remote_code=True,
    attn_implementation="flash_attention_2",
)
tokenizer = AutoTokenizer.from_pretrained(model_id)

# Build a chat-formatted prompt and generate an answer
prompt = "What is AI?"
input_ids = tokenizer.apply_chat_template(
    [{"role": "user", "content": prompt}],
    add_generation_prompt=True,
    return_tensors="pt",
    tokenize=True,
).to(model.device)

output = model.generate(
    input_ids,
    do_sample=True,
    temperature=0.3,
    min_p=0.15,
    repetition_penalty=1.05,
    max_new_tokens=32,
)

print(tokenizer.decode(output[0], skip_special_tokens=False))
```
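
The snippet above assumes a CUDA GPU with the flash-attn package installed. If neither is available, a minimal CPU-friendly variant (reusing `model_id` from above; note that older transformers releases spell the `dtype` argument `torch_dtype`) looks like this:

```python
# Fallback: default attention backend, no GPU required
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    dtype="bfloat16",
    trust_remote_code=True,
)
```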

### Code to create this repo:

```python
import json
from pathlib import Path

import accelerate
import torch
from huggingface_hub import file_exists, hf_hub_download
from transformers import (
    AutoConfig,
    AutoModelForCausalLM,
    AutoProcessor,
    GenerationConfig,
    set_seed,
)

source_model_id = "LiquidAI/LFM2-8B-A1B"
save_folder = "/tmp/tiny-random/lfm2-moe"

# Reuse the source tokenizer/processor as-is
processor = AutoProcessor.from_pretrained(source_model_id, trust_remote_code=True)
processor.save_pretrained(save_folder)

# Shrink the config: 3 layers (2 dense conv layers + 1 MoE attention layer)
# with hidden size 64, keeping the LFM2-MoE topology while staying tiny
with open(hf_hub_download(source_model_id, filename='config.json', repo_type='model'), 'r', encoding='utf-8') as f:
    config_json = json.load(f)
config_json['hidden_size'] = 64
config_json['intermediate_size'] = 128
config_json['layer_types'] = ['conv', 'conv', 'full_attention']
config_json['moe_intermediate_size'] = 128
config_json['num_dense_layers'] = 2
config_json['num_attention_heads'] = 2
config_json['num_hidden_layers'] = 3
config_json['num_key_value_heads'] = 1
config_json['use_cache'] = True
# config_json['tie_word_embeddings'] = True
with open(f"{save_folder}/config.json", "w", encoding='utf-8') as f:
    json.dump(config_json, f, indent=2)

config = AutoConfig.from_pretrained(
    save_folder,
    trust_remote_code=True,
)
print(config)

# Instantiate the model in bfloat16, then restore the default dtype
torch.set_default_dtype(torch.bfloat16)
model = AutoModelForCausalLM.from_config(config)
torch.set_default_dtype(torch.float32)

# Copy the generation config from the source repo if one exists
if file_exists(filename="generation_config.json", repo_id=source_model_id, repo_type='model'):
    model.generation_config = GenerationConfig.from_pretrained(
        source_model_id, trust_remote_code=True,
    )

# Re-initialize all weights with a fixed seed for reproducibility
set_seed(42)
model = model.cpu()  # CPU is more stable for random initialization across machines
with torch.no_grad():
    for name, p in sorted(model.named_parameters()):
        torch.nn.init.normal_(p, 0, 0.1)
        print(name, p.shape)
model.save_pretrained(save_folder)
print(model)
```
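
After saving, a quick round-trip check (a sketch, reusing the `save_folder` path from the script above) confirms the checkpoint reloads and produces logits with the expected vocabulary size:

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

save_folder = "/tmp/tiny-random/lfm2-moe"
reloaded = AutoModelForCausalLM.from_pretrained(save_folder, trust_remote_code=True)
tokenizer = AutoTokenizer.from_pretrained(save_folder)
with torch.no_grad():
    logits = reloaded(**tokenizer("hello", return_tensors="pt")).logits
print(logits.shape)  # last dimension should equal the 65536-token vocabulary
```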

### Printing the model:

```text
Lfm2MoeForCausalLM(
  (model): Lfm2MoeModel(
    (embed_tokens): Embedding(65536, 64, padding_idx=0)
    (layers): ModuleList(
      (0-1): 2 x Lfm2MoeDecoderLayer(
        (conv): Lfm2MoeShortConv(
          (conv): Conv1d(64, 64, kernel_size=(3,), stride=(1,), padding=(2,), groups=64, bias=False)
          (in_proj): Linear(in_features=64, out_features=192, bias=False)
          (out_proj): Linear(in_features=64, out_features=64, bias=False)
        )
        (feed_forward): Lfm2MoeMLP(
          (w1): Linear(in_features=64, out_features=128, bias=False)
          (w3): Linear(in_features=64, out_features=128, bias=False)
          (w2): Linear(in_features=128, out_features=64, bias=False)
        )
        (operator_norm): Lfm2MoeRMSNorm((64,), eps=1e-05)
        (ffn_norm): Lfm2MoeRMSNorm((64,), eps=1e-05)
      )
      (2): Lfm2MoeDecoderLayer(
        (self_attn): Lfm2MoeAttention(
          (q_proj): Linear(in_features=64, out_features=64, bias=False)
          (k_proj): Linear(in_features=64, out_features=32, bias=False)
          (v_proj): Linear(in_features=64, out_features=32, bias=False)
          (out_proj): Linear(in_features=64, out_features=64, bias=False)
          (q_layernorm): Lfm2MoeRMSNorm((32,), eps=1e-05)
          (k_layernorm): Lfm2MoeRMSNorm((32,), eps=1e-05)
        )
        (feed_forward): Lfm2MoeSparseMoeBlock(
          (gate): Linear(in_features=64, out_features=32, bias=False)
          (experts): Lfm2MoeExperts(
            (0-31): 32 x Lfm2MoeMLP(
              (w1): Linear(in_features=64, out_features=128, bias=False)
              (w3): Linear(in_features=64, out_features=128, bias=False)
              (w2): Linear(in_features=128, out_features=64, bias=False)
            )
          )
        )
        (operator_norm): Lfm2MoeRMSNorm((64,), eps=1e-05)
        (ffn_norm): Lfm2MoeRMSNorm((64,), eps=1e-05)
      )
    )
    (pos_emb): Lfm2MoeRotaryEmbedding()
    (embedding_norm): Lfm2MoeRMSNorm((64,), eps=1e-05)
  )
  (lm_head): Linear(in_features=64, out_features=65536, bias=False)
)
```