---
library_name: transformers
pipeline_tag: text-generation
inference: true
widget:
- text: Hello!
  example_title: Hello world
  group: Python
base_model:
- microsoft/Phi-4-mini-flash-reasoning
---

This tiny model is for debugging. It is randomly initialized, using a config adapted from [microsoft/Phi-4-mini-flash-reasoning](https://huggingface.co/microsoft/Phi-4-mini-flash-reasoning).


### Example usage:


```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
torch.random.manual_seed(0)

model_id = "tiny-random/phi-4-flash"
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    device_map="cuda",
    torch_dtype=torch.bfloat16,
    trust_remote_code=True,
)
tokenizer = AutoTokenizer.from_pretrained(model_id, trust_remote_code=True)

messages = [{
    "role": "user",
    "content": "How to solve 3*x^2+4*x+5=1?"
}]
inputs = tokenizer.apply_chat_template(
    messages,
    add_generation_prompt=True,
    return_dict=True,
    return_tensors="pt",
)

outputs = model.generate(
    **inputs.to(model.device),
    max_new_tokens=600,
    temperature=0.6,
    top_p=0.95,
    do_sample=True,
)
# Decode only the newly generated tokens, skipping the prompt.
outputs = tokenizer.batch_decode(outputs[:, inputs["input_ids"].shape[-1]:])

print(outputs[0])
```
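
The same generation can also be run through the high-level `pipeline` API, which applies the chat template itself when given a list of chat messages. A minimal sketch, mirroring the sampling settings above (not part of the original example, so treat it as an assumption about pipeline compatibility with this remote-code model):

```python
import torch
from transformers import pipeline

pipe = pipeline(
    "text-generation",
    model="tiny-random/phi-4-flash",
    torch_dtype=torch.bfloat16,
    device_map="cuda",
    trust_remote_code=True,
)
messages = [{"role": "user", "content": "How to solve 3*x^2+4*x+5=1?"}]
result = pipe(messages, max_new_tokens=600, do_sample=True, temperature=0.6, top_p=0.95)
# For chat input, `generated_text` holds the full conversation; the last
# message is the newly generated assistant reply.
print(result[0]["generated_text"][-1]["content"])
```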


### Code to create this repo:


```python
import json
from pathlib import Path

import torch
from huggingface_hub import file_exists, hf_hub_download
from transformers import (
    AutoConfig,
    AutoModelForCausalLM,
    AutoProcessor,
    GenerationConfig,
    set_seed,
)

source_model_id = "microsoft/Phi-4-mini-flash-reasoning"
save_folder = "/tmp/tiny-random/phi-4-flash"

processor = AutoProcessor.from_pretrained(source_model_id, trust_remote_code=True)
processor.save_pretrained(save_folder)

with open(hf_hub_download(source_model_id, filename='config.json', repo_type='model'), 'r', encoding='utf-8') as f:
    config_json = json.load(f)
# Point the auto_map entries back at the source repo so the remote code is resolved from there.
for key in ['AutoConfig', 'AutoModelForCausalLM']:
    config_json['auto_map'][key] = f'{source_model_id}--' + config_json['auto_map'][key]
automap = config_json['auto_map']
# Shrink the architecture to a tiny footprint.
config_json['hidden_size'] = 64
config_json['intermediate_size'] = 64
config_json['num_attention_heads'] = 2
config_json['num_hidden_layers'] = 4
config_json['num_key_value_heads'] = 2
config_json['tie_word_embeddings'] = True
config_json['sliding_window'] = 512
config_json['use_cache'] = True
config_json['mb_per_layer'] = 2  # first layer is mamba

with open(f"{save_folder}/config.json", "w", encoding='utf-8') as f:
    json.dump(config_json, f, indent=2)
config = AutoConfig.from_pretrained(
    save_folder,
    trust_remote_code=True,
)
print(config)
torch.set_default_dtype(torch.bfloat16)
model = AutoModelForCausalLM.from_config(config, trust_remote_code=True)
torch.set_default_dtype(torch.float32)
if file_exists(filename="generation_config.json", repo_id=source_model_id, repo_type='model'):
    model.generation_config = GenerationConfig.from_pretrained(
        source_model_id, trust_remote_code=True,
    )
set_seed(42)
model = model.cpu()  # CPU is more stable for random initialization across machines
with torch.no_grad():
    for name, p in sorted(model.named_parameters()):
        torch.nn.init.normal_(p, 0, 0.2)
        print(name, p.shape)
model.save_pretrained(save_folder)
print(model)

with open(f"{save_folder}/config.json", "r", encoding='utf-8') as f:
    config_json = json.load(f)
config_json['auto_map'] = automap
config_json['sliding_window'] = 512  # a bugfix for "'<' not supported between instances of 'int' and 'list'"
with open(f"{save_folder}/config.json", "w", encoding='utf-8') as f:
    json.dump(config_json, f, indent=2)
# Remove the copied remote-code files; they are loaded from the source repo via auto_map.
for python_file in Path(save_folder).glob('*.py'):
    if python_file.name.startswith('modeling_') or python_file.name.startswith('configuration_'):
        python_file.unlink()
```
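
Once the files are written, a quick reload from the local folder can confirm that the tiny checkpoint loads cleanly. A minimal sanity-check sketch, assuming the `save_folder` path above:

```python
from transformers import AutoModelForCausalLM, AutoTokenizer

save_folder = "/tmp/tiny-random/phi-4-flash"
model = AutoModelForCausalLM.from_pretrained(save_folder, trust_remote_code=True)
tokenizer = AutoTokenizer.from_pretrained(save_folder, trust_remote_code=True)
# With the full 200,064-token vocabulary and a 64-dim hidden size, the tied
# embedding matrix dominates the parameter count.
print(f"parameters: {sum(p.numel() for p in model.parameters()):,}")
```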


### Printing the model:


```text
Phi4FlashForCausalLM(
  (model): Phi4FlashModel(
    (embed_tokens): Embedding(200064, 64, padding_idx=199999)
    (embed_dropout): Dropout(p=0.0, inplace=False)
    (layers): ModuleList(
      (0): SambaYDecoderLayer(
        (mlp): SambaYMLP(
          (fc1): Linear(in_features=64, out_features=128, bias=False)
          (fc2): Linear(in_features=64, out_features=64, bias=False)
          (activation_fn): SiLU()
        )
        (input_layernorm): LayerNorm((64,), eps=1e-05, elementwise_affine=True)
        (attn): Phi3Mamba(
          (in_proj): Linear(in_features=64, out_features=256, bias=False)
          (conv1d): Conv1d(128, 128, kernel_size=(4,), stride=(1,), padding=(3,), groups=128)
          (act): SiLU()
          (x_proj): Linear(in_features=128, out_features=36, bias=False)
          (dt_proj): Linear(in_features=4, out_features=128, bias=True)
          (out_proj): Linear(in_features=128, out_features=64, bias=False)
        )
        (resid_attn_dropout): Dropout(p=0.0, inplace=False)
        (resid_mlp_dropout): Dropout(p=0.0, inplace=False)
        (post_attention_layernorm): LayerNorm((64,), eps=1e-05, elementwise_affine=True)
      )
      (1): SambaYDecoderLayer(
        (mlp): SambaYMLP(
          (fc1): Linear(in_features=64, out_features=128, bias=False)
          (fc2): Linear(in_features=64, out_features=64, bias=False)
          (activation_fn): SiLU()
        )
        (input_layernorm): LayerNorm((64,), eps=1e-05, elementwise_affine=True)
        (attn): SambaYFlashAttention2(
          (out_proj): Linear(in_features=64, out_features=64, bias=True)
          (Wqkv): Linear(in_features=64, out_features=192, bias=True)
          (inner_cross_attn): FlashDiffCustomAttention(
            (subln): SambaYRMSNorm()
          )
        )
        (resid_attn_dropout): Dropout(p=0.0, inplace=False)
        (resid_mlp_dropout): Dropout(p=0.0, inplace=False)
        (post_attention_layernorm): LayerNorm((64,), eps=1e-05, elementwise_affine=True)
      )
      (2): SambaYDecoderLayer(
        (mlp): SambaYMLP(
          (fc1): Linear(in_features=64, out_features=128, bias=False)
          (fc2): Linear(in_features=64, out_features=64, bias=False)
          (activation_fn): SiLU()
        )
        (input_layernorm): LayerNorm((64,), eps=1e-05, elementwise_affine=True)
        (attn): Phi3Mamba(
          (in_proj): Linear(in_features=64, out_features=256, bias=False)
          (conv1d): Conv1d(128, 128, kernel_size=(4,), stride=(1,), padding=(3,), groups=128)
          (act): SiLU()
          (x_proj): Linear(in_features=128, out_features=36, bias=False)
          (dt_proj): Linear(in_features=4, out_features=128, bias=True)
          (out_proj): Linear(in_features=128, out_features=64, bias=False)
        )
        (resid_attn_dropout): Dropout(p=0.0, inplace=False)
        (resid_mlp_dropout): Dropout(p=0.0, inplace=False)
        (post_attention_layernorm): LayerNorm((64,), eps=1e-05, elementwise_affine=True)
      )
      (3): SambaYDecoderLayer(
        (mlp): SambaYMLP(
          (fc1): Linear(in_features=64, out_features=128, bias=False)
          (fc2): Linear(in_features=64, out_features=64, bias=False)
          (activation_fn): SiLU()
        )
        (input_layernorm): LayerNorm((64,), eps=1e-05, elementwise_affine=True)
        (attn): SambaYFlashAttention2(
          (out_proj): Linear(in_features=64, out_features=64, bias=True)
          (Wqkv): Linear(in_features=64, out_features=192, bias=True)
          (inner_cross_attn): FlashDiffCustomAttention(
            (subln): SambaYRMSNorm()
          )
        )
        (resid_attn_dropout): Dropout(p=0.0, inplace=False)
        (resid_mlp_dropout): Dropout(p=0.0, inplace=False)
        (post_attention_layernorm): LayerNorm((64,), eps=1e-05, elementwise_affine=True)
      )
    )
    (final_layernorm): LayerNorm((64,), eps=1e-05, elementwise_affine=True)
  )
  (lm_head): Linear(in_features=64, out_features=200064, bias=False)
)
```
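
As the printout shows, `mb_per_layer = 2` gives the even-numbered layers a Mamba mixer (`Phi3Mamba`) and the odd-numbered layers attention (`SambaYFlashAttention2`). A small sketch to verify this programmatically, assuming the `model` object from the creation script above:

```python
# Report which token mixer each decoder layer uses.
for i, layer in enumerate(model.model.layers):
    print(i, type(layer.attn).__name__)
# Expected output:
# 0 Phi3Mamba
# 1 SambaYFlashAttention2
# 2 Phi3Mamba
# 3 SambaYFlashAttention2
```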