merge_method: dare_ties
base_model: meta-llama/Meta-Llama-3.1-8B-Instruct
dtype: bfloat16
parameters:
  int8_mask: true
  normalize: true
  rescale: true
  density: 0.2            # Global fallback
slices:
  # 1️⃣ Foundation [0-8]
  - sources:
      - model: meta-llama/Meta-Llama-3.1-8B-Instruct
        layer_range: [0, 8]
  # 2️⃣ Logic (Hermes) [8-20]
  - sources:
      - model: meta-llama/Meta-Llama-3.1-8B-Instruct
        layer_range: [8, 20]
        parameters:
          weight: 0.6
          density: 0.2
      - model: NousResearch/Hermes-3-Llama-3.1-8B
        layer_range: [8, 20]
        parameters:
          weight: 0.4
          density: 0.3    # Keep Logic sparse (sharp)
  # 3️⃣ Knowledge (Aloe) [20-28]
  - sources:
      - model: meta-llama/Meta-Llama-3.1-8B-Instruct
        layer_range: [20, 28]
        parameters:
          weight: 0.55
          density: 0.2
      - model: HPAI-BSC/Llama3.1-Aloe-Beta-8B
        layer_range: [20, 28]
        parameters:
          weight: 0.45    # High influence
          density: 0.4    # <--- The Safe Maximum for Knowledge
  # 4️⃣ Output [28-32]
  - sources:
      - model: meta-llama/Meta-Llama-3.1-8B-Instruct
        layer_range: [28, 32]
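
# Usage note (a sketch, not part of the merge spec): assuming this file follows
# the standard mergekit schema, it can be applied with the mergekit CLI, e.g.:
#
#   mergekit-yaml dare_ties_config.yml ./merged-llama-3.1-8b --cuda
#
# where "dare_ties_config.yml" and "./merged-llama-3.1-8b" are placeholder
# names for this config file and the output directory.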