@@ -80,6 +80,7 @@ const char * llm_type_name(llm_type type) {
         case LLM_TYPE_236B:  return "236B";
         case LLM_TYPE_290B:  return "290B";
         case LLM_TYPE_314B:  return "314B";
+        case LLM_TYPE_405B:  return "405B";
         case LLM_TYPE_671B:  return "671B";
         case LLM_TYPE_SMALL: return "0.1B";
         case LLM_TYPE_MEDIUM: return "0.4B";
@@ -582,6 +583,7 @@ void llama_model::load_hparams(llama_model_loader & ml) {
                 switch (hparams.n_layer) {
                     case 32: type = LLM_TYPE_7B; break;
                     case 80: type = LLM_TYPE_70B; break;
+                    case 162: type = LLM_TYPE_405B; break;
                     default: type = LLM_TYPE_UNKNOWN;
                 }
             } break;
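
Note (illustration, not part of the patch): the two additions above work together. With the new case in load_hparams, a 162-layer checkpoint is classified as LLM_TYPE_405B, and the new entry in llm_type_name() lets that classification be reported as "405B". A self-contained sketch of just the cases visible in these hunks, with the enum trimmed down to those values:

    // Sketch only: mirrors the cases shown in the two hunks above.
    #include <cstdio>

    enum llm_type { LLM_TYPE_UNKNOWN, LLM_TYPE_7B, LLM_TYPE_70B, LLM_TYPE_405B };

    static const char * llm_type_name(llm_type type) {
        switch (type) {
            case LLM_TYPE_7B:   return "7B";
            case LLM_TYPE_70B:  return "70B";
            case LLM_TYPE_405B: return "405B";  // new label added by the patch
            default:            return "unknown";
        }
    }

    int main() {
        const int n_layer = 162;                // layer count handled by the new case
        llm_type type;
        switch (n_layer) {
            case  32: type = LLM_TYPE_7B;   break;
            case  80: type = LLM_TYPE_70B;  break;
            case 162: type = LLM_TYPE_405B; break;  // new mapping added by the patch
            default:  type = LLM_TYPE_UNKNOWN;
        }
        printf("n_layer = %d -> %s\n", n_layer, llm_type_name(type));  // n_layer = 162 -> 405B
    }
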
@@ -1848,7 +1850,9 @@ bool llama_model::load_tensors(llama_model_loader & ml) {
                     layer.bv = create_tensor(tn(LLM_TENSOR_ATTN_V,   "bias", i), {n_embd_gqa}, TENSOR_NOT_REQUIRED);
                     layer.bo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "bias", i), {n_embd},     TENSOR_NOT_REQUIRED);
 
-                    layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0);
+                    if (n_ff > 0) {
+                        layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0);
+                    }
 
                     if (hparams.rope_scaling_type_train == LLAMA_ROPE_SCALING_TYPE_LONGROPE) {
                         layer.rope_long = create_tensor(tn(LLM_TENSOR_ROPE_FACTORS_LONG, "weight", i), {n_rot/2}, TENSOR_NOT_REQUIRED | (i != 0 ? TENSOR_DUPLICATED : 0));
@@ -1858,9 +1862,11 @@ bool llama_model::load_tensors(llama_model_loader & ml) {
                         layer.rope_freqs = create_tensor(tn(LLM_TENSOR_ROPE_FREQS, "weight", i), {n_rot/2}, TENSOR_NOT_REQUIRED | (i != 0 ? TENSOR_DUPLICATED : 0));
                     }
 
-                    layer.ffn_gate = create_tensor(tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd, n_ff}, 0);
-                    layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), { n_ff, n_embd}, 0);
-                    layer.ffn_up   = create_tensor(tn(LLM_TENSOR_FFN_UP,   "weight", i), {n_embd, n_ff}, 0);
+                    if (n_ff > 0) {
+                        layer.ffn_gate = create_tensor(tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd, n_ff}, 0);
+                        layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), { n_ff, n_embd}, 0);
+                        layer.ffn_up   = create_tensor(tn(LLM_TENSOR_FFN_UP,   "weight", i), {n_embd, n_ff}, 0);
+                    }
 
                     // optional MLP bias
                     layer.ffn_gate_b = create_tensor(tn(LLM_TENSOR_FFN_GATE, "bias", i), {n_ff}, TENSOR_NOT_REQUIRED);
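
Note (illustration, not part of the patch): both load_tensors hunks above apply the same guard. When a layer's feed-forward width n_ff is zero, the loader no longer requests ffn_norm, ffn_gate, ffn_down or ffn_up for that layer; since these create_tensor calls use flag 0 (required), the guard presumably keeps loading from failing on checkpoints whose FFN-free layers simply do not contain those tensors. A minimal stand-alone sketch of the pattern, with made-up layer widths and printf standing in for the real loader:

    // Sketch only: the n_ff values are made up; printf stands in for create_tensor.
    #include <cstdint>
    #include <cstdio>
    #include <vector>

    int main() {
        // per-layer FFN widths; 0 marks an FFN-free layer (as in Nemotron-Ultra-253B)
        const std::vector<int64_t> n_ff_per_layer = {14336, 0, 14336};

        for (size_t il = 0; il < n_ff_per_layer.size(); ++il) {
            const int64_t n_ff = n_ff_per_layer[il];
            if (n_ff > 0) {
                // dense layer: the loader would create ffn_norm/ffn_gate/ffn_down/ffn_up here
                printf("layer %zu: create FFN tensors (n_ff = %lld)\n", il, (long long) n_ff);
            } else {
                // FFN-free layer: no FFN tensors are requested for it
                printf("layer %zu: FFN-free, skip FFN tensors\n", il);
            }
        }
    }
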
@@ -4705,6 +4711,7 @@ struct llm_build_deci : public llm_graph_context {
             ggml_tensor * inpSA = inpL;
             const int64_t n_head_kv = hparams.n_head_kv(il);
             const int64_t n_head    = hparams.n_head(il);
+            const int64_t n_ff      = hparams.n_ff(il);
 
             if (n_head == 0) {
                 // attention-free layer of Llama-3_1-Nemotron-51B
@@ -4780,6 +4787,11 @@ struct llm_build_deci : public llm_graph_context {
                 inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids);
             }
 
+            // FFN-free layer of Llama-3_1-Nemotron-Ultra-253B
+            if (n_head == 0 && n_ff == 0) {
+                continue;
+            }
+
             // For Granite architecture
             if (hparams.f_residual_scale) {
                 cur = ggml_scale(ctx0, cur, hparams.f_residual_scale);
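
Note (illustration, not part of the patch): on the graph-building side, llm_build_deci now reads the per-layer n_ff next to n_head and n_head_kv. n_head == 0 already marked the attention-free layers of Llama-3_1-Nemotron-51B; the added check treats a layer with both n_head == 0 and n_ff == 0 as having no FFN either, and skips the remainder of the loop body with continue. A compact sketch of that per-layer dispatch, with hypothetical build_attention/build_ffn stand-ins and made-up shapes:

    // Sketch only: build_attention/build_ffn are hypothetical stand-ins; only the
    // n_head / n_ff branching mirrors the hunks above.
    #include <cstdint>
    #include <cstdio>

    static void build_attention(int il) { printf("layer %d: attention block\n", il); }
    static void build_ffn      (int il) { printf("layer %d: FFN block\n", il); }

    int main() {
        // made-up per-layer shapes: dense, attention-free, attention- and FFN-free
        const int64_t n_head_per_layer[] = {64, 0, 0};
        const int64_t n_ff_per_layer[]   = {8192, 8192, 0};

        for (int il = 0; il < 3; ++il) {
            const int64_t n_head = n_head_per_layer[il];
            const int64_t n_ff   = n_ff_per_layer[il];

            if (n_head > 0) {
                build_attention(il);                                   // regular attention
            } else {
                printf("layer %d: attention-free (norm only)\n", il);  // Nemotron-51B style
            }

            // FFN-free layer (Nemotron-Ultra-253B style): nothing left to build
            if (n_head == 0 && n_ff == 0) {
                continue;
            }

            build_ffn(il);
        }
    }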