
Commit 3bf785f

llama : Llama-3_1-Nemotron-Ultra-253B-v1 support (#12843)
1 parent 1d36b36 commit 3bf785f

3 files changed: +24 −5 lines

convert_hf_to_gguf.py (+7 −1)
@@ -2123,6 +2123,9 @@ def __init__(self, *args, **kwargs):
             # if n_heads_in_group is not None, then
             # _num_kv_heads[il] is num_attention_head // n_heads_in_group and
             # _num_heads[il] is num_attention_head
+            # ***dummy layer*** for nemotron 253B
+            # if n_heads_in_group is None and ffn_mult is None
+            # then _num_kv_heads[il] is 0 and _num_heads[il] is 0 and _ffn_dims is 0
             for il in range(len(_block_configs)):
                 if _block_configs[il]["attention"]["n_heads_in_group"] is None:
                     if _block_configs[il]["attention"]["replace_with_linear"] is True:
@@ -2134,7 +2137,10 @@ def __init__(self, *args, **kwargs):
                 else:
                     self._num_kv_heads.append(self.hparams["num_attention_heads"] // _block_configs[il]["attention"]["n_heads_in_group"])
                     self._num_heads.append(self.hparams["num_attention_heads"])
-                _ffn_multipliers.append(_block_configs[il]["ffn"]["ffn_mult"])
+                if _block_configs[il]["ffn"]["ffn_mult"] is None: # dummy layer
+                    _ffn_multipliers.append(0.0)
+                else:
+                    _ffn_multipliers.append(_block_configs[il]["ffn"]["ffn_mult"])
             assert self.block_count == len(self._num_kv_heads)
             assert self.block_count == len(self._num_heads)
             assert self.block_count == len(_ffn_multipliers)
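For context, the per-layer loop above can be read as the following standalone sketch. The block configs and head count are illustrative values, not the real Nemotron-Ultra-253B configuration; only the branching mirrors the converter.

# Minimal sketch of how block_configs become per-layer head counts and FFN
# multipliers, including the new "dummy layer" case (illustrative values).
block_configs = [
    {"attention": {"n_heads_in_group": 8, "replace_with_linear": False},
     "ffn": {"ffn_mult": 2.625}},    # regular attention + FFN block
    {"attention": {"n_heads_in_group": None, "replace_with_linear": False},
     "ffn": {"ffn_mult": 1.3125}},   # attention-free block (Nemotron-51B style)
    {"attention": {"n_heads_in_group": None, "replace_with_linear": False},
     "ffn": {"ffn_mult": None}},     # dummy block (new for Nemotron-Ultra-253B)
]
num_attention_heads = 128

num_kv_heads: list[int] = []
num_heads: list[int] = []
ffn_multipliers: list[float] = []

for cfg in block_configs:
    attn = cfg["attention"]
    if attn["n_heads_in_group"] is None:
        # linear-attention blocks keep the full head count; attention-free
        # and dummy blocks get 0 heads
        num_kv_heads.append(0)
        num_heads.append(num_attention_heads if attn["replace_with_linear"] else 0)
    else:
        num_kv_heads.append(num_attention_heads // attn["n_heads_in_group"])
        num_heads.append(num_attention_heads)
    # the new branch from this commit: a missing ffn_mult marks a dummy block
    mult = cfg["ffn"]["ffn_mult"]
    ffn_multipliers.append(0.0 if mult is None else mult)

print(num_heads)        # [128, 0, 0]
print(num_kv_heads)     # [16, 0, 0]
print(ffn_multipliers)  # [2.625, 1.3125, 0.0]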

src/llama-model.cpp (+16 −4)
@@ -80,6 +80,7 @@ const char * llm_type_name(llm_type type) {
         case LLM_TYPE_236B: return "236B";
         case LLM_TYPE_290B: return "290B";
         case LLM_TYPE_314B: return "314B";
+        case LLM_TYPE_405B: return "405B";
         case LLM_TYPE_671B: return "671B";
         case LLM_TYPE_SMALL: return "0.1B";
         case LLM_TYPE_MEDIUM: return "0.4B";
@@ -582,6 +583,7 @@ void llama_model::load_hparams(llama_model_loader & ml) {
                 switch (hparams.n_layer) {
                     case 32: type = LLM_TYPE_7B; break;
                     case 80: type = LLM_TYPE_70B; break;
+                    case 162: type = LLM_TYPE_405B; break;
                     default: type = LLM_TYPE_UNKNOWN;
                 }
             } break;
@@ -1848,7 +1850,9 @@ bool llama_model::load_tensors(llama_model_loader & ml) {
                     layer.bv = create_tensor(tn(LLM_TENSOR_ATTN_V, "bias", i), {n_embd_gqa}, TENSOR_NOT_REQUIRED);
                     layer.bo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "bias", i), {n_embd}, TENSOR_NOT_REQUIRED);

-                    layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0);
+                    if (n_ff > 0) {
+                        layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0);
+                    }

                     if (hparams.rope_scaling_type_train == LLAMA_ROPE_SCALING_TYPE_LONGROPE) {
                         layer.rope_long = create_tensor(tn(LLM_TENSOR_ROPE_FACTORS_LONG, "weight", i), {n_rot/2}, TENSOR_NOT_REQUIRED | (i != 0 ? TENSOR_DUPLICATED : 0));
@@ -1858,9 +1862,11 @@ bool llama_model::load_tensors(llama_model_loader & ml) {
                         layer.rope_freqs = create_tensor(tn(LLM_TENSOR_ROPE_FREQS, "weight", i), {n_rot/2}, TENSOR_NOT_REQUIRED | (i != 0 ? TENSOR_DUPLICATED : 0));
                     }

-                    layer.ffn_gate = create_tensor(tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd, n_ff}, 0);
-                    layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), { n_ff, n_embd}, 0);
-                    layer.ffn_up = create_tensor(tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff}, 0);
+                    if (n_ff > 0) {
+                        layer.ffn_gate = create_tensor(tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd, n_ff}, 0);
+                        layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), { n_ff, n_embd}, 0);
+                        layer.ffn_up = create_tensor(tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff}, 0);
+                    }

                     // optional MLP bias
                     layer.ffn_gate_b = create_tensor(tn(LLM_TENSOR_FFN_GATE, "bias", i), {n_ff}, TENSOR_NOT_REQUIRED);
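The n_ff > 0 guards above skip the FFN norm/gate/down/up tensors for dummy layers, whose FFN multiplier was stored as 0.0 by the converter. As a rough illustration of why a 0.0 multiplier ends up as n_ff == 0 at load time, the sketch below converts multipliers to hidden sizes using a DeciLM-style 2/3 scaling rounded up to a multiple of 256; treat the exact formula and constants as assumptions, not the converter's literal code.

# Hedged sketch: FFN multiplier -> per-layer intermediate size.
def ffn_mult_to_intermediate_size(ffn_mult: float, n_embd: int, multiple: int = 256) -> int:
    size = int(2 * ffn_mult * n_embd / 3)
    # round up to the nearest multiple (assumed rounding rule)
    return size if size % multiple == 0 else size + multiple - size % multiple

hidden_size = 16384  # illustrative embedding width
for mult in (2.625, 1.3125, 0.0):
    print(mult, "->", ffn_mult_to_intermediate_size(mult, hidden_size))
# 2.625  -> 28672
# 1.3125 -> 14336
# 0.0    -> 0   (dummy layer: no FFN tensors are created or loaded)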
@@ -4705,6 +4711,7 @@ struct llm_build_deci : public llm_graph_context {
            ggml_tensor * inpSA = inpL;
            const int64_t n_head_kv = hparams.n_head_kv(il);
            const int64_t n_head = hparams.n_head(il);
+            const int64_t n_ff = hparams.n_ff(il);

            if (n_head == 0) {
                // attention-free layer of Llama-3_1-Nemotron-51B
@@ -4780,6 +4787,11 @@ struct llm_build_deci : public llm_graph_context {
                inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids);
            }

+            // FFN-free layer of Llama-3_1-Nemotron-Ultra-253B
+            if (n_head == 0 && n_ff == 0) {
+                continue;
+            }
+
            // For Granite architecture
            if (hparams.f_residual_scale) {
                cur = ggml_scale(ctx0, cur, hparams.f_residual_scale);
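Taken together, the per-layer n_head and n_ff values now distinguish the block kinds that llm_build_deci has to handle; a layer with neither attention heads nor an FFN is skipped outright. The labels in the sketch below are descriptive only, and whether a given checkpoint actually contains each kind depends on its block_configs.

# Descriptive sketch of the per-layer dispatch implied by the graph builder.
def deci_block_kind(n_head: int, n_ff: int) -> str:
    if n_head == 0 and n_ff == 0:
        return "dummy layer: skipped entirely (new for Nemotron-Ultra-253B)"
    if n_head == 0:
        return "attention-free layer: FFN only (as in Nemotron-51B)"
    if n_ff == 0:
        return "attention-only layer: no FFN tensors loaded"
    return "regular attention + FFN layer"

for n_head, n_ff in [(128, 28672), (0, 14336), (128, 0), (0, 0)]:
    print((n_head, n_ff), "->", deci_block_kind(n_head, n_ff))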

src/llama-model.h (+1 −0)
@@ -76,6 +76,7 @@ enum llm_type {
     LLM_TYPE_236B,
     LLM_TYPE_290B,
     LLM_TYPE_314B,
+    LLM_TYPE_405B,
     LLM_TYPE_671B,
     LLM_TYPE_SMALL,
     LLM_TYPE_MEDIUM,
