From 1793f25cfa640c54db4c37bc8e587773e92cbcd0 Mon Sep 17 00:00:00 2001
From: KerfuffleV2
Date: Sun, 27 Aug 2023 02:56:47 -0600
Subject: [PATCH 01/17] convert: Fix permute calls and method/func definitions

---
 convert.py | 16 ++++++++--------
 1 file changed, 8 insertions(+), 8 deletions(-)

diff --git a/convert.py b/convert.py
index 3f0a1c932d58f..49cc4b3dd27ce 100755
--- a/convert.py
+++ b/convert.py
@@ -439,7 +439,7 @@ def astype(self, data_type: DataType) -> 'Tensor': ...
     @abstractmethod
     def permute(self, n_head: int, n_head_kv: int) -> 'Tensor': ...
     @abstractmethod
-    def permute_part(self, n_part: int, n_head: int) -> 'UnquantizedTensor': ...
+    def permute_part(self, n_part: int, n_head: int, n_head_kv: int) -> 'UnquantizedTensor': ...
     @abstractmethod
     def part(self, n_part: int) -> 'UnquantizedTensor': ...
     @abstractmethod
@@ -467,9 +467,9 @@ def astype(self, data_type: DataType) -> Tensor:
     def to_ggml(self) -> 'UnquantizedTensor':
         return self
 
-    def permute_part(self, n_part: int, n_head: int) -> 'UnquantizedTensor':
+    def permute_part(self, n_part: int, n_head: int, n_head_kv: int) -> 'UnquantizedTensor':
         r = self.ndarray.shape[0] // 3
-        return UnquantizedTensor(permute(self.ndarray[r * n_part : r * n_part + r, ...], n_head, n_head))
+        return UnquantizedTensor(permute(self.ndarray[r * n_part : r * n_part + r, ...], n_head, n_head_kv))
 
     def part(self, n_part: int) -> 'UnquantizedTensor':
         r = self.ndarray.shape[0] // 3
@@ -597,12 +597,12 @@ def load() -> Tensor:
         return lazy_tensor.load().permute(n_head, n_head_kv)
     return LazyTensor(load, lazy_tensor.shape, lazy_tensor.data_type, f'permute({n_head}, {n_head_kv}) ' + lazy_tensor.description)
 
-def permute_part_lazy(lazy_tensor: LazyTensor, n_part: int, n_head: int) -> LazyTensor:
+def permute_part_lazy(lazy_tensor: LazyTensor, n_part: int, n_head: int, n_head_kv: int) -> LazyTensor:
     def load() -> Tensor:
-        return lazy_tensor.load().permute_part(n_part, n_head)
+        return lazy_tensor.load().permute_part(n_part, n_head, n_head_kv)
     s = lazy_tensor.shape.copy()
     s[0] = s[0] // 3
-    return LazyTensor(load, s, lazy_tensor.data_type, f'permute({n_head}) ' + lazy_tensor.description)
+    return LazyTensor(load, s, lazy_tensor.data_type, f'permute({n_head}, {n_head_kv}) ' + lazy_tensor.description)
 
 def part_lazy(lazy_tensor: LazyTensor, n_part: int) -> LazyTensor:
     def load() -> Tensor:
@@ -952,8 +952,8 @@ def convert_model_names(model: LazyModel, params: Params) -> LazyModel:
             #tmp[f"model.layers.{i}.self_attn.v_proj.weight"] = model[f"model.layers.{i}.self_attn.v_proj.weight"]
         elif f"model.layers.{i}.self_attn.W_pack.weight" in model:
             print(f"Unpacking and permuting layer {i}")
-            tmp[f"model.layers.{i}.self_attn.q_proj.weight"] = permute_part_lazy(model[f"model.layers.{i}.self_attn.W_pack.weight"], 0, params.n_head)
-            tmp[f"model.layers.{i}.self_attn.k_proj.weight"] = permute_part_lazy(model[f"model.layers.{i}.self_attn.W_pack.weight"], 1, params.n_head)
+            tmp[f"model.layers.{i}.self_attn.q_proj.weight"] = permute_part_lazy(model[f"model.layers.{i}.self_attn.W_pack.weight"], 0, params.n_head, params.n_head)
+            tmp[f"model.layers.{i}.self_attn.k_proj.weight"] = permute_part_lazy(model[f"model.layers.{i}.self_attn.W_pack.weight"], 1, params.n_head, params.n_head_kv)
             tmp[f"model.layers.{i}.self_attn.v_proj.weight"] = part_lazy (model[f"model.layers.{i}.self_attn.W_pack.weight"], 2)
             del tmp[f"model.layers.{i}.self_attn.W_pack.weight"]
         else:

From 084dd216cd18cd33023853ad83107efebdb5088e Mon Sep 17 00:00:00 2001
From: KerfuffleV2
Date: Sun, 27 Aug 2023 10:58:13
-0600 Subject: [PATCH 02/17] Cleanups for gguf-py --- gguf-py/gguf/gguf.py | 108 ++++++++++++++++++++--------------------- gguf-py/gguf/py.typed | 0 gguf-py/pyproject.toml | 1 + 3 files changed, 55 insertions(+), 54 deletions(-) create mode 100644 gguf-py/gguf/py.typed diff --git a/gguf-py/gguf/gguf.py b/gguf-py/gguf/gguf.py index 838a2c0f8aa4f..8f119c53490e3 100644 --- a/gguf-py/gguf/gguf.py +++ b/gguf-py/gguf/gguf.py @@ -6,7 +6,7 @@ import numpy as np from enum import IntEnum, auto -from typing import Any, IO, List, Optional +from typing import Any, BinaryIO, IO, Dict, List, Optional, Tuple # # constants @@ -71,35 +71,35 @@ class MODEL_ARCH(IntEnum): - LLAMA = auto() - FALCON = auto() - GPT2 = auto() - GPTJ = auto() - GPTNEOX = auto() - MPT = auto() + LLAMA : int = auto() + FALCON : int = auto() + GPT2 : int = auto() + GPTJ : int = auto() + GPTNEOX: int = auto() + MPT : int = auto() class MODEL_TENSOR(IntEnum): - TOKEN_EMBD = auto() - POS_EMBD = auto() - OUTPUT = auto() - OUTPUT_NORM = auto() - ROPE_FREQS = auto() - ATTN_Q = auto() - ATTN_K = auto() - ATTN_V = auto() - ATTN_QKV = auto() - ATTN_OUT = auto() - ATTN_NORM = auto() - ATTN_NORM_2 = auto() - ATTN_ROT_EMBD = auto() - FFN_GATE = auto() - FFN_DOWN = auto() - FFN_UP = auto() - FFN_NORM = auto() - - -MODEL_ARCH_NAMES = { + TOKEN_EMBD : int = auto() + POS_EMBD : int = auto() + OUTPUT : int = auto() + OUTPUT_NORM : int = auto() + ROPE_FREQS : int = auto() + ATTN_Q : int = auto() + ATTN_K : int = auto() + ATTN_V : int = auto() + ATTN_QKV : int = auto() + ATTN_OUT : int = auto() + ATTN_NORM : int = auto() + ATTN_NORM_2 : int = auto() + ATTN_ROT_EMBD: int = auto() + FFN_GATE : int = auto() + FFN_DOWN : int = auto() + FFN_UP : int = auto() + FFN_NORM : int = auto() + + +MODEL_ARCH_NAMES: Dict[MODEL_ARCH, str] = { MODEL_ARCH.LLAMA: "llama", MODEL_ARCH.FALCON: "falcon", MODEL_ARCH.GPT2: "gpt2", @@ -108,7 +108,7 @@ class MODEL_TENSOR(IntEnum): MODEL_ARCH.MPT: "mpt", } -MODEL_TENSOR_NAMES = { +MODEL_TENSOR_NAMES: Dict[MODEL_ARCH, Dict[MODEL_TENSOR, str]] = { MODEL_ARCH.LLAMA: { MODEL_TENSOR.TOKEN_EMBD: "token_embd", MODEL_TENSOR.OUTPUT_NORM: "output_norm", @@ -154,7 +154,7 @@ class MODEL_TENSOR(IntEnum): } # tensors that will not be serialized -MODEL_TENSOR_SKIP = { +MODEL_TENSOR_SKIP: Dict[MODEL_ARCH, List[MODEL_TENSOR]] = { MODEL_ARCH.LLAMA: [ MODEL_TENSOR.ROPE_FREQS, MODEL_TENSOR.ATTN_ROT_EMBD, @@ -388,15 +388,21 @@ def get_type(val): class GGUFWriter: + fout: BinaryIO + arch: str + offset_tensor = 0 + data_alignment = GGUF_DEFAULT_ALIGNMENT + kv_data = b"" + kv_data_count = 0 + ti_data = b"" + ti_data_count = 0 + use_temp_file: bool + temp_file: Optional[tempfile.SpooledTemporaryFile[bytes]] = None + tensors: List[Tuple[np.ndarray, int]] + def __init__(self, path: str, arch: str, use_temp_file = True): self.fout = open(path, "wb") self.arch = arch - self.offset_tensor = 0 - self.data_alignment = GGUF_DEFAULT_ALIGNMENT - self.kv_data = b"" - self.kv_data_count = 0 - self.ti_data = b"" - self.ti_data_count = 0 self.add_architecture() self.use_temp_file = use_temp_file self.tensors = [] @@ -477,7 +483,7 @@ def add_array(self, key: str, val: list): self.add_key(key) self.add_val(val, GGUFValueType.ARRAY) - def add_val(self: str, val: Any, vtype: GGUFValueType = None, add_vtype: bool = True): + def add_val(self, val: Any, vtype: Optional[GGUFValueType] = None, add_vtype: bool = True): if vtype is None: vtype = GGUFValueType.get_type(val) @@ -545,15 +551,16 @@ def add_tensor_info(self, name: str, tensor_shape: np.ndarray, tensor_dtype: np. 
self.ti_data_count += 1 def add_tensor(self, name: str, tensor: np.ndarray, raw_shape: Optional[np.ndarray] = None, raw_dtype: Optional[GGMLQuantizationType] = None): - if self.use_temp_file and not hasattr(self, "temp_file"): - self.temp_file = tempfile.SpooledTemporaryFile(mode="w+b", max_size=256*1024*1024) - self.temp_file.seek(0) + if self.use_temp_file and self.temp_file is None: + fp = tempfile.SpooledTemporaryFile(mode="w+b", max_size=256*1024*1024) + fp.seek(0) + self.temp_file = fp self.add_tensor_info(name, raw_shape if raw_shape is not None else tensor.shape, tensor.dtype, tensor.nbytes, raw_dtype = raw_dtype) pad = GGUFWriter.ggml_pad(tensor.nbytes, self.data_alignment) - tensor.nbytes - if not self.use_temp_file: + if self.temp_file is None: self.tensors.append((tensor, pad)) return @@ -562,23 +569,20 @@ def add_tensor(self, name: str, tensor: np.ndarray, raw_shape: Optional[np.ndarr if pad != 0: self.temp_file.write(bytes([0] * pad)) - def write_tensor_data(self, tensor: np.ndarray): - pad = GGUFWriter.ggml_pad(self.fout.tell(), self.data_alignment) - self.fout.tell() + def write_padding(self, fp: BinaryIO, n: int, align: Optional[int] = None): + pad = GGUFWriter.ggml_pad(n, align if align is not None else self.data_alignment) - n if pad != 0: - self.fout.write(bytes([0] * pad)) + fp.write(bytes([0] * pad)) + def write_tensor_data(self, tensor: np.ndarray): + self.write_padding(self.fout, self.fout.tell()) tensor.tofile(self.fout) - - pad = GGUFWriter.ggml_pad(tensor.nbytes, self.data_alignment) - tensor.nbytes - if pad != 0: - self.fout.write(bytes([0] * pad)) + self.write_padding(self.fout, tensor.nbytes) def write_tensors_to_file(self): self.write_ti_data_to_file() - pad = GGUFWriter.ggml_pad(self.fout.tell(), self.data_alignment) - self.fout.tell() - if pad != 0: - self.fout.write(bytes([0] * pad)) + self.write_padding(self.fout, self.fout.tell()) if not self.use_temp_file: for (currtensor, currpad) in self.tensors: @@ -654,10 +658,6 @@ def add_parallel_residual(self, use: bool): self.add_bool( KEY_USE_PARALLEL_RESIDUAL.format(arch=self.arch), use) - def add_tensor_data_layout(self, layout: str): - self.add_string( - KEY_TENSOR_DATA_LAYOUT.format(arch=self.arch), layout) - def add_head_count(self, count: int): self.add_uint32( KEY_ATTENTION_HEAD_COUNT.format(arch=self.arch), count) diff --git a/gguf-py/gguf/py.typed b/gguf-py/gguf/py.typed new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/gguf-py/pyproject.toml b/gguf-py/pyproject.toml index cc70e28b7206e..c66b069f9b166 100644 --- a/gguf-py/pyproject.toml +++ b/gguf-py/pyproject.toml @@ -5,6 +5,7 @@ description = "Write ML models in GGUF for GGML" authors = ["GGML "] packages = [ {include = "gguf"}, + {include = "gguf/py.typed"}, ] readme = "README.md" homepage = "https://ggml.ai" From 795c0c6e9d633b0bf48ee804b6fd90e6a4761208 Mon Sep 17 00:00:00 2001 From: KerfuffleV2 Date: Sun, 27 Aug 2023 11:16:46 -0600 Subject: [PATCH 03/17] Minor types cleanups. 
--- convert-falcon-hf-to-gguf.py | 4 ++-- convert-llama-hf-to-gguf.py | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/convert-falcon-hf-to-gguf.py b/convert-falcon-hf-to-gguf.py index 168bcf17f0900..5c69d13491e9c 100755 --- a/convert-falcon-hf-to-gguf.py +++ b/convert-falcon-hf-to-gguf.py @@ -113,7 +113,7 @@ def count_model_parts(dir_model: str) -> int: print("gguf: get tokenizer metadata") -tokens: List[str] = [] +tokens: List[bytearray] = [] scores: List[float] = [] toktypes: List[int] = [] merges: List[str] = [] @@ -199,7 +199,7 @@ def count_model_parts(dir_model: str) -> int: print("gguf: get tensor metadata") if num_parts == 0: - part_names = ("pytorch_model.bin",) + part_names = iter(("pytorch_model.bin",)) else: part_names = ( f"pytorch_model-{n:05}-of-{num_parts:05}.bin" for n in range(1, num_parts + 1) diff --git a/convert-llama-hf-to-gguf.py b/convert-llama-hf-to-gguf.py index b00810dbbc21a..534b71add519a 100755 --- a/convert-llama-hf-to-gguf.py +++ b/convert-llama-hf-to-gguf.py @@ -9,7 +9,7 @@ import numpy as np import torch -from typing import Any, List, Optional +from typing import Any, List, Optional, TypeAlias from pathlib import Path from sentencepiece import SentencePieceProcessor @@ -254,7 +254,7 @@ def count_model_parts(dir_model: str) -> int: print("gguf: get tensor metadata") if num_parts == 0: - part_names = ("pytorch_model.bin",) + part_names = iter(("pytorch_model.bin",)) else: part_names = ( f"pytorch_model-{n:05}-of-{num_parts:05}.bin" for n in range(1, num_parts + 1) From ea43267819e2dae786dae1a96a159e9fd0bb7e13 Mon Sep 17 00:00:00 2001 From: KerfuffleV2 Date: Sun, 27 Aug 2023 12:31:24 -0600 Subject: [PATCH 04/17] Initial implementation of handling merges and special tokens --- convert.py | 120 +++++++++++++++++++++++++++++++++++++++++++---------- 1 file changed, 97 insertions(+), 23 deletions(-) diff --git a/convert.py b/convert.py index 49cc4b3dd27ce..2ca8a018d8fc2 100755 --- a/convert.py +++ b/convert.py @@ -353,7 +353,7 @@ def all_tokens(self) -> Iterable[Tuple[bytes, float, gguf.TokenType]]: yield from self.added_tokens() def __repr__(self) -> str: - return f"BpeVocab with {self.vocab_size_base} base tokens and {len(self.added_tokens_list)} added tokens>" + return f"" class SentencePieceVocab: @@ -416,6 +416,72 @@ def __repr__(self) -> str: Vocab = Union[BpeVocab, SentencePieceVocab] +class SpecialVocab: + merges: List[str] = [] + special_token_types: Tuple[str, ...] 
= tuple(('bos', 'eos', 'unk', 'sep', 'pad')) + special_token_ids: Dict[str, int] = {} + + def __init__(self, path: Path, special_token_types: Optional[Tuple[str]] = None): + self.special_token_ids = {} + if special_token_types is not None: + self.special_token_types = special_token_types + self.load(path) + + def load(self, path: Path): + if not self.try_load_from_tokenizer_json(path): + self.try_load_from_config_json(path) + + def try_load_from_tokenizer_json(self, path: Path) -> bool: + tokenizer_file = path / 'tokenizer.json' + if not tokenizer_file.is_file(): + return False + with open(tokenizer_file, 'r', encoding = 'utf-8') as f: + tokenizer = json.load(f) + merges = tokenizer.get('model', {}).get('merges') + if isinstance(merges, list) and len(merges) > 0 and isinstance(merges[0], str): + self.merges = merges + tokenizer_config_file = path / 'tokenizer_config.json' + added_tokens = tokenizer.get('added_tokens') + if added_tokens is None or not tokenizer_config_file.is_file(): + return True + with open(tokenizer_config_file, 'r', encoding = 'utf-8') as f: + tokenizer_config = json.load(f) + for typ in self.special_token_types: + tc_content = (tokenizer_config.get(f'{typ}_token') or {}).get('content') + if not isinstance(tc_content, str): + continue + for maybe_token_id in (atok.get('id') for atok in added_tokens if atok.get('content') == tc_content): + if isinstance(maybe_token_id, int): + self.special_token_ids[typ] = maybe_token_id + break + return True + + def try_load_from_config_json(self, path: Path) -> bool: + config_file = path / 'config.json' + if not config_file.is_file(): + return False + with open(config_file, 'r', encoding = 'utf-8') as f: + config = json.load(f) + for typ in self.special_token_types: + maybe_token_id = config.get(f'{typ}_token_id') + if isinstance(maybe_token_id, int): + self.special_token_ids[typ] = maybe_token_id + return True + + def add_to_gguf(self, gw: gguf.GGUFWriter): + if len(self.merges) > 0: + print(f'SpecialVocab: Adding {len(self.merges)} merge(s).') + gw.add_token_merges(self.merges) + for typ, tokid in self.special_token_ids.items(): + handler: Optional[Callable[[int], None]] = getattr(gw, f'add_{typ}_token_id', None) + if handler is None: + print(f'SpecialVocab: WARNING: No handler for special token type {typ} with id {tokid} - skipping') + continue + print(f'SpecialVocab: Setting special token type {typ} to {tokid}') + handler(tokid) + + def __repr__(self): + return f'' # # data loading @@ -843,6 +909,9 @@ def add_meta_vocab(self, vocab: Vocab) -> None: self.gguf.add_token_scores(scores) self.gguf.add_token_types(toktypes) + def add_meta_special_vocab(self, svocab: SpecialVocab) -> None: + svocab.add_to_gguf(self.gguf) + def add_tensor_info(self, name: str, tensor: LazyTensor) -> None: n_elements = int(np.prod(tensor.shape)) raw_dtype = getattr(tensor.data_type, 'ggml_type', None) @@ -887,7 +956,7 @@ def maybe_do_quantize(item: Tuple[DataType, NDArray]) -> NDArray: return dt.quantize(arr) @staticmethod - def write_all(fname_out: Path, ftype: GGMLFileType, params: Params, model: LazyModel, vocab: Vocab, concurrency: int = DEFAULT_CONCURRENCY) -> None: + def write_all(fname_out: Path, ftype: GGMLFileType, params: Params, model: LazyModel, vocab: Vocab, svocab: SpecialVocab, concurrency: int = DEFAULT_CONCURRENCY) -> None: check_vocab_size(params, vocab) of = OutputFile(fname_out) @@ -895,6 +964,7 @@ def write_all(fname_out: Path, ftype: GGMLFileType, params: Params, model: LazyM # meta data of.add_meta_arch(params) 
of.add_meta_vocab(vocab) + of.add_meta_special_vocab(svocab) # tensor info for name, lazy_tensor in model.items(): @@ -1120,6 +1190,10 @@ def main(args_in: Optional[List[str]] = None) -> None: model_plus = load_some_model(args.model) + if args.dump: + do_dump_model(model_plus) + return + params = Params.load(model_plus) if params.n_ctx == -1: if args.ctx is None: @@ -1140,33 +1214,33 @@ def main(args_in: Optional[List[str]] = None) -> None: vocab: Vocab if args.vocab_only: + # FIXME: Handle special vocab here also. vocab = load_vocab(args.vocab_dir or args.model, args.vocabtype) assert args.outfile, "need --outfile if using --vocab-only" outfile = args.outfile OutputFile.write_vocab_only(outfile, params, vocab) print(f"Wrote {outfile}") - else: - if args.dump: - do_dump_model(model_plus) - return + return - if model_plus.vocab is not None and args.vocab_dir is None: - vocab = model_plus.vocab - else: - vocab_dir = args.vocab_dir if args.vocab_dir else model_plus.paths[0].parent - vocab = load_vocab(vocab_dir, args.vocabtype) - - model = model_plus.model - model = convert_model_names(model, params) - ftype = pick_output_type(model, args.outtype) - model = convert_to_output_type(model, ftype) - outfile = args.outfile or default_outfile(model_plus.paths, ftype) - - params.ftype = ftype - print(f"Writing {outfile}, format {ftype}") - - OutputFile.write_all(outfile, ftype, params, model, vocab, concurrency = args.concurrency) - print(f"Wrote {outfile}") + if model_plus.vocab is not None and args.vocab_dir is None: + vocab = model_plus.vocab + else: + vocab_dir = args.vocab_dir if args.vocab_dir else model_plus.paths[0].parent + vocab = load_vocab(vocab_dir, args.vocabtype) + # FIXME: Try to respect vocab_dir somehow? + special_vocab = SpecialVocab(model_plus.paths[0].parent) + + model = model_plus.model + model = convert_model_names(model, params) + ftype = pick_output_type(model, args.outtype) + model = convert_to_output_type(model, ftype) + outfile = args.outfile or default_outfile(model_plus.paths, ftype) + + params.ftype = ftype + print(f"Writing {outfile}, format {ftype}") + + OutputFile.write_all(outfile, ftype, params, model, vocab, special_vocab, concurrency = args.concurrency) + print(f"Wrote {outfile}") if __name__ == '__main__': From c7b0952eb799def43465fd518511043b82d6dc94 Mon Sep 17 00:00:00 2001 From: KerfuffleV2 Date: Sun, 27 Aug 2023 13:17:18 -0600 Subject: [PATCH 05/17] convert: Handle special tokens and merges in vocab only mode convert: Vocab only mode no longer requires loading model tensors --- convert.py | 24 +++++++++++++++++------- 1 file changed, 17 insertions(+), 7 deletions(-) diff --git a/convert.py b/convert.py index 2ca8a018d8fc2..b3b003f3b86f4 100755 --- a/convert.py +++ b/convert.py @@ -299,8 +299,10 @@ def load(model_plus: 'ModelPlus') -> 'Params': params = Params.loadHFTransformerJson(model_plus.model, hf_config_path) elif orig_config_path.exists(): params = Params.loadOriginalParamsJson(model_plus.model, orig_config_path) - else: + elif model_plus.format != 'none': params = Params.guessed(model_plus.model) + else: + raise ValueError('Cannot guess params when model format is none') params.path_model = model_plus.paths[0].parent @@ -597,7 +599,7 @@ def validate_conversion_to(self, data_type: DataType) -> None: class ModelPlus: model: LazyModel paths: List[Path] # Where this was read from. 
- format: Literal['ggml', 'torch', 'safetensors'] + format: Literal['ggml', 'torch', 'safetensors', 'none'] vocab: Optional[Vocab] # For GGML models (which have vocab built in), the vocab. @@ -930,7 +932,7 @@ def close(self) -> None: self.gguf.close() @staticmethod - def write_vocab_only(fname_out: Path, params: Params, vocab: Vocab) -> None: + def write_vocab_only(fname_out: Path, params: Params, vocab: Vocab, svocab: SpecialVocab) -> None: check_vocab_size(params, vocab) of = OutputFile(fname_out) @@ -938,6 +940,8 @@ def write_vocab_only(fname_out: Path, params: Params, vocab: Vocab) -> None: # meta data of.add_meta_arch(params) of.add_meta_vocab(vocab) + of.add_meta_special_vocab(svocab) + of.write_meta() of.close() @@ -1187,8 +1191,13 @@ def main(args_in: Optional[List[str]] = None) -> None: if args.dump_single: model_plus = lazy_load_file(args.model) do_dump_model(model_plus) + return - model_plus = load_some_model(args.model) + if not args.vocab_only: + model_plus = load_some_model(args.model) + else: + # You can no longer use guessed parameters for your vocab only model. Does anyone actually care? + model_plus = ModelPlus(model = {}, paths = [args.model / 'dummy'], format = 'none', vocab = None) if args.dump: do_dump_model(model_plus) @@ -1214,11 +1223,12 @@ def main(args_in: Optional[List[str]] = None) -> None: vocab: Vocab if args.vocab_only: - # FIXME: Handle special vocab here also. - vocab = load_vocab(args.vocab_dir or args.model, args.vocabtype) assert args.outfile, "need --outfile if using --vocab-only" + # FIXME: Try to respect vocab_dir somehow? + vocab = load_vocab(args.vocab_dir or args.model, args.vocabtype) + special_vocab = SpecialVocab(model_plus.paths[0].parent) outfile = args.outfile - OutputFile.write_vocab_only(outfile, params, vocab) + OutputFile.write_vocab_only(outfile, params, vocab, special_vocab) print(f"Wrote {outfile}") return From 531746e9535491de839c2539e09080da5d78bcaf Mon Sep 17 00:00:00 2001 From: KerfuffleV2 Date: Sun, 27 Aug 2023 17:18:47 -0600 Subject: [PATCH 06/17] gguf: Refactor tensor name mapping --- convert-falcon-hf-to-gguf.py | 11 +- convert-gptneox-hf-to-gguf.py | 15 +- convert-llama-7b-pth-to-gguf.py | 13 +- convert-llama-ggmlv3-to-gguf.py | 11 +- convert-llama-hf-to-gguf.py | 11 +- convert-lora-to-ggml.py | 4 +- convert.py | 22 +-- gguf-py/gguf/gguf.py | 303 +++++++++++++++++--------------- 8 files changed, 195 insertions(+), 195 deletions(-) diff --git a/convert-falcon-hf-to-gguf.py b/convert-falcon-hf-to-gguf.py index 5c69d13491e9c..d6a01ec013359 100755 --- a/convert-falcon-hf-to-gguf.py +++ b/convert-falcon-hf-to-gguf.py @@ -238,11 +238,8 @@ def count_model_parts(dir_model: str) -> int: data = data.squeeze().numpy() # map tensor names - if name.endswith(".weight") and name[:-7] in tensor_map: - name = tensor_map[name[:-7]] + ".weight" - elif name.endswith(".bias") and name[:-5] in tensor_map: - name = tensor_map[name[:-5]] + ".bias" - else: + new_name = tensor_map.get_name(name, try_suffixes = (".weight", ".bias")) + if new_name is None: print("Can not map tensor '" + name + "'") sys.exit() @@ -261,9 +258,9 @@ def count_model_parts(dir_model: str) -> int: if ftype == 1 and data_dtype == np.float32 and name.endswith(".weight") and n_dims == 2: data = data.astype(np.float16) - print(name + ", n_dims = " + str(n_dims) + ", " + str(old_dtype) + " --> " + str(data.dtype)) + print(new_name + ", n_dims = " + str(n_dims) + ", " + str(old_dtype) + " --> " + str(data.dtype)) - gguf_writer.add_tensor(name, data) + 
gguf_writer.add_tensor(new_name, data) print("gguf: write header") diff --git a/convert-gptneox-hf-to-gguf.py b/convert-gptneox-hf-to-gguf.py index d9c42d76b1ff5..88ac7ff7b9b65 100755 --- a/convert-gptneox-hf-to-gguf.py +++ b/convert-gptneox-hf-to-gguf.py @@ -111,7 +111,7 @@ def count_model_parts(dir_model: str) -> int: print("gguf: get tokenizer metadata") -tokens: List[str] = [] +tokens: List[bytearray] = [] merges: List[str] = [] @@ -200,7 +200,7 @@ def count_model_parts(dir_model: str) -> int: print("gguf: get tensor metadata") if num_parts == 0: - part_names = ("pytorch_model.bin",) + part_names = iter(("pytorch_model.bin",)) else: part_names = ( f"pytorch_model-{n:05}-of-{num_parts:05}.bin" for n in range(1, num_parts + 1) @@ -226,11 +226,8 @@ def count_model_parts(dir_model: str) -> int: data = data.squeeze().numpy() # map tensor names - if name.endswith(".weight") and name[:-7] in tensor_map: - name = tensor_map[name[:-7]] + ".weight" - elif name.endswith(".bias") and name[:-5] in tensor_map: - name = tensor_map[name[:-5]] + ".bias" - else: + new_name = tensor_map.get_name(name, try_suffixes = (".weight", ".bias")) + if new_name is None: print("Can not map tensor '" + name + "'") sys.exit() @@ -249,9 +246,9 @@ def count_model_parts(dir_model: str) -> int: if ftype == 1 and data_dtype == np.float32 and name.endswith(".weight") and n_dims == 2: data = data.astype(np.float16) - print(name + ", n_dims = " + str(n_dims) + ", " + str(old_dtype) + " --> " + str(data.dtype)) + print(new_name + ", n_dims = " + str(n_dims) + ", " + str(old_dtype) + " --> " + str(data.dtype)) - gguf_writer.add_tensor(name, data) + gguf_writer.add_tensor(new_name, data) print("gguf: write header") diff --git a/convert-llama-7b-pth-to-gguf.py b/convert-llama-7b-pth-to-gguf.py index 2ab08238335ef..037b126ede935 100755 --- a/convert-llama-7b-pth-to-gguf.py +++ b/convert-llama-7b-pth-to-gguf.py @@ -11,7 +11,7 @@ import numpy as np import torch -from typing import Any, List +from typing import Any, List, TypeAlias from pathlib import Path from sentencepiece import SentencePieceProcessor @@ -266,11 +266,8 @@ def count_model_parts(dir_model: str) -> int: data = data.squeeze().numpy() # map tensor names - if name.endswith(".weight") and name[:-7] in tensor_map: - name = tensor_map[name[:-7]] + ".weight" - elif name.endswith(".bias") and name[:-5] in tensor_map: - name = tensor_map[name[:-5]] + ".bias" - else: + new_name = tensor_map.get_name(name, try_suffixes = (".weight", ".bias")) + if new_name is None: print("Can not map tensor '" + name + "'") sys.exit() @@ -289,9 +286,9 @@ def count_model_parts(dir_model: str) -> int: if ftype == 1 and data_dtype == np.float32 and name.endswith(".weight") and n_dims == 2: data = data.astype(np.float16) - print(name + ", n_dims = " + str(n_dims) + ", " + str(old_dtype) + " --> " + str(data.dtype)) + print(new_name + ", n_dims = " + str(n_dims) + ", " + str(old_dtype) + " --> " + str(data.dtype)) - gguf_writer.add_tensor(name, data) + gguf_writer.add_tensor(new_name, data) print("gguf: write header") diff --git a/convert-llama-ggmlv3-to-gguf.py b/convert-llama-ggmlv3-to-gguf.py index 3bf93627d225c..e03cdda708c5f 100755 --- a/convert-llama-ggmlv3-to-gguf.py +++ b/convert-llama-ggmlv3-to-gguf.py @@ -259,20 +259,13 @@ def add_vocab(self, gguf_writer): gguf_writer.add_eos_token_id(2) def add_tensors(self, gguf_writer): - nm = self.name_map + tensor_map = self.name_map data = self.data print(f'* Adding {len(self.model.tensors)} tensor(s)') for tensor in self.model.tensors: name = 
str(tensor.name, 'UTF-8') - if name.endswith('.weight'): - name = name[:-7] - suffix = '.weight' - elif name.endswith('.bias'): - name = name[:-5] - suffix = '.bias' - mapped_name = nm.get(name) + mapped_name = tensor_map.get_name(name, try_suffixes = (".weight", ".bias")) assert mapped_name is not None, f'Bad name {name}' - mapped_name += suffix tempdims = list(tensor.dims[:]) if len(tempdims) > 1: temp = tempdims[1] diff --git a/convert-llama-hf-to-gguf.py b/convert-llama-hf-to-gguf.py index 534b71add519a..e8da448caeaa3 100755 --- a/convert-llama-hf-to-gguf.py +++ b/convert-llama-hf-to-gguf.py @@ -286,11 +286,8 @@ def count_model_parts(dir_model: str) -> int: data = reverse_hf_permute(data, head_count, head_count_kv) # map tensor names - if name.endswith(".weight") and name[:-7] in tensor_map: - name = tensor_map[name[:-7]] + ".weight" - elif name.endswith(".bias") and name[:-5] in tensor_map: - name = tensor_map[name[:-5]] + ".bias" - else: + new_name = tensor_map.get_name(name, try_suffixes = (".weight", ".bias")) + if new_name is None: print("Can not map tensor '" + name + "'") sys.exit() @@ -309,9 +306,9 @@ def count_model_parts(dir_model: str) -> int: if ftype == 1 and data_dtype == np.float32 and name.endswith(".weight") and n_dims == 2: data = data.astype(np.float16) - print(name + ", n_dims = " + str(n_dims) + ", " + str(old_dtype) + " --> " + str(data.dtype)) + print(new_name + ", n_dims = " + str(n_dims) + ", " + str(old_dtype) + " --> " + str(data.dtype)) - gguf_writer.add_tensor(name, data) + gguf_writer.add_tensor(new_name, data) print("gguf: write header") diff --git a/convert-lora-to-ggml.py b/convert-lora-to-ggml.py index a94a7d0afbdaf..1112532bae12d 100755 --- a/convert-lora-to-ggml.py +++ b/convert-lora-to-ggml.py @@ -4,7 +4,7 @@ import re import struct import sys -from typing import Any, Dict, Sequence, TextIO +from typing import Any, Dict, Sequence, BinaryIO import numpy as np import torch @@ -46,7 +46,7 @@ def translate_tensor_name(t: str) -> str: sys.exit(1) -def write_file_header(fout: TextIO, params: Dict[str, Any]) -> None: +def write_file_header(fout: BinaryIO, params: Dict[str, Any]) -> None: fout.write(b"ggla"[::-1]) # magic (ggml lora) fout.write(struct.pack("i", 1)) # file version fout.write(struct.pack("i", params["r"])) diff --git a/convert.py b/convert.py index b3b003f3b86f4..539ee2c415f04 100755 --- a/convert.py +++ b/convert.py @@ -1013,7 +1013,8 @@ def convert_to_output_type(model: LazyModel, output_type: GGMLFileType) -> LazyM for (name, tensor) in model.items()} def convert_model_names(model: LazyModel, params: Params) -> LazyModel: - tmap = gguf.get_tensor_name_map(ARCH, params.n_layer) + tmap = gguf.TensorNameMap(ARCH, params.n_layer) + should_skip: Set[gguf.MODEL_TENSOR] = gguf.MODEL_TENSOR_SKIP.get(ARCH, set()) tmp = model @@ -1035,23 +1036,16 @@ def convert_model_names(model: LazyModel, params: Params) -> LazyModel: out: LazyModel = {} for name, lazy_tensor in model.items(): - name_new = name - - if name in tmap: - name_new = tmap[name] - elif name.endswith(".weight") and name[:-7] in tmap: - name_new = tmap[name[:-7]] + ".weight" - elif name.endswith(".bias") and name[:-5] in tmap: - name_new = tmap[name[:-5]] + ".bias" - else: + tensor_type, name_new = tmap.get_both(name, try_suffixes = (".weight", ".bias")) or (None, None) + if name_new is None: raise Exception(f"Unexpected tensor name: {name}") - if gguf.should_skip_tensor_TMP(ARCH, params.n_layer, name_new): + if tensor_type in should_skip: print(f"skipping tensor {name_new}") continue - 
else: - print(f"{name:48s} -> {name_new:40s} | {lazy_tensor.data_type.name:6s} | {lazy_tensor.shape}") - out[name_new] = lazy_tensor + + print(f"{name:48s} -> {name_new:40s} | {lazy_tensor.data_type.name:6s} | {lazy_tensor.shape}") + out[name_new] = lazy_tensor return out diff --git a/gguf-py/gguf/gguf.py b/gguf-py/gguf/gguf.py index 8f119c53490e3..684cf397d51fa 100644 --- a/gguf-py/gguf/gguf.py +++ b/gguf-py/gguf/gguf.py @@ -6,7 +6,7 @@ import numpy as np from enum import IntEnum, auto -from typing import Any, BinaryIO, IO, Dict, List, Optional, Tuple +from typing import Any, BinaryIO, IO, Dict, List, Optional, Sequence, Tuple # # constants @@ -162,167 +162,192 @@ class MODEL_TENSOR(IntEnum): } -# TODO: the following helper functions should be removed -# instead, get_tensor_name_map should return tuples of (name, MODEL_TENSOR) -# however, my Python is very bad, and I couldn't figure out how to do this, hence these functions -# REMOVE -def should_skip_tensor_TMP(arch: MODEL_ARCH, n_blocks: int, name: str) -> bool: - for skip in MODEL_TENSOR_SKIP.get(arch, []): - for i in range(n_blocks): - if name == MODEL_TENSOR_NAMES[arch][skip].format(bid=i): - return True - - return False - - -def get_tensor_name_map(arch: MODEL_ARCH, n_blocks: int) -> dict: - tensor_map = {} - - # Token embeddings - mapped_to = MODEL_TENSOR_NAMES[arch].get(MODEL_TENSOR.TOKEN_EMBD, None) - - tensor_map["gpt_neox.embed_in"] = mapped_to # gptneox - tensor_map["transformer.wte"] = mapped_to # gpt2 mpt - tensor_map["transformer.word_embeddings"] = mapped_to # falcon - tensor_map["model.embed_tokens"] = mapped_to # llama-hf - tensor_map["tok_embeddings"] = mapped_to # llama-pth - - # Position embeddings - mapped_to = MODEL_TENSOR_NAMES[arch].get(MODEL_TENSOR.POS_EMBD, None) - - tensor_map["transformer.wpe"] = mapped_to # gpt2 - - # Output - mapped_to = MODEL_TENSOR_NAMES[arch].get(MODEL_TENSOR.OUTPUT, None) - - tensor_map["embed_out"] = mapped_to # gptneox - tensor_map["lm_head"] = mapped_to # gpt2 mpt falcon llama-hf - tensor_map["output"] = mapped_to # llama-pth - - # Output norm - mapped_to = MODEL_TENSOR_NAMES[arch].get(MODEL_TENSOR.OUTPUT_NORM, None) - - tensor_map["gpt_neox.final_layer_norm"] = mapped_to # gptneox - tensor_map["transformer.ln_f"] = mapped_to # gpt2 falcon - tensor_map["transformer.norm_f"] = mapped_to # mpt - tensor_map["model.norm"] = mapped_to # llama-hf - tensor_map["norm"] = mapped_to # llama-pth - - # Rope frequencies - mapped_to = MODEL_TENSOR_NAMES[arch].get(MODEL_TENSOR.ROPE_FREQS, None) - - tensor_map["rope.freqs"] = mapped_to # llama-pth - - # Attention and feed-forward blocks - for i in range(0, n_blocks): +class TensorNameMap: + mappings_cfg: Dict[MODEL_TENSOR, Tuple[str, ...]] = { + # Token embeddings + MODEL_TENSOR.TOKEN_EMBD: ( + "gpt_neox.embed_in", # gptneox + "transformer.wte", # gpt2 mpt + "transformer.word_embeddings", # falcon + "model.embed_tokens", # llama-hf + "tok_embeddings", # llama-pth + ), + + # Position embeddings + MODEL_TENSOR.POS_EMBD: ( + "transformer.wpe", # gpt2 + ), + + # Output + MODEL_TENSOR.OUTPUT: ( + "embed_out", # gptneox + "lm_head", # gpt2 mpt falcon llama-hf + "output", # llama-pth + ), + + # Output norm + MODEL_TENSOR.OUTPUT_NORM: ( + "gpt_neox.final_layer_norm", # gptneox + "transformer.ln_f", # gpt2 falcon + "model.norm", # llama-hf + "norm", # llama-pth + ), + + # Rope frequencies + MODEL_TENSOR.ROPE_FREQS: ( + "rope.freqs", # llama-pth + ), + } + + block_mappings_cfg: Dict[MODEL_TENSOR, Tuple[str, ...]] = { # Attention norm - # TODO: is there are 
simpler way to write these 2 lines in Python? - mapped_to = MODEL_TENSOR_NAMES[arch].get(MODEL_TENSOR.ATTN_NORM, None) - mapped_to = mapped_to.format(bid=i) if mapped_to else None - - tensor_map["gpt_neox.layers."+str(i)+".input_layernorm"] = mapped_to # gptneox - tensor_map["transformer.h."+str(i)+".ln_1"] = mapped_to # gpt2 - tensor_map["transformer.blocks."+str(i)+".norm_1"] = mapped_to # mpt - tensor_map["transformer.h."+str(i)+".input_layernorm"] = mapped_to # falcon7b - tensor_map["transformer.h."+str(i)+".ln_mlp"] = mapped_to # falcon40b - tensor_map["model.layers."+str(i)+".input_layernorm"] = mapped_to # llama-hf - tensor_map["layers."+str(i)+".attention_norm"] = mapped_to # llama-pth + MODEL_TENSOR.ATTN_NORM: ( + "gpt_neox.layers.{bid}.input_layernorm", # gptneox + "transformer.h.{bid}.ln_1", # gpt2 + "transformer.blocks.{bid}.norm_1", # mpt + "transformer.h.{bid}.input_layernorm", # falcon7b + "transformer.h.{bid}.ln_mlp", # falcon40b + "model.layers.{bid}.input_layernorm", # llama-hf + "layers.{bid}.attention_norm", # llama-pth + ), # Attention norm 2 - mapped_to = MODEL_TENSOR_NAMES[arch].get(MODEL_TENSOR.ATTN_NORM_2, None) - mapped_to = mapped_to.format(bid=i) if mapped_to is not None else None - - tensor_map["transformer.h."+str(i)+".ln_attn"] = mapped_to # falcon40b + MODEL_TENSOR.ATTN_NORM_2: ( + "transformer.h.{bid}.ln_attn", # falcon40b + ), # Attention query-key-value - mapped_to = MODEL_TENSOR_NAMES[arch].get(MODEL_TENSOR.ATTN_QKV, None) - mapped_to = mapped_to.format(bid=i) if mapped_to is not None else None - - tensor_map["gpt_neox.layers."+str(i)+".attention.query_key_value"] = mapped_to # gptneox - tensor_map["transformer.h."+str(i)+".attn.c_attn"] = mapped_to # gpt2 - tensor_map["transformer.blocks."+str(i)+".attn.Wqkv"] = mapped_to # mpt - tensor_map["transformer.h."+str(i)+".self_attention.query_key_value"] = mapped_to # falcon + MODEL_TENSOR.ATTN_QKV: ( + "gpt_neox.layers.{bid}.attention.query_key_value", # gptneox + "transformer.h.{bid}.attn.c_attn", # gpt2 + "transformer.blocks.{bid}.attn.Wqkv", # mpt + "transformer.h.{bid}.self_attention.query_key_value", # falcon + ), # Attention query - mapped_to = MODEL_TENSOR_NAMES[arch].get(MODEL_TENSOR.ATTN_Q, None) - mapped_to = mapped_to.format(bid=i) if mapped_to is not None else None - - tensor_map["model.layers."+str(i)+".self_attn.q_proj"] = mapped_to # llama-hf - tensor_map["layers."+str(i)+".attention.wq"] = mapped_to # llama-pth + MODEL_TENSOR.ATTN_Q: ( + "model.layers.{bid}.self_attn.q_proj", # llama-hf + "layers.{bid}.attention.wq", # llama-pth + ), # Attention key - mapped_to = MODEL_TENSOR_NAMES[arch].get(MODEL_TENSOR.ATTN_K, None) - mapped_to = mapped_to.format(bid=i) if mapped_to is not None else None - - tensor_map["model.layers."+str(i)+".self_attn.k_proj"] = mapped_to # llama-hf - tensor_map["layers."+str(i)+".attention.wk"] = mapped_to # llama-pth + MODEL_TENSOR.ATTN_K: ( + "model.layers.{bid}.self_attn.k_proj", # llama-hf + "layers.{bid}.attention.wk", # llama-pth + ), # Attention value - mapped_to = MODEL_TENSOR_NAMES[arch].get(MODEL_TENSOR.ATTN_V, None) - mapped_to = mapped_to.format(bid=i) if mapped_to is not None else None - - tensor_map["model.layers."+str(i)+".self_attn.v_proj"] = mapped_to # llama-hf - tensor_map["layers."+str(i)+".attention.wv"] = mapped_to # llama-pth + MODEL_TENSOR.ATTN_V: ( + "model.layers.{bid}.self_attn.v_proj", # llama-hf + "layers.{bid}.attention.wv", # llama-pth + ), # Attention output - mapped_to = MODEL_TENSOR_NAMES[arch].get(MODEL_TENSOR.ATTN_OUT, None) - 
mapped_to = mapped_to.format(bid=i) if mapped_to is not None else None - - tensor_map["gpt_neox.layers."+str(i)+".attention.dense"] = mapped_to # gptneox - tensor_map["transformer.h."+str(i)+".attn.c_proj"] = mapped_to # gpt2 - tensor_map["transformer.blocks."+str(i)+".attn.out_proj"] = mapped_to # mpt - tensor_map["transformer.h."+str(i)+".self_attention.dense"] = mapped_to # falcon - tensor_map["model.layers."+str(i)+".self_attn.o_proj"] = mapped_to # llama-hf - tensor_map["layers."+str(i)+".attention.wo"] = mapped_to # llama-pth + MODEL_TENSOR.ATTN_OUT: ( + "gpt_neox.layers.{bid}.attention.dense", # gptneox + "transformer.h.{bid}.attn.c_proj", # gpt2 + "transformer.blocks.{bid}.attn.out_proj", # mpt + "transformer.h.{bid}.self_attention.dense", # falcon + "model.layers.{bid}.self_attn.o_proj", # llama-hf + "layers.{bid}.attention.wo", # llama-pth + ), # Rotary embeddings - mapped_to = MODEL_TENSOR_NAMES[arch].get(MODEL_TENSOR.ATTN_ROT_EMBD, None) - mapped_to = mapped_to.format(bid=i) if mapped_to is not None else None - - tensor_map["model.layers."+str(i)+".self_attn.rotary_emb.inv_freq"] = mapped_to # llama-hf - tensor_map["layers."+str(i)+".attention.inner_attention.rope.freqs"] = mapped_to # llama-pth + MODEL_TENSOR.ATTN_ROT_EMBD: ( + "model.layers.{bid}.self_attn.rotary_emb.inv_freq", # llama-hf + "layers.{bid}.attention.inner_attention.rope.freqs", # llama-pth + ), # Feed-forward norm - mapped_to = MODEL_TENSOR_NAMES[arch].get(MODEL_TENSOR.FFN_NORM, None) - mapped_to = mapped_to.format(bid=i) if mapped_to is not None else None - - tensor_map["gpt_neox.layers."+str(i)+".post_attention_layernorm"] = mapped_to # gptneox - tensor_map["transformer.h."+str(i)+".ln_2"] = mapped_to # gpt2 - tensor_map["transformer.blocks."+str(i)+".norm_2"] = mapped_to # mpt - tensor_map["model.layers."+str(i)+".post_attention_layernorm"] = mapped_to # llama-hf - tensor_map["layers."+str(i)+".ffn_norm"] = mapped_to # llama-pth + MODEL_TENSOR.FFN_NORM: ( + "gpt_neox.layers.{bid}.post_attention_layernorm", # gptneox + "transformer.h.{bid}.ln_2", # gpt2 + "transformer.blocks.{bid}.norm_2", # mpt + "model.layers.{bid}.post_attention_layernorm", # llama-hf + "layers.{bid}.ffn_norm", # llama-pth + ), # Feed-forward up - mapped_to = MODEL_TENSOR_NAMES[arch].get(MODEL_TENSOR.FFN_UP, None) - mapped_to = mapped_to.format(bid=i) if mapped_to is not None else None - - tensor_map["gpt_neox.layers."+str(i)+".mlp.dense_h_to_4h"] = mapped_to # gptneox - tensor_map["transformer.h."+str(i)+".mlp.c_fc"] = mapped_to # gpt2 - tensor_map["transformer.blocks."+str(i)+".ffn.up_proj"] = mapped_to # mpt - tensor_map["transformer.h."+str(i)+".mlp.dense_h_to_4h"] = mapped_to # falcon - tensor_map["model.layers."+str(i)+".mlp.up_proj"] = mapped_to # llama-hf - tensor_map["layers."+str(i)+".feed_forward.w3"] = mapped_to # llama-pth + MODEL_TENSOR.FFN_UP: ( + "gpt_neox.layers.{bid}.mlp.dense_h_to_4h", # gptneox + "transformer.h.{bid}.mlp.c_fc", # gpt2 + "transformer.blocks.{bid}.ffn.up_proj", # mpt + "transformer.h.{bid}.mlp.dense_h_to_4h", # falcon + "model.layers.{bid}.mlp.up_proj", # llama-hf + "layers.{bid}.feed_forward.w3", # llama-pth + ), # Feed-forward gate - mapped_to = MODEL_TENSOR_NAMES[arch].get(MODEL_TENSOR.FFN_GATE, None) - mapped_to = mapped_to.format(bid=i) if mapped_to is not None else None - - tensor_map["model.layers."+str(i)+".mlp.gate_proj"] = mapped_to # llama-hf - tensor_map["layers."+str(i)+".feed_forward.w1"] = mapped_to # llama-pth + MODEL_TENSOR.FFN_GATE: ( + "model.layers.{bid}.mlp.gate_proj", # llama-hf + 
"layers.{bid}.feed_forward.w1", # llama-pth + ), # Feed-forward down - mapped_to = MODEL_TENSOR_NAMES[arch].get(MODEL_TENSOR.FFN_DOWN, None) - mapped_to = mapped_to.format(bid=i) if mapped_to is not None else None - - tensor_map["gpt_neox.layers."+str(i)+".mlp.dense_4h_to_h"] = mapped_to # gptneox - tensor_map["transformer.h."+str(i)+".mlp.c_proj"] = mapped_to # gpt2 - tensor_map["transformer.blocks."+str(i)+".ffn.down_proj"] = mapped_to # mpt - tensor_map["transformer.h."+str(i)+".mlp.dense_4h_to_h"] = mapped_to # falcon - tensor_map["model.layers."+str(i)+".mlp.down_proj"] = mapped_to # llama-hf - tensor_map["layers."+str(i)+".feed_forward.w2"] = mapped_to # llama-pth - - return tensor_map - + MODEL_TENSOR.FFN_DOWN: ( + "gpt_neox.layers.{bid}.mlp.dense_4h_to_h", # gptneox + "transformer.h.{bid}.mlp.c_proj", # gpt2 + "transformer.blocks.{bid}.ffn.down_proj", # mpt + "transformer.h.{bid}.mlp.dense_4h_to_h", # falcon + "model.layers.{bid}.mlp.down_proj", # llama-hf + "layers.{bid}.feed_forward.w2", # llama-pth + ), + } + + mapping: Dict[str, Tuple[MODEL_TENSOR, str]] + + tensor_names: Dict[MODEL_TENSOR, str] + + def __init__(self, arch: MODEL_ARCH, n_blocks: int): + mapping = self.mapping = {} + tensor_names = self.tensor_names = MODEL_TENSOR_NAMES[arch] + for tensor, keys in self.mappings_cfg.items(): + tensor_name = tensor_names.get(tensor) + if tensor_name is None: + continue + for key in keys: + mapping[key] = (tensor, tensor_name) + for bid in range(n_blocks): + for tensor, keys in self.block_mappings_cfg.items(): + tensor_name = tensor_names.get(tensor) + if tensor_name is None: + continue + tensor_name = tensor_name.format(bid = bid) + for key in keys: + key = key.format(bid = bid) + mapping[key] = (tensor, tensor_name) + + def get_both(self, key: str, try_suffixes: Sequence[str]) -> Optional[Tuple[MODEL_TENSOR, str]]: + result = self.mapping.get(key) + if result is not None: + return result + for suffix in try_suffixes: + if key.endswith(suffix): + result = self.mapping.get(key[:-len(suffix)]) + if result is not None: + return (result[0], result[1] + suffix) + return None + + def get_name(self, key: str, try_suffixes: Sequence[str]) -> Optional[str]: + result = self.get_both(key, try_suffixes = try_suffixes) + if result is None: + return None + return result[1] + + def __getitem__(self, key: str) -> str: + try: + return self.mapping[key][1] + except KeyError: + raise KeyError(key) + + def __contains__(self, key: str) -> bool: + return key in self.mapping + + def __repr__(self) -> str: + return repr(self.mapping) + +def get_tensor_name_map(arch: MODEL_ARCH, n_blocks: int) -> TensorNameMap: + return TensorNameMap(arch, n_blocks) class TokenType(IntEnum): NORMAL = 1 From 120ed6453f5b917ec7007723d5ab28e33f9310e8 Mon Sep 17 00:00:00 2001 From: KerfuffleV2 Date: Sun, 27 Aug 2023 19:36:07 -0600 Subject: [PATCH 07/17] convert: Fix type hint for special_token_types in SpecialVocab --- convert.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/convert.py b/convert.py index 539ee2c415f04..05904209ba0d5 100755 --- a/convert.py +++ b/convert.py @@ -423,7 +423,7 @@ class SpecialVocab: special_token_types: Tuple[str, ...] 
= tuple(('bos', 'eos', 'unk', 'sep', 'pad')) special_token_ids: Dict[str, int] = {} - def __init__(self, path: Path, special_token_types: Optional[Tuple[str]] = None): + def __init__(self, path: Path, special_token_types: Optional[Tuple[str, ...]] = None): self.special_token_ids = {} if special_token_types is not None: self.special_token_types = special_token_types From bb6b64d5e54288e5cba965a842b44d4f8c5427fb Mon Sep 17 00:00:00 2001 From: KerfuffleV2 Date: Sun, 27 Aug 2023 19:59:01 -0600 Subject: [PATCH 08/17] Use common special vocab handling in various conversion scripts --- convert-falcon-hf-to-gguf.py | 29 +++------------- convert-gptneox-hf-to-gguf.py | 44 +++--------------------- convert-llama-7b-pth-to-gguf.py | 60 +++------------------------------ convert-llama-ggmlv3-to-gguf.py | 15 ++++++--- convert-llama-hf-to-gguf.py | 60 +++------------------------------ 5 files changed, 27 insertions(+), 181 deletions(-) diff --git a/convert-falcon-hf-to-gguf.py b/convert-falcon-hf-to-gguf.py index d6a01ec013359..de251a0fa4e27 100755 --- a/convert-falcon-hf-to-gguf.py +++ b/convert-falcon-hf-to-gguf.py @@ -13,6 +13,8 @@ from pathlib import Path from transformers import AutoTokenizer +from convert import SpecialVocab + def bytes_to_unicode(): # ref: https://github.com/openai/gpt-2/blob/master/src/encoder.py """ @@ -116,20 +118,13 @@ def count_model_parts(dir_model: str) -> int: tokens: List[bytearray] = [] scores: List[float] = [] toktypes: List[int] = [] -merges: List[str] = [] - if Path(dir_model + "/tokenizer.json").is_file(): # gpt2 tokenizer gguf_writer.add_tokenizer_model("gpt2") - print("gguf: get gpt2 tokenizer merges") - with open(dir_model + "/tokenizer.json", "r", encoding="utf-8") as f: tokenizer_json = json.load(f) - merges = tokenizer_json["model"]["merges"] - - gguf_writer.add_token_merges(merges) print("gguf: get gpt2 tokenizer vocab") @@ -166,24 +161,8 @@ def count_model_parts(dir_model: str) -> int: gguf_writer.add_token_scores(scores) gguf_writer.add_token_types(toktypes) -print("gguf: get special token ids") -# Look for special tokens in config.json - -if "bos_token_id" in hparams and hparams["bos_token_id"] != None: - gguf_writer.add_bos_token_id(hparams["bos_token_id"]) - -if "eos_token_id" in hparams and hparams["eos_token_id"] != None: - gguf_writer.add_eos_token_id(hparams["eos_token_id"]) - -if "unk_token_id" in hparams and hparams["unk_token_id"] != None: - gguf_writer.add_unk_token_id(hparams["unk_token_id"]) - -if "sep_token_id" in hparams and hparams["sep_token_id"] != None: - gguf_writer.add_sep_token_id(hparams["sep_token_id"]) - -if "pad_token_id" in hparams and hparams["pad_token_id"] != None: - gguf_writer.add_pad_token_id(hparams["pad_token_id"]) - +special_vocab = SpecialVocab(Path(dir_model)) +special_vocab.add_to_gguf(gguf_writer) # TENSORS diff --git a/convert-gptneox-hf-to-gguf.py b/convert-gptneox-hf-to-gguf.py index 88ac7ff7b9b65..a7695655d0446 100755 --- a/convert-gptneox-hf-to-gguf.py +++ b/convert-gptneox-hf-to-gguf.py @@ -13,6 +13,8 @@ from pathlib import Path from transformers import AutoTokenizer +from convert import SpecialVocab + # ref: https://github.com/openai/gpt-2/blob/master/src/encoder.py @@ -112,20 +114,13 @@ def count_model_parts(dir_model: str) -> int: print("gguf: get tokenizer metadata") tokens: List[bytearray] = [] -merges: List[str] = [] - if Path(dir_model + "/tokenizer.json").is_file(): # gpt2 tokenizer gguf_writer.add_tokenizer_model("gpt2") - print("gguf: get gpt2 tokenizer merges") - with open(dir_model + "/tokenizer.json", 
"r", encoding="utf-8") as f: tokenizer_json = json.load(f) - merges = tokenizer_json["model"]["merges"] - - gguf_writer.add_token_merges(merges) print("gguf: get gpt2 tokenizer vocab") @@ -158,39 +153,8 @@ def count_model_parts(dir_model: str) -> int: gguf_writer.add_token_list(tokens) - if "added_tokens" in tokenizer_json and Path(dir_model + "/tokenizer_config.json").is_file(): - print("gguf: get special token ids") - - with open(dir_model + "/tokenizer_config.json", "r", encoding="utf-8") as f: - tokenizer_config = json.load(f) - - # find special token ids - - if "bos_token" in tokenizer_config: - for key in tokenizer_json["added_tokens"]: - if key["content"] == tokenizer_config["bos_token"]: - gguf_writer.add_bos_token_id(key["id"]) - - if "eos_token" in tokenizer_config: - for key in tokenizer_json["added_tokens"]: - if key["content"] == tokenizer_config["eos_token"]: - gguf_writer.add_eos_token_id(key["id"]) - - if "unk_token" in tokenizer_config: - for key in tokenizer_json["added_tokens"]: - if key["content"] == tokenizer_config["unk_token"]: - gguf_writer.add_unk_token_id(key["id"]) - - if "sep_token" in tokenizer_config: - for key in tokenizer_json["added_tokens"]: - if key["content"] == tokenizer_config["sep_token"]: - gguf_writer.add_sep_token_id(key["id"]) - - if "pad_token" in tokenizer_config: - for key in tokenizer_json["added_tokens"]: - if key["content"] == tokenizer_config["pad_token"]: - gguf_writer.add_pad_token_id(key["id"]) - +special_vocab = SpecialVocab(Path(dir_model)) +special_vocab.add_to_gguf(gguf_writer) # TENSORS diff --git a/convert-llama-7b-pth-to-gguf.py b/convert-llama-7b-pth-to-gguf.py index 037b126ede935..3c5506fe609bc 100755 --- a/convert-llama-7b-pth-to-gguf.py +++ b/convert-llama-7b-pth-to-gguf.py @@ -15,6 +15,8 @@ from pathlib import Path from sentencepiece import SentencePieceProcessor +from convert import SpecialVocab + #NDArray = np.ndarray[Any, Any] # compatible with python < 3.9 NDArray: 'TypeAlias' = 'np.ndarray[Any, Any]' @@ -180,62 +182,8 @@ def count_model_parts(dir_model: str) -> int: gguf_writer.add_token_scores(scores) gguf_writer.add_token_types(toktypes) - -print("gguf: get special token ids") - -if Path(dir_model + "/tokenizer.json").is_file(): - # Look for special tokens in tokenizer.json if it exists - - with open(dir_model + "/tokenizer.json", "r", encoding="utf-8") as f: - tokenizer = json.load(f) - - if "added_tokens" in tokenizer and Path(dir_model + "/tokenizer_config.json").is_file(): - - with open(dir_model + "/tokenizer_config.json", "r", encoding="utf-8") as f: - tokenizer_config = json.load(f) - - if "bos_token" in tokenizer_config and tokenizer_config["bos_token"] != None: - for key in tokenizer["added_tokens"]: - if key["content"] == tokenizer_config["bos_token"]["content"]: - gguf_writer.add_bos_token_id(key["id"]) - - if "eos_token" in tokenizer_config and tokenizer_config["eos_token"] != None: - for key in tokenizer["added_tokens"]: - if key["content"] == tokenizer_config["eos_token"]["content"]: - gguf_writer.add_eos_token_id(key["id"]) - - if "unk_token" in tokenizer_config and tokenizer_config["unk_token"] != None: - for key in tokenizer["added_tokens"]: - if key["content"] == tokenizer_config["unk_token"]["content"]: - gguf_writer.add_unk_token_id(key["id"]) - - if "sep_token" in tokenizer_config and tokenizer_config["sep_token"] != None: - for key in tokenizer["added_tokens"]: - if key["content"] == tokenizer_config["sep_token"]["content"]: - gguf_writer.add_sep_token_id(key["id"]) - - if "pad_token" in 
tokenizer_config and tokenizer_config["pad_token"] != None: - for key in tokenizer["added_tokens"]: - if key["content"] == tokenizer_config["pad_token"]["content"]: - gguf_writer.add_pad_token_id(key["id"]) -else: - # If no tokenizer.json: Look for special tokens in config.json - - if "bos_token_id" in hparams and hparams["bos_token_id"] != None: - gguf_writer.add_bos_token_id(hparams["bos_token_id"]) - - if "eos_token_id" in hparams and hparams["eos_token_id"] != None: - gguf_writer.add_eos_token_id(hparams["eos_token_id"]) - - if "unk_token_id" in hparams and hparams["unk_token_id"] != None: - gguf_writer.add_unk_token_id(hparams["unk_token_id"]) - - if "sep_token_id" in hparams and hparams["sep_token_id"] != None: - gguf_writer.add_sep_token_id(hparams["sep_token_id"]) - - if "pad_token_id" in hparams and hparams["pad_token_id"] != None: - gguf_writer.add_pad_token_id(hparams["pad_token_id"]) - +special_vocab = SpecialVocab(Path(dir_model)) +special_vocab.add_to_gguf(gguf_writer) # TENSORS diff --git a/convert-llama-ggmlv3-to-gguf.py b/convert-llama-ggmlv3-to-gguf.py index e03cdda708c5f..78ba683ad6ae5 100755 --- a/convert-llama-ggmlv3-to-gguf.py +++ b/convert-llama-ggmlv3-to-gguf.py @@ -134,13 +134,14 @@ def load(self, data, offset): return offset class GGMLToGGUF: - def __init__(self, ggml_model, data, cfg, params_override = None, vocab_override = None): + def __init__(self, ggml_model, data, cfg, params_override = None, vocab_override = None, special_vocab = None): hp = ggml_model.hyperparameters self.model = ggml_model self.data = data self.cfg = cfg self.params_override = params_override self.vocab_override = vocab_override + self.special_vocab = special_vocab if params_override is not None: n_kv_head = params_override.n_head_kv else: @@ -162,6 +163,8 @@ def save(self): gguf_writer = gguf.GGUFWriter(self.cfg.output, gguf.MODEL_ARCH_NAMES[gguf.MODEL_ARCH.LLAMA], use_temp_file = False) self.add_params(gguf_writer) self.add_vocab(gguf_writer) + if self.special_vocab is not None: + self.special_vocab.add_to_gguf(gguf_writer) self.add_tensors(gguf_writer) print(" gguf: write header") gguf_writer.write_header_to_file() @@ -295,8 +298,10 @@ def handle_metadata(cfg, hp): else: raise ValueError('Unable to load metadata') vocab = convert.load_vocab(cfg.vocab_dir if cfg.vocab_dir is not None else cfg.model_metadata_dir, cfg.vocabtype) + # FIXME: Respect cfg.vocab_dir? + svocab = convert.SpecialVocab(cfg.model_metadata_dir) convert.check_vocab_size(params, vocab) - return (params, vocab) + return (params, vocab, svocab) def handle_args(): parser = argparse.ArgumentParser(description = 'Convert GGMLv3 models to GGUF') @@ -323,14 +328,16 @@ def main(): print(f'* GGML model hyperparameters: {model.hyperparameters}') vocab_override = None params_override = None + special_vocab = None if cfg.model_metadata_dir is not None: - (params_override, vocab_override) = handle_metadata(cfg, model.hyperparameters) + (params_override, vocab_override, special_vocab) = handle_metadata(cfg, model.hyperparameters) print('!! Note: When overriding params the --gqa, --eps and --context-length options are ignored.') print(f'* Overriding params: {params_override}') print(f'* Overriding vocab: {vocab_override}') + print(f'* Special vocab: {special_vocab}') else: print('\n=== WARNING === Special tokens may not be converted correctly. 
Use --model-metadata-dir if possible === WARNING ===\n') - converter = GGMLToGGUF(model, data, cfg, params_override = params_override, vocab_override = vocab_override) + converter = GGMLToGGUF(model, data, cfg, params_override = params_override, vocab_override = vocab_override, special_vocab = special_vocab) converter.save() print(f'* Successful completion. Output saved to: {cfg.output}') diff --git a/convert-llama-hf-to-gguf.py b/convert-llama-hf-to-gguf.py index e8da448caeaa3..5b70bb77d715f 100755 --- a/convert-llama-hf-to-gguf.py +++ b/convert-llama-hf-to-gguf.py @@ -13,6 +13,8 @@ from pathlib import Path from sentencepiece import SentencePieceProcessor +from convert import SpecialVocab + #NDArray = np.ndarray[Any, Any] # compatible with python < 3.9 NDArray: 'TypeAlias' = 'np.ndarray[Any, Any]' @@ -189,62 +191,8 @@ def count_model_parts(dir_model: str) -> int: gguf_writer.add_token_scores(scores) gguf_writer.add_token_types(toktypes) - -print("gguf: get special token ids") - -if Path(dir_model + "/tokenizer.json").is_file(): - # Look for special tokens in tokenizer.json if it exists - - with open(dir_model + "/tokenizer.json", "r", encoding="utf-8") as f: - tokenizer = json.load(f) - - if "added_tokens" in tokenizer and Path(dir_model + "/tokenizer_config.json").is_file(): - - with open(dir_model + "/tokenizer_config.json", "r", encoding="utf-8") as f: - tokenizer_config = json.load(f) - - if "bos_token" in tokenizer_config and tokenizer_config["bos_token"] != None: - for key in tokenizer["added_tokens"]: - if key["content"] == tokenizer_config["bos_token"]["content"]: - gguf_writer.add_bos_token_id(key["id"]) - - if "eos_token" in tokenizer_config and tokenizer_config["eos_token"] != None: - for key in tokenizer["added_tokens"]: - if key["content"] == tokenizer_config["eos_token"]["content"]: - gguf_writer.add_eos_token_id(key["id"]) - - if "unk_token" in tokenizer_config and tokenizer_config["unk_token"] != None: - for key in tokenizer["added_tokens"]: - if key["content"] == tokenizer_config["unk_token"]["content"]: - gguf_writer.add_unk_token_id(key["id"]) - - if "sep_token" in tokenizer_config and tokenizer_config["sep_token"] != None: - for key in tokenizer["added_tokens"]: - if key["content"] == tokenizer_config["sep_token"]["content"]: - gguf_writer.add_sep_token_id(key["id"]) - - if "pad_token" in tokenizer_config and tokenizer_config["pad_token"] != None: - for key in tokenizer["added_tokens"]: - if key["content"] == tokenizer_config["pad_token"]["content"]: - gguf_writer.add_pad_token_id(key["id"]) -else: - # If no tokenizer.json: Look for special tokens in config.json - - if "bos_token_id" in hparams and hparams["bos_token_id"] != None: - gguf_writer.add_bos_token_id(hparams["bos_token_id"]) - - if "eos_token_id" in hparams and hparams["eos_token_id"] != None: - gguf_writer.add_eos_token_id(hparams["eos_token_id"]) - - if "unk_token_id" in hparams and hparams["unk_token_id"] != None: - gguf_writer.add_unk_token_id(hparams["unk_token_id"]) - - if "sep_token_id" in hparams and hparams["sep_token_id"] != None: - gguf_writer.add_sep_token_id(hparams["sep_token_id"]) - - if "pad_token_id" in hparams and hparams["pad_token_id"] != None: - gguf_writer.add_pad_token_id(hparams["pad_token_id"]) - +special_vocab = SpecialVocab(Path(dir_model)) +special_vocab.add_to_gguf(gguf_writer) # TENSORS From f82aec99a44801a0b1391e32099da4eb72551dd5 Mon Sep 17 00:00:00 2001 From: KerfuffleV2 Date: Mon, 28 Aug 2023 08:56:57 -0600 Subject: [PATCH 09/17] First pass at implementing suggested changes 
--- convert-falcon-hf-to-gguf.py | 4 +- convert-gptneox-hf-to-gguf.py | 4 +- convert-llama-7b-pth-to-gguf.py | 4 +- convert-llama-ggmlv3-to-gguf.py | 2 +- convert-llama-hf-to-gguf.py | 4 +- convert.py | 84 ++---------------- gguf-py/gguf/gguf.py | 150 ++++++++++++++++++++++++-------- 7 files changed, 125 insertions(+), 127 deletions(-) diff --git a/convert-falcon-hf-to-gguf.py b/convert-falcon-hf-to-gguf.py index de251a0fa4e27..3a0552a67e0ee 100755 --- a/convert-falcon-hf-to-gguf.py +++ b/convert-falcon-hf-to-gguf.py @@ -13,8 +13,6 @@ from pathlib import Path from transformers import AutoTokenizer -from convert import SpecialVocab - def bytes_to_unicode(): # ref: https://github.com/openai/gpt-2/blob/master/src/encoder.py """ @@ -161,7 +159,7 @@ def count_model_parts(dir_model: str) -> int: gguf_writer.add_token_scores(scores) gguf_writer.add_token_types(toktypes) -special_vocab = SpecialVocab(Path(dir_model)) +special_vocab = gguf.SpecialVocab(Path(dir_model)) special_vocab.add_to_gguf(gguf_writer) # TENSORS diff --git a/convert-gptneox-hf-to-gguf.py b/convert-gptneox-hf-to-gguf.py index a7695655d0446..b81e0468adb15 100755 --- a/convert-gptneox-hf-to-gguf.py +++ b/convert-gptneox-hf-to-gguf.py @@ -13,8 +13,6 @@ from pathlib import Path from transformers import AutoTokenizer -from convert import SpecialVocab - # ref: https://github.com/openai/gpt-2/blob/master/src/encoder.py @@ -153,7 +151,7 @@ def count_model_parts(dir_model: str) -> int: gguf_writer.add_token_list(tokens) -special_vocab = SpecialVocab(Path(dir_model)) +special_vocab = gguf.SpecialVocab(Path(dir_model)) special_vocab.add_to_gguf(gguf_writer) # TENSORS diff --git a/convert-llama-7b-pth-to-gguf.py b/convert-llama-7b-pth-to-gguf.py index 3c5506fe609bc..1400d770e95fe 100755 --- a/convert-llama-7b-pth-to-gguf.py +++ b/convert-llama-7b-pth-to-gguf.py @@ -15,8 +15,6 @@ from pathlib import Path from sentencepiece import SentencePieceProcessor -from convert import SpecialVocab - #NDArray = np.ndarray[Any, Any] # compatible with python < 3.9 NDArray: 'TypeAlias' = 'np.ndarray[Any, Any]' @@ -182,7 +180,7 @@ def count_model_parts(dir_model: str) -> int: gguf_writer.add_token_scores(scores) gguf_writer.add_token_types(toktypes) -special_vocab = SpecialVocab(Path(dir_model)) +special_vocab = gguf.SpecialVocab(Path(dir_model)) special_vocab.add_to_gguf(gguf_writer) # TENSORS diff --git a/convert-llama-ggmlv3-to-gguf.py b/convert-llama-ggmlv3-to-gguf.py index 78ba683ad6ae5..61e439d513bde 100755 --- a/convert-llama-ggmlv3-to-gguf.py +++ b/convert-llama-ggmlv3-to-gguf.py @@ -299,7 +299,7 @@ def handle_metadata(cfg, hp): raise ValueError('Unable to load metadata') vocab = convert.load_vocab(cfg.vocab_dir if cfg.vocab_dir is not None else cfg.model_metadata_dir, cfg.vocabtype) # FIXME: Respect cfg.vocab_dir? 
- svocab = convert.SpecialVocab(cfg.model_metadata_dir) + svocab = gguf.SpecialVocab(cfg.model_metadata_dir) convert.check_vocab_size(params, vocab) return (params, vocab, svocab) diff --git a/convert-llama-hf-to-gguf.py b/convert-llama-hf-to-gguf.py index 5b70bb77d715f..472f707071dd4 100755 --- a/convert-llama-hf-to-gguf.py +++ b/convert-llama-hf-to-gguf.py @@ -13,8 +13,6 @@ from pathlib import Path from sentencepiece import SentencePieceProcessor -from convert import SpecialVocab - #NDArray = np.ndarray[Any, Any] # compatible with python < 3.9 NDArray: 'TypeAlias' = 'np.ndarray[Any, Any]' @@ -191,7 +189,7 @@ def count_model_parts(dir_model: str) -> int: gguf_writer.add_token_scores(scores) gguf_writer.add_token_types(toktypes) -special_vocab = SpecialVocab(Path(dir_model)) +special_vocab = gguf.SpecialVocab(Path(dir_model)) special_vocab.add_to_gguf(gguf_writer) # TENSORS diff --git a/convert.py b/convert.py index 05904209ba0d5..dd50491377cfc 100755 --- a/convert.py +++ b/convert.py @@ -418,73 +418,6 @@ def __repr__(self) -> str: Vocab = Union[BpeVocab, SentencePieceVocab] -class SpecialVocab: - merges: List[str] = [] - special_token_types: Tuple[str, ...] = tuple(('bos', 'eos', 'unk', 'sep', 'pad')) - special_token_ids: Dict[str, int] = {} - - def __init__(self, path: Path, special_token_types: Optional[Tuple[str, ...]] = None): - self.special_token_ids = {} - if special_token_types is not None: - self.special_token_types = special_token_types - self.load(path) - - def load(self, path: Path): - if not self.try_load_from_tokenizer_json(path): - self.try_load_from_config_json(path) - - def try_load_from_tokenizer_json(self, path: Path) -> bool: - tokenizer_file = path / 'tokenizer.json' - if not tokenizer_file.is_file(): - return False - with open(tokenizer_file, 'r', encoding = 'utf-8') as f: - tokenizer = json.load(f) - merges = tokenizer.get('model', {}).get('merges') - if isinstance(merges, list) and len(merges) > 0 and isinstance(merges[0], str): - self.merges = merges - tokenizer_config_file = path / 'tokenizer_config.json' - added_tokens = tokenizer.get('added_tokens') - if added_tokens is None or not tokenizer_config_file.is_file(): - return True - with open(tokenizer_config_file, 'r', encoding = 'utf-8') as f: - tokenizer_config = json.load(f) - for typ in self.special_token_types: - tc_content = (tokenizer_config.get(f'{typ}_token') or {}).get('content') - if not isinstance(tc_content, str): - continue - for maybe_token_id in (atok.get('id') for atok in added_tokens if atok.get('content') == tc_content): - if isinstance(maybe_token_id, int): - self.special_token_ids[typ] = maybe_token_id - break - return True - - def try_load_from_config_json(self, path: Path) -> bool: - config_file = path / 'config.json' - if not config_file.is_file(): - return False - with open(config_file, 'r', encoding = 'utf-8') as f: - config = json.load(f) - for typ in self.special_token_types: - maybe_token_id = config.get(f'{typ}_token_id') - if isinstance(maybe_token_id, int): - self.special_token_ids[typ] = maybe_token_id - return True - - def add_to_gguf(self, gw: gguf.GGUFWriter): - if len(self.merges) > 0: - print(f'SpecialVocab: Adding {len(self.merges)} merge(s).') - gw.add_token_merges(self.merges) - for typ, tokid in self.special_token_ids.items(): - handler: Optional[Callable[[int], None]] = getattr(gw, f'add_{typ}_token_id', None) - if handler is None: - print(f'SpecialVocab: WARNING: No handler for special token type {typ} with id {tokid} - skipping') - continue - print(f'SpecialVocab: 
Setting special token type {typ} to {tokid}') - handler(tokid) - - def __repr__(self): - return f'' - # # data loading # TODO: reuse (probably move to gguf.py?) @@ -514,7 +447,7 @@ def part(self, n_part: int) -> 'UnquantizedTensor': ... def to_ggml(self) -> 'GGMLCompatibleTensor': ... -def bf16_to_fp32(bf16_arr: np.ndarray) -> np.ndarray: +def bf16_to_fp32(bf16_arr: np.ndarray) -> NDArray: assert bf16_arr.dtype == np.uint16, f"Input array should be of dtype uint16, but got {bf16_arr.dtype}" fp32_arr = bf16_arr.astype(np.uint32) << 16 return fp32_arr.view(np.float32) @@ -911,7 +844,7 @@ def add_meta_vocab(self, vocab: Vocab) -> None: self.gguf.add_token_scores(scores) self.gguf.add_token_types(toktypes) - def add_meta_special_vocab(self, svocab: SpecialVocab) -> None: + def add_meta_special_vocab(self, svocab: gguf.SpecialVocab) -> None: svocab.add_to_gguf(self.gguf) def add_tensor_info(self, name: str, tensor: LazyTensor) -> None: @@ -932,7 +865,7 @@ def close(self) -> None: self.gguf.close() @staticmethod - def write_vocab_only(fname_out: Path, params: Params, vocab: Vocab, svocab: SpecialVocab) -> None: + def write_vocab_only(fname_out: Path, params: Params, vocab: Vocab, svocab: gguf.SpecialVocab) -> None: check_vocab_size(params, vocab) of = OutputFile(fname_out) @@ -960,7 +893,7 @@ def maybe_do_quantize(item: Tuple[DataType, NDArray]) -> NDArray: return dt.quantize(arr) @staticmethod - def write_all(fname_out: Path, ftype: GGMLFileType, params: Params, model: LazyModel, vocab: Vocab, svocab: SpecialVocab, concurrency: int = DEFAULT_CONCURRENCY) -> None: + def write_all(fname_out: Path, ftype: GGMLFileType, params: Params, model: LazyModel, vocab: Vocab, svocab: gguf.SpecialVocab, concurrency: int = DEFAULT_CONCURRENCY) -> None: check_vocab_size(params, vocab) of = OutputFile(fname_out) @@ -1014,7 +947,7 @@ def convert_to_output_type(model: LazyModel, output_type: GGMLFileType) -> LazyM def convert_model_names(model: LazyModel, params: Params) -> LazyModel: tmap = gguf.TensorNameMap(ARCH, params.n_layer) - should_skip: Set[gguf.MODEL_TENSOR] = gguf.MODEL_TENSOR_SKIP.get(ARCH, set()) + should_skip: Set[gguf.MODEL_TENSOR] = set(gguf.MODEL_TENSOR_SKIP.get(ARCH, [])) tmp = model @@ -1036,7 +969,7 @@ def convert_model_names(model: LazyModel, params: Params) -> LazyModel: out: LazyModel = {} for name, lazy_tensor in model.items(): - tensor_type, name_new = tmap.get_both(name, try_suffixes = (".weight", ".bias")) or (None, None) + tensor_type, name_new = tmap.get_type_and_name(name, try_suffixes = (".weight", ".bias")) or (None, None) if name_new is None: raise Exception(f"Unexpected tensor name: {name}") @@ -1190,7 +1123,6 @@ def main(args_in: Optional[List[str]] = None) -> None: if not args.vocab_only: model_plus = load_some_model(args.model) else: - # You can no longer use guessed parameters for your vocab only model. Does anyone actually care? model_plus = ModelPlus(model = {}, paths = [args.model / 'dummy'], format = 'none', vocab = None) if args.dump: @@ -1220,7 +1152,7 @@ def main(args_in: Optional[List[str]] = None) -> None: assert args.outfile, "need --outfile if using --vocab-only" # FIXME: Try to respect vocab_dir somehow? 
vocab = load_vocab(args.vocab_dir or args.model, args.vocabtype) - special_vocab = SpecialVocab(model_plus.paths[0].parent) + special_vocab = gguf.SpecialVocab(model_plus.paths[0].parent) outfile = args.outfile OutputFile.write_vocab_only(outfile, params, vocab, special_vocab) print(f"Wrote {outfile}") @@ -1232,7 +1164,7 @@ def main(args_in: Optional[List[str]] = None) -> None: vocab_dir = args.vocab_dir if args.vocab_dir else model_plus.paths[0].parent vocab = load_vocab(vocab_dir, args.vocabtype) # FIXME: Try to respect vocab_dir somehow? - special_vocab = SpecialVocab(model_plus.paths[0].parent) + special_vocab = gguf.SpecialVocab(model_plus.paths[0].parent) model = model_plus.model model = convert_model_names(model, params) diff --git a/gguf-py/gguf/gguf.py b/gguf-py/gguf/gguf.py index 684cf397d51fa..47bdb303a6b38 100644 --- a/gguf-py/gguf/gguf.py +++ b/gguf-py/gguf/gguf.py @@ -4,9 +4,13 @@ import struct import tempfile import numpy as np +import json +import os +from pathlib import Path +import collections.abc as collections_abc from enum import IntEnum, auto -from typing import Any, BinaryIO, IO, Dict, List, Optional, Sequence, Tuple +from typing import Any, BinaryIO, Callable, IO, Dict, List, Optional, Sequence, Tuple, Union # # constants @@ -317,7 +321,7 @@ def __init__(self, arch: MODEL_ARCH, n_blocks: int): key = key.format(bid = bid) mapping[key] = (tensor, tensor_name) - def get_both(self, key: str, try_suffixes: Sequence[str]) -> Optional[Tuple[MODEL_TENSOR, str]]: + def get_type_and_name(self, key: str, try_suffixes: Sequence[str]) -> Optional[Tuple[MODEL_TENSOR, str]]: result = self.mapping.get(key) if result is not None: return result @@ -329,11 +333,17 @@ def get_both(self, key: str, try_suffixes: Sequence[str]) -> Optional[Tuple[MODE return None def get_name(self, key: str, try_suffixes: Sequence[str]) -> Optional[str]: - result = self.get_both(key, try_suffixes = try_suffixes) + result = self.get_type_and_name(key, try_suffixes = try_suffixes) if result is None: return None return result[1] + def get_type(self, key: str, try_suffixes: Sequence[str]) -> Optional[MODEL_TENSOR]: + result = self.get_type_and_name(key, try_suffixes = try_suffixes) + if result is None: + return None + return result[0] + def __getitem__(self, key: str) -> str: try: return self.mapping[key][1] @@ -423,9 +433,9 @@ class GGUFWriter: ti_data_count = 0 use_temp_file: bool temp_file: Optional[tempfile.SpooledTemporaryFile[bytes]] = None - tensors: List[Tuple[np.ndarray, int]] + tensors: List[Tuple[np.ndarray[Any, Any], int]] - def __init__(self, path: str, arch: str, use_temp_file = True): + def __init__(self, path: Union[os.PathLike[str], str], arch: str, use_temp_file = True): self.fout = open(path, "wb") self.arch = arch self.add_architecture() @@ -501,13 +511,26 @@ def add_string(self, key: str, val: str): self.add_key(key) self.add_val(val, GGUFValueType.STRING) - def add_array(self, key: str, val: list): - if not isinstance(val, list): - raise ValueError("Value must be a list for array type") + def add_array(self, key: str, val: Sequence[Any]): + if not isinstance(val, collections_abc.Sequence): + raise ValueError("Value must be a sequence for array type") self.add_key(key) self.add_val(val, GGUFValueType.ARRAY) + _simple_value_packing = { + GGUFValueType.UINT8: " int: return ((x + n - 1) // n) * n - def add_tensor_info(self, name: str, tensor_shape: np.ndarray, tensor_dtype: np.dtype, tensor_nbytes: int, raw_dtype: Optional[GGMLQuantizationType] = None): + def add_tensor_info(self, name: str, 
tensor_shape: Sequence[int], tensor_dtype: Union[np.dtype[np.float16], np.dtype[np.float32]], tensor_nbytes: int, raw_dtype: Optional[GGMLQuantizationType] = None): assert raw_dtype is not None or tensor_dtype in (np.float32, np.float16), "Only F32 and F16 tensors are supported for now" encoded_name = name.encode("utf8") @@ -575,13 +579,14 @@ def add_tensor_info(self, name: str, tensor_shape: np.ndarray, tensor_dtype: np. self.offset_tensor += GGUFWriter.ggml_pad(tensor_nbytes, self.data_alignment) self.ti_data_count += 1 - def add_tensor(self, name: str, tensor: np.ndarray, raw_shape: Optional[np.ndarray] = None, raw_dtype: Optional[GGMLQuantizationType] = None): + def add_tensor(self, name: str, tensor: np.ndarray[Any, Any], raw_shape: Optional[Sequence[int]] = None, raw_dtype: Optional[GGMLQuantizationType] = None): if self.use_temp_file and self.temp_file is None: fp = tempfile.SpooledTemporaryFile(mode="w+b", max_size=256*1024*1024) fp.seek(0) self.temp_file = fp - self.add_tensor_info(name, raw_shape if raw_shape is not None else tensor.shape, tensor.dtype, tensor.nbytes, raw_dtype = raw_dtype) + shape: Sequence[int] = raw_shape if raw_shape is not None else tensor.shape + self.add_tensor_info(name, shape, tensor.dtype, tensor.nbytes, raw_dtype = raw_dtype) pad = GGUFWriter.ggml_pad(tensor.nbytes, self.data_alignment) - tensor.nbytes @@ -599,7 +604,7 @@ def write_padding(self, fp: BinaryIO, n: int, align: Optional[int] = None): if pad != 0: fp.write(bytes([0] * pad)) - def write_tensor_data(self, tensor: np.ndarray): + def write_tensor_data(self, tensor: np.ndarray[Any, Any]): self.write_padding(self.fout, self.fout.tell()) tensor.tofile(self.fout) self.write_padding(self.fout, tensor.nbytes) @@ -720,16 +725,16 @@ def add_rope_scale_linear(self, value: float): def add_tokenizer_model(self, model: str): self.add_string(KEY_TOKENIZER_MODEL, model) - def add_token_list(self, tokens: List): + def add_token_list(self, tokens: Union[Sequence[str], Sequence[bytes], Sequence[bytearray]]): self.add_array(KEY_TOKENIZER_LIST, tokens) - def add_token_merges(self, merges: List): + def add_token_merges(self, merges: Union[Sequence[str], Sequence[bytes], Sequence[bytearray]]): self.add_array(KEY_TOKENIZER_MERGES, merges) - def add_token_types(self, types: List[int]): + def add_token_types(self, types: Union[Sequence[TokenType], Sequence[int]]): self.add_array(KEY_TOKENIZER_TOKEN_TYPE, types) - def add_token_scores(self, scores: List[float]): + def add_token_scores(self, scores: Sequence[float]): self.add_array(KEY_TOKENIZER_SCORES, scores) def add_bos_token_id(self, id: int): @@ -748,6 +753,75 @@ def add_pad_token_id(self, id: int): self.add_uint32(KEY_TOKENIZER_PAD_ID, id) +class SpecialVocab: + merges: List[str] = [] + special_token_types: Tuple[str, ...] 
= tuple(('bos', 'eos', 'unk', 'sep', 'pad')) + special_token_ids: Dict[str, int] = {} + + def __init__(self, path: Path, special_token_types: Optional[Tuple[str, ...]] = None): + self.special_token_ids = {} + if special_token_types is not None: + self.special_token_types = special_token_types + self.load(path) + + def load(self, path: Path): + if not self.try_load_from_tokenizer_json(path): + self.try_load_from_config_json(path) + + def try_load_from_tokenizer_json(self, path: Path) -> bool: + tokenizer_file = path / 'tokenizer.json' + if not tokenizer_file.is_file(): + return False + with open(tokenizer_file, 'r', encoding = 'utf-8') as f: + tokenizer = json.load(f) + merges = tokenizer.get('model', {}).get('merges') + if isinstance(merges, list) and len(merges) > 0 and isinstance(merges[0], str): + self.merges = merges + tokenizer_config_file = path / 'tokenizer_config.json' + added_tokens = tokenizer.get('added_tokens') + if added_tokens is None or not tokenizer_config_file.is_file(): + return True + with open(tokenizer_config_file, 'r', encoding = 'utf-8') as f: + tokenizer_config = json.load(f) + for typ in self.special_token_types: + tc_content = (tokenizer_config.get(f'{typ}_token') or {}).get('content') + if not isinstance(tc_content, str): + continue + for maybe_token_id in (atok.get('id') for atok in added_tokens if atok.get('content') == tc_content): + if isinstance(maybe_token_id, int): + self.special_token_ids[typ] = maybe_token_id + break + return True + + def try_load_from_config_json(self, path: Path) -> bool: + config_file = path / 'config.json' + if not config_file.is_file(): + return False + with open(config_file, 'r', encoding = 'utf-8') as f: + config = json.load(f) + for typ in self.special_token_types: + maybe_token_id = config.get(f'{typ}_token_id') + if isinstance(maybe_token_id, int): + self.special_token_ids[typ] = maybe_token_id + return True + + def add_to_gguf(self, gw: GGUFWriter): + # FIXME: Don't always include merges (possibly also don't even load them). 
+ if len(self.merges) > 0: + print(f'SpecialVocab: Adding {len(self.merges)} merge(s).') + gw.add_token_merges(self.merges) + for typ, tokid in self.special_token_ids.items(): + handler: Optional[Callable[[int], None]] = getattr(gw, f'add_{typ}_token_id', None) + if handler is None: + print(f'SpecialVocab: WARNING: No handler for special token type {typ} with id {tokid} - skipping') + continue + print(f'SpecialVocab: Setting special token type {typ} to {tokid}') + handler(tokid) + + def __repr__(self): + return f'' + + # Example usage: if __name__ == "__main__": # Example usage with a file From 4a3d783d3e65f27db7330f1c3d3eb5e6b90bc5f9 Mon Sep 17 00:00:00 2001 From: KerfuffleV2 Date: Mon, 28 Aug 2023 16:09:01 -0600 Subject: [PATCH 10/17] Second pass --- convert-gptneox-hf-to-gguf.py | 3 +-- convert-llama-ggmlv3-to-gguf.py | 2 +- convert-lora-to-ggml.py | 2 +- convert.py | 27 +++++++++++++++++---------- gguf-py/gguf/gguf.py | 22 +++++++++++----------- 5 files changed, 31 insertions(+), 25 deletions(-) diff --git a/convert-gptneox-hf-to-gguf.py b/convert-gptneox-hf-to-gguf.py index b81e0468adb15..f8141560aea0d 100755 --- a/convert-gptneox-hf-to-gguf.py +++ b/convert-gptneox-hf-to-gguf.py @@ -34,8 +34,7 @@ def bytes_to_unicode(): bs.append(b) cs.append(2**8+n) n += 1 - cs = [chr(n) for n in cs] - return dict(zip(bs, cs)) + return dict(zip(bs, (chr(n) for n in cs))) def count_model_parts(dir_model: str) -> int: diff --git a/convert-llama-ggmlv3-to-gguf.py b/convert-llama-ggmlv3-to-gguf.py index 61e439d513bde..c8e7f1761014f 100755 --- a/convert-llama-ggmlv3-to-gguf.py +++ b/convert-llama-ggmlv3-to-gguf.py @@ -75,7 +75,7 @@ def __init__(self): self.dims = () self.dtype = None self.start_offset = 0 - self.len_bytes = 0 + self.len_bytes = np.int64(0) def load(self, data, offset): orig_offset = offset diff --git a/convert-lora-to-ggml.py b/convert-lora-to-ggml.py index 1112532bae12d..a00339b47493b 100755 --- a/convert-lora-to-ggml.py +++ b/convert-lora-to-ggml.py @@ -60,7 +60,7 @@ def write_file_header(fout: BinaryIO, params: Dict[str, Any]) -> None: def write_tensor_header( - self, name: str, shape: Sequence[int], data_type: np.dtype + self, name: str, shape: Sequence[int], data_type: np.dtype[Any] ) -> None: sname = name.encode("utf-8") fout.write( diff --git a/convert.py b/convert.py index dd50491377cfc..9fd8f43a43ccc 100755 --- a/convert.py +++ b/convert.py @@ -25,7 +25,7 @@ from abc import ABCMeta, abstractmethod from dataclasses import dataclass from pathlib import Path -from typing import (IO, TYPE_CHECKING, Any, Callable, Dict, Generator, Iterable, List, Literal, Optional, Sequence, Set, Tuple, TypeVar, Union) +from typing import (IO, TYPE_CHECKING, Any, Callable, Dict, Generator, Iterable, List, Literal, Optional, Sequence, Set, Tuple, Type, TypeVar, Union) from sentencepiece import SentencePieceProcessor # type: ignore if TYPE_CHECKING: @@ -447,7 +447,7 @@ def part(self, n_part: int) -> 'UnquantizedTensor': ... def to_ggml(self) -> 'GGMLCompatibleTensor': ... 
-def bf16_to_fp32(bf16_arr: np.ndarray) -> NDArray: +def bf16_to_fp32(bf16_arr: np.ndarray[Any, np.dtype[np.uint16]]) -> NDArray: assert bf16_arr.dtype == np.uint16, f"Input array should be of dtype uint16, but got {bf16_arr.dtype}" fp32_arr = bf16_arr.astype(np.uint32) << 16 return fp32_arr.view(np.float32) @@ -658,7 +658,7 @@ def load(offset: int, elm_count: int) -> NDArray: description = f'storage data_type={data_type} path-in-zip={filename} path={self.zip_file.filename}' return LazyStorage(load=load, kind=pid[1], description=description) - # @staticmethod + @staticmethod def lazy_rebuild_tensor_v2(storage: Any, storage_offset: Any, size: Any, stride: Any, # pyright: ignore[reportSelfClsParameterName] requires_grad: Any, backward_hooks: Any, metadata: Any = None) -> LazyTensor: @@ -670,13 +670,15 @@ def load() -> UnquantizedTensor: description = f'pickled storage_offset={storage_offset} in {storage.description}' return LazyTensor(load, list(size), storage.kind.data_type, description) - # @staticmethod + @staticmethod def rebuild_from_type_v2(func, new_type, args, state): return func(*args) - CLASSES: Dict[Any, Any] = { - ('torch._tensor', '_rebuild_from_type_v2'): rebuild_from_type_v2, - ('torch._utils', '_rebuild_tensor_v2'): lazy_rebuild_tensor_v2, + CLASSES: Dict[Tuple[str, str], Any] = { + # getattr used here as a workaround for mypy not being smart enough to detrmine + # the staticmethods have a __func__ attribute. + ('torch._tensor', '_rebuild_from_type_v2'): getattr(rebuild_from_type_v2, '__func__'), + ('torch._utils', '_rebuild_tensor_v2'): getattr(lazy_rebuild_tensor_v2, '__func__'), ('torch', 'BFloat16Storage'): LazyStorageKind(DT_BF16), ('torch', 'HalfStorage'): LazyStorageKind(DT_F16), ('torch', 'FloatStorage'): LazyStorageKind(DT_F32), @@ -752,7 +754,7 @@ def lazy_load_file(path: Path) -> ModelPlus: In = TypeVar('In') Out = TypeVar('Out') -def bounded_parallel_map(func: Callable[[In], Out], iterable: Iterable[In], concurrency: int, max_workers: Optional[int] = None, factory: Callable = ThreadPoolExecutor) -> Iterable[Out]: +def bounded_parallel_map(func: Callable[[In], Out], iterable: Iterable[In], concurrency: int, max_workers: Optional[int] = None, use_processpool_executor: bool = False) -> Iterable[Out]: '''Parallel map, but with backpressure. If the caller doesn't call `next` fast enough, this will stop calling `func` at some point rather than letting results pile up in memory. Specifically, there is a max of one @@ -761,7 +763,12 @@ def bounded_parallel_map(func: Callable[[In], Out], iterable: Iterable[In], conc yield from map(func, iterable) # Not reached. 
iterable = iter(iterable) - with factory(max_workers = max_workers) as executor: + executor_class: Union[Type[ThreadPoolExecutor], Type[ProcessPoolExecutor]] + if use_processpool_executor: + executor_class = ProcessPoolExecutor + else: + executor_class = ThreadPoolExecutor + with executor_class(max_workers = max_workers) as executor: futures: List[concurrent.futures.Future[Out]] = [] done = False for _ in range(concurrency): @@ -913,7 +920,7 @@ def write_all(fname_out: Path, ftype: GGMLFileType, params: Params, model: LazyM # tensor data ndarrays_inner = bounded_parallel_map(OutputFile.do_item, model.items(), concurrency = concurrency) if ftype == GGMLFileType.MostlyQ8_0: - ndarrays = bounded_parallel_map(OutputFile.maybe_do_quantize, ndarrays_inner, concurrency = concurrency, max_workers = concurrency, factory = ProcessPoolExecutor) + ndarrays = bounded_parallel_map(OutputFile.maybe_do_quantize, ndarrays_inner, concurrency = concurrency, max_workers = concurrency, use_processpool_executor = True) else: ndarrays = map(OutputFile.maybe_do_quantize, ndarrays_inner) diff --git a/gguf-py/gguf/gguf.py b/gguf-py/gguf/gguf.py index 47bdb303a6b38..1f8f7098fedd4 100644 --- a/gguf-py/gguf/gguf.py +++ b/gguf-py/gguf/gguf.py @@ -8,7 +8,6 @@ import os from pathlib import Path -import collections.abc as collections_abc from enum import IntEnum, auto from typing import Any, BinaryIO, Callable, IO, Dict, List, Optional, Sequence, Tuple, Union @@ -512,7 +511,7 @@ def add_string(self, key: str, val: str): self.add_val(val, GGUFValueType.STRING) def add_array(self, key: str, val: Sequence[Any]): - if not isinstance(val, collections_abc.Sequence): + if not isinstance(val, Sequence): raise ValueError("Value must be a sequence for array type") self.add_key(key) @@ -546,15 +545,16 @@ def add_val(self, val: Any, vtype: Optional[GGUFValueType] = None, add_vtype: bo encoded_val = val.encode("utf8") if isinstance(val, str) else val self.kv_data += struct.pack(" 0: + ltype = GGUFValueType.get_type(val[0]) + if not all(GGUFValueType.get_type(i) is ltype for i in val[1:]): + raise ValueError("All items in a GGUF array should be of the same type") + self.kv_data += struct.pack(" int: @@ -614,7 +614,7 @@ def write_tensors_to_file(self): self.write_padding(self.fout, self.fout.tell()) - if not self.use_temp_file: + if self.temp_file is None: for (currtensor, currpad) in self.tensors: currtensor.tofile(self.fout) if currpad != 0: @@ -808,14 +808,14 @@ def try_load_from_config_json(self, path: Path) -> bool: def add_to_gguf(self, gw: GGUFWriter): # FIXME: Don't always include merges (possibly also don't even load them). 
if len(self.merges) > 0: - print(f'SpecialVocab: Adding {len(self.merges)} merge(s).') + print(f'gguf: Adding {len(self.merges)} merge(s).') gw.add_token_merges(self.merges) for typ, tokid in self.special_token_ids.items(): handler: Optional[Callable[[int], None]] = getattr(gw, f'add_{typ}_token_id', None) if handler is None: - print(f'SpecialVocab: WARNING: No handler for special token type {typ} with id {tokid} - skipping') + print(f'gguf: WARNING: No handler for special token type {typ} with id {tokid} - skipping') continue - print(f'SpecialVocab: Setting special token type {typ} to {tokid}') + print(f'gguf: Setting special token type {typ} to {tokid}') handler(tokid) def __repr__(self): From 8534197f14df1841df08f3b21cf7386e9c47af58 Mon Sep 17 00:00:00 2001 From: KerfuffleV2 Date: Tue, 29 Aug 2023 04:19:18 -0600 Subject: [PATCH 11/17] gguf: SpecialVocab: Fix issue with special token content not in a dict gguf: SpecialVocab: Allow skipping handling of merges --- gguf-py/gguf/gguf.py | 22 +++++++++++++++------- 1 file changed, 15 insertions(+), 7 deletions(-) diff --git a/gguf-py/gguf/gguf.py b/gguf-py/gguf/gguf.py index 1f8f7098fedd4..5f377187eab7b 100644 --- a/gguf-py/gguf/gguf.py +++ b/gguf-py/gguf/gguf.py @@ -754,11 +754,12 @@ def add_pad_token_id(self, id: int): class SpecialVocab: + load_merges: bool = False merges: List[str] = [] special_token_types: Tuple[str, ...] = tuple(('bos', 'eos', 'unk', 'sep', 'pad')) special_token_ids: Dict[str, int] = {} - def __init__(self, path: Path, special_token_types: Optional[Tuple[str, ...]] = None): + def __init__(self, path: Path, load_merges: bool = False, special_token_types: Optional[Tuple[str, ...]] = None): self.special_token_ids = {} if special_token_types is not None: self.special_token_types = special_token_types @@ -774,9 +775,10 @@ def try_load_from_tokenizer_json(self, path: Path) -> bool: return False with open(tokenizer_file, 'r', encoding = 'utf-8') as f: tokenizer = json.load(f) - merges = tokenizer.get('model', {}).get('merges') - if isinstance(merges, list) and len(merges) > 0 and isinstance(merges[0], str): - self.merges = merges + if self.load_merges: + merges = tokenizer.get('model', {}).get('merges') + if isinstance(merges, list) and len(merges) > 0 and isinstance(merges[0], str): + self.merges = merges tokenizer_config_file = path / 'tokenizer_config.json' added_tokens = tokenizer.get('added_tokens') if added_tokens is None or not tokenizer_config_file.is_file(): @@ -784,8 +786,15 @@ def try_load_from_tokenizer_json(self, path: Path) -> bool: with open(tokenizer_config_file, 'r', encoding = 'utf-8') as f: tokenizer_config = json.load(f) for typ in self.special_token_types: - tc_content = (tokenizer_config.get(f'{typ}_token') or {}).get('content') - if not isinstance(tc_content, str): + entry = tokenizer_config.get(f'{typ}_token') + if isinstance(entry, str): + tc_content = entry + elif isinstance(entry, dict): + entry_content = entry.get('content') + if not isinstance(entry_content, str): + continue + tc_content = entry_content + else: continue for maybe_token_id in (atok.get('id') for atok in added_tokens if atok.get('content') == tc_content): if isinstance(maybe_token_id, int): @@ -806,7 +815,6 @@ def try_load_from_config_json(self, path: Path) -> bool: return True def add_to_gguf(self, gw: GGUFWriter): - # FIXME: Don't always include merges (possibly also don't even load them). 
if len(self.merges) > 0: print(f'gguf: Adding {len(self.merges)} merge(s).') gw.add_token_merges(self.merges) From 61911ca4dba4faeadb4351cd20b3c978b14a3e35 Mon Sep 17 00:00:00 2001 From: KerfuffleV2 Date: Tue, 29 Aug 2023 04:21:01 -0600 Subject: [PATCH 12/17] convert-falcon-hf-to-gguf: Support --vocab-only option, bail out if no tokenizer.json --- convert-falcon-hf-to-gguf.py | 143 +++++++++++++++++++---------------- 1 file changed, 76 insertions(+), 67 deletions(-) diff --git a/convert-falcon-hf-to-gguf.py b/convert-falcon-hf-to-gguf.py index 3a0552a67e0ee..a13905b77ed30 100755 --- a/convert-falcon-hf-to-gguf.py +++ b/convert-falcon-hf-to-gguf.py @@ -8,6 +8,7 @@ import json import numpy as np import torch +import argparse from typing import Any, List from pathlib import Path @@ -47,16 +48,21 @@ def count_model_parts(dir_model: str) -> int: return num_parts -if len(sys.argv) < 3: - print(f"Usage: python {sys.argv[0]} dir-model ftype\n") - print(" ftype == 0 -> float32") - print(" ftype == 1 -> float16") - sys.exit(1) +def parse_args() -> argparse.Namespace: + parser = argparse.ArgumentParser(description="Convert a Falcon model to a GGML compatible file") + parser.add_argument("--vocab-only", action="store_true", help="extract only the vocab") + parser.add_argument("--outfile", type=Path, help="path to write to; default: based on input") + parser.add_argument("model", type=Path, help="directory containing model file, or model file itself (*.bin)") + parser.add_argument("ftype", type=int, choices=[0, 1], help="output format - use 0 for float32, 1 for float16", default = 1) + return parser.parse_args() +args = parse_args() -# output in the same directory as the model -dir_model = sys.argv[1] -last_dir = os.path.basename(os.path.normpath(dir_model)) +dir_model = args.model +ftype = args.ftype +if not dir_model.is_dir(): + print(f'Error: {args.model} is not a directory', file = sys.stderr) + sys.exit(1) # possible tensor data types # ftype == 0 -> float32 @@ -65,25 +71,21 @@ def count_model_parts(dir_model: str) -> int: # map from ftype to string ftype_str = ["f32", "f16"] -ftype = 1 -if len(sys.argv) > 2: - ftype = int(sys.argv[2]) - if ftype < 0 or ftype > 1: - print("Invalid ftype: " + str(ftype)) - - sys.exit(1) - -fname_out = sys.argv[1] + "/ggml-model-" + ftype_str[ftype] + ".gguf" +if args.outfile is not None: + fname_out = args.outfile +else: + # output in the same directory as the model by default + fname_out = dir_model / f'ggml-model-{ftype_str[ftype]}.gguf' -print("gguf: loading model "+last_dir) +print("gguf: loading model "+dir_model.name) -with open(dir_model + "/config.json", "r", encoding="utf-8") as f: +with open(dir_model / "config.json", "r", encoding="utf-8") as f: hparams = json.load(f) if hparams["architectures"][0] != "RWForCausalLM": print("Model architecture not supported: " + hparams["architectures"][0]) - sys.exit() + sys.exit(1) # get number of model parts num_parts = count_model_parts(dir_model) @@ -117,49 +119,53 @@ def count_model_parts(dir_model: str) -> int: scores: List[float] = [] toktypes: List[int] = [] -if Path(dir_model + "/tokenizer.json").is_file(): - # gpt2 tokenizer - gguf_writer.add_tokenizer_model("gpt2") - - with open(dir_model + "/tokenizer.json", "r", encoding="utf-8") as f: - tokenizer_json = json.load(f) - - print("gguf: get gpt2 tokenizer vocab") - - vocab_size = len(tokenizer_json["model"]["vocab"]) - - # ref: https://github.com/cmp-nct/ggllm.cpp/blob/master/falcon_convert.py - tokenizer = AutoTokenizer.from_pretrained(dir_model) - - 
reverse_vocab = {id: encoded_tok for encoded_tok, id in tokenizer.vocab.items()} - byte_encoder = bytes_to_unicode() - byte_decoder = {v: k for k, v in byte_encoder.items()} - - for i in range(vocab_size): - if i in reverse_vocab: - try: - text = bytearray([byte_decoder[c] for c in reverse_vocab[i]]) - except KeyError: - text = bytearray() - for c in reverse_vocab[i]: - if ord(c) < 256: # single byte character - text.append(byte_decoder[ord(c)]) - else: # multibyte special token character - text.extend(c.encode('utf-8')) - else: - print(f"Key {i} not in tokenizer vocabulary. Padding with an arbitrary token.") - pad_token = f"[PAD{i}]".encode("utf8") - text = bytearray(pad_token) - - tokens.append(text) - scores.append(0.0) # dymmy - toktypes.append(gguf.TokenType.NORMAL) # dummy - - gguf_writer.add_token_list(tokens) - gguf_writer.add_token_scores(scores) - gguf_writer.add_token_types(toktypes) - -special_vocab = gguf.SpecialVocab(Path(dir_model)) +tokenizer_json_file = dir_model / 'tokenizer.json' +if not tokenizer_json_file.is_file(): + print(f'Error: Missing {tokenizer_json_file}', file = sys.stderr) + sys.exit(1) + +# gpt2 tokenizer +gguf_writer.add_tokenizer_model("gpt2") + +with open(tokenizer_json_file, "r", encoding="utf-8") as f: + tokenizer_json = json.load(f) + +print("gguf: get gpt2 tokenizer vocab") + +vocab_size = len(tokenizer_json["model"]["vocab"]) + +# ref: https://github.com/cmp-nct/ggllm.cpp/blob/master/falcon_convert.py +tokenizer = AutoTokenizer.from_pretrained(dir_model) + +reverse_vocab = {id: encoded_tok for encoded_tok, id in tokenizer.vocab.items()} +byte_encoder = bytes_to_unicode() +byte_decoder = {v: k for k, v in byte_encoder.items()} + +for i in range(vocab_size): + if i in reverse_vocab: + try: + text = bytearray([byte_decoder[c] for c in reverse_vocab[i]]) + except KeyError: + text = bytearray() + for c in reverse_vocab[i]: + if ord(c) < 256: # single byte character + text.append(byte_decoder[ord(c)]) + else: # multibyte special token character + text.extend(c.encode('utf-8')) + else: + print(f"Key {i} not in tokenizer vocabulary. 
Padding with an arbitrary token.") + pad_token = f"[PAD{i}]".encode("utf8") + text = bytearray(pad_token) + + tokens.append(text) + scores.append(0.0) # dymmy + toktypes.append(gguf.TokenType.NORMAL) # dummy + +gguf_writer.add_token_list(tokens) +gguf_writer.add_token_scores(scores) +gguf_writer.add_token_types(toktypes) + +special_vocab = gguf.SpecialVocab(dir_model, load_merges = True) special_vocab.add_to_gguf(gguf_writer) # TENSORS @@ -183,8 +189,10 @@ def count_model_parts(dir_model: str) -> int: ) for part_name in part_names: + if args.vocab_only: + break print("gguf: loading model part '" + part_name + "'") - model_part = torch.load(f"{dir_model}/{part_name}", map_location="cpu") + model_part = torch.load(dir_model / part_name, map_location="cpu") for name in model_part.keys(): data = model_part[name] @@ -244,10 +252,11 @@ def count_model_parts(dir_model: str) -> int: gguf_writer.write_header_to_file() print("gguf: write metadata") gguf_writer.write_kv_data_to_file() -print("gguf: write tensors") -gguf_writer.write_tensors_to_file() +if not args.vocab_only: + print("gguf: write tensors") + gguf_writer.write_tensors_to_file() gguf_writer.close() -print("gguf: model successfully exported to '" + fname_out + "'") +print(f"gguf: model successfully exported to '{fname_out}'") print("") From 0c620ef63be5be416f0d84d70d863b748c29be14 Mon Sep 17 00:00:00 2001 From: KerfuffleV2 Date: Tue, 29 Aug 2023 04:23:09 -0600 Subject: [PATCH 13/17] convert-gptneox-hf-to-gguf and convert: Only handle merges for BPE tokenizer --- convert-gptneox-hf-to-gguf.py | 2 +- convert.py | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/convert-gptneox-hf-to-gguf.py b/convert-gptneox-hf-to-gguf.py index f8141560aea0d..9c78fe14e9f55 100755 --- a/convert-gptneox-hf-to-gguf.py +++ b/convert-gptneox-hf-to-gguf.py @@ -150,7 +150,7 @@ def count_model_parts(dir_model: str) -> int: gguf_writer.add_token_list(tokens) -special_vocab = gguf.SpecialVocab(Path(dir_model)) +special_vocab = gguf.SpecialVocab(Path(dir_model), load_merges = True) special_vocab.add_to_gguf(gguf_writer) # TENSORS diff --git a/convert.py b/convert.py index 9fd8f43a43ccc..27d187daca9fa 100755 --- a/convert.py +++ b/convert.py @@ -1159,7 +1159,7 @@ def main(args_in: Optional[List[str]] = None) -> None: assert args.outfile, "need --outfile if using --vocab-only" # FIXME: Try to respect vocab_dir somehow? vocab = load_vocab(args.vocab_dir or args.model, args.vocabtype) - special_vocab = gguf.SpecialVocab(model_plus.paths[0].parent) + special_vocab = gguf.SpecialVocab(model_plus.paths[0].parent, load_merges = args.vocabtype == 'bpe') outfile = args.outfile OutputFile.write_vocab_only(outfile, params, vocab, special_vocab) print(f"Wrote {outfile}") @@ -1171,7 +1171,7 @@ def main(args_in: Optional[List[str]] = None) -> None: vocab_dir = args.vocab_dir if args.vocab_dir else model_plus.paths[0].parent vocab = load_vocab(vocab_dir, args.vocabtype) # FIXME: Try to respect vocab_dir somehow? 
- special_vocab = gguf.SpecialVocab(model_plus.paths[0].parent) + special_vocab = gguf.SpecialVocab(model_plus.paths[0].parent, load_merges = args.vocabtype == 'bpe') model = model_plus.model model = convert_model_names(model, params) From 2ea133895c6d77652f7e27607af5594ada5e29f7 Mon Sep 17 00:00:00 2001 From: KerfuffleV2 Date: Tue, 29 Aug 2023 04:33:24 -0600 Subject: [PATCH 14/17] gguf: SpecialVocab: Actually set load_merges in object --- gguf-py/gguf/gguf.py | 1 + 1 file changed, 1 insertion(+) diff --git a/gguf-py/gguf/gguf.py b/gguf-py/gguf/gguf.py index 5f377187eab7b..c6f4895acdd89 100644 --- a/gguf-py/gguf/gguf.py +++ b/gguf-py/gguf/gguf.py @@ -761,6 +761,7 @@ class SpecialVocab: def __init__(self, path: Path, load_merges: bool = False, special_token_types: Optional[Tuple[str, ...]] = None): self.special_token_ids = {} + self.load_merges = load_merges if special_token_types is not None: self.special_token_types = special_token_types self.load(path) From 58fa4dc87012a142be9e57090fa556a72da436ed Mon Sep 17 00:00:00 2001 From: KerfuffleV2 Date: Tue, 29 Aug 2023 10:47:47 -0600 Subject: [PATCH 15/17] Uniform args parsing and vocab only mode for convert examples --- convert-falcon-hf-to-gguf.py | 5 +- convert-gptneox-hf-to-gguf.py | 117 +++++++++++++++------------- convert-llama-7b-pth-to-gguf.py | 133 +++++++++++++++++--------------- convert-llama-hf-to-gguf.py | 132 ++++++++++++++++--------------- 4 files changed, 204 insertions(+), 183 deletions(-) diff --git a/convert-falcon-hf-to-gguf.py b/convert-falcon-hf-to-gguf.py index a13905b77ed30..0fdea70e1a841 100755 --- a/convert-falcon-hf-to-gguf.py +++ b/convert-falcon-hf-to-gguf.py @@ -33,11 +33,10 @@ def bytes_to_unicode(): bs.append(b) cs.append(2**8+n) n += 1 - cs = [chr(n) for n in cs] - return dict(zip(bs, cs)) + return dict(zip(bs, (chr(n) for n in cs))) -def count_model_parts(dir_model: str) -> int: +def count_model_parts(dir_model: Path) -> int: num_parts = 0 for filename in os.listdir(dir_model): if filename.startswith("pytorch_model-"): diff --git a/convert-gptneox-hf-to-gguf.py b/convert-gptneox-hf-to-gguf.py index 9c78fe14e9f55..38e71e03bf090 100755 --- a/convert-gptneox-hf-to-gguf.py +++ b/convert-gptneox-hf-to-gguf.py @@ -8,6 +8,7 @@ import json import numpy as np import torch +import argparse from typing import Any, List from pathlib import Path @@ -37,7 +38,7 @@ def bytes_to_unicode(): return dict(zip(bs, (chr(n) for n in cs))) -def count_model_parts(dir_model: str) -> int: +def count_model_parts(dir_model: Path) -> int: num_parts = 0 for filename in os.listdir(dir_model): if filename.startswith("pytorch_model-"): @@ -48,16 +49,21 @@ def count_model_parts(dir_model: str) -> int: return num_parts -if len(sys.argv) < 3: - print(f"Usage: python {sys.argv[0]} dir-model ftype\n") - print(" ftype == 0 -> float32") - print(" ftype == 1 -> float16") - sys.exit(1) +def parse_args() -> argparse.Namespace: + parser = argparse.ArgumentParser(description="Convert a GPT-NeoX model to a GGML compatible file") + parser.add_argument("--vocab-only", action="store_true", help="extract only the vocab") + parser.add_argument("--outfile", type=Path, help="path to write to; default: based on input") + parser.add_argument("model", type=Path, help="directory containing model file, or model file itself (*.bin)") + parser.add_argument("ftype", type=int, choices=[0, 1], help="output format - use 0 for float32, 1 for float16", default = 1) + return parser.parse_args() +args = parse_args() -# output in the same directory as the model -dir_model = 
sys.argv[1] -last_dir = os.path.basename(os.path.normpath(dir_model)) +dir_model = args.model +ftype = args.ftype +if not dir_model.is_dir(): + print(f'Error: {args.model} is not a directory', file = sys.stderr) + sys.exit(1) # possible tensor data types # ftype == 0 -> float32 @@ -66,19 +72,15 @@ def count_model_parts(dir_model: str) -> int: # map from ftype to string ftype_str = ["f32", "f16"] -ftype = 1 -if len(sys.argv) > 2: - ftype = int(sys.argv[2]) - if ftype < 0 or ftype > 1: - print("Invalid ftype: " + str(ftype)) - - sys.exit(1) - -fname_out = sys.argv[1] + "/ggml-model-" + ftype_str[ftype] + ".gguf" +if args.outfile is not None: + fname_out = args.outfile +else: + # output in the same directory as the model by default + fname_out = dir_model / f'ggml-model-{ftype_str[ftype]}.gguf' -print("gguf: loading model "+last_dir) +print("gguf: loading model "+dir_model.name) -with open(dir_model + "/config.json", "r", encoding="utf-8") as f: +with open(dir_model / "config.json", "r", encoding="utf-8") as f: hparams = json.load(f) if hparams["architectures"][0] != "GPTNeoXForCausalLM": @@ -96,7 +98,7 @@ def count_model_parts(dir_model: str) -> int: block_count = hparams["num_hidden_layers"] -gguf_writer.add_name(last_dir) +gguf_writer.add_name(dir_model.name) gguf_writer.add_context_length(hparams["max_position_embeddings"]) gguf_writer.add_embedding_length(hparams["hidden_size"]) gguf_writer.add_block_count(block_count) @@ -112,45 +114,49 @@ def count_model_parts(dir_model: str) -> int: tokens: List[bytearray] = [] -if Path(dir_model + "/tokenizer.json").is_file(): - # gpt2 tokenizer - gguf_writer.add_tokenizer_model("gpt2") +tokenizer_json_file = dir_model / 'tokenizer.json' +if not tokenizer_json_file.is_file(): + print(f'Error: Missing {tokenizer_json_file}', file = sys.stderr) + sys.exit(1) + +# gpt2 tokenizer +gguf_writer.add_tokenizer_model("gpt2") - with open(dir_model + "/tokenizer.json", "r", encoding="utf-8") as f: - tokenizer_json = json.load(f) +with open(tokenizer_json_file, "r", encoding="utf-8") as f: + tokenizer_json = json.load(f) - print("gguf: get gpt2 tokenizer vocab") +print("gguf: get gpt2 tokenizer vocab") - vocab_size = len(tokenizer_json["model"]["vocab"]) +vocab_size = len(tokenizer_json["model"]["vocab"]) - # ref: https://github.com/cmp-nct/ggllm.cpp/blob/master/falcon_convert.py - tokenizer = AutoTokenizer.from_pretrained(dir_model) +# ref: https://github.com/cmp-nct/ggllm.cpp/blob/master/falcon_convert.py +tokenizer = AutoTokenizer.from_pretrained(dir_model) - reverse_vocab = {id: encoded_tok for encoded_tok, id in tokenizer.vocab.items()} - byte_encoder = bytes_to_unicode() - byte_decoder = {v: k for k, v in byte_encoder.items()} +reverse_vocab = {id: encoded_tok for encoded_tok, id in tokenizer.vocab.items()} +byte_encoder = bytes_to_unicode() +byte_decoder = {v: k for k, v in byte_encoder.items()} - for i in range(vocab_size): - if i in reverse_vocab: - try: - text = bytearray([byte_decoder[c] for c in reverse_vocab[i]]) - except KeyError: - text = bytearray() - for c in reverse_vocab[i]: - if ord(c) < 256: # single byte character - text.append(byte_decoder[ord(c)]) - else: # multibyte special token character - text.extend(c.encode('utf-8')) - else: - print(f"Key {i} not in tokenizer vocabulary. 
Padding with an arbitrary token.") - pad_token = f"[PAD{i}]".encode("utf8") - text = bytearray(pad_token) +for i in range(vocab_size): + if i in reverse_vocab: + try: + text = bytearray([byte_decoder[c] for c in reverse_vocab[i]]) + except KeyError: + text = bytearray() + for c in reverse_vocab[i]: + if ord(c) < 256: # single byte character + text.append(byte_decoder[ord(c)]) + else: # multibyte special token character + text.extend(c.encode('utf-8')) + else: + print(f"Key {i} not in tokenizer vocabulary. Padding with an arbitrary token.") + pad_token = f"[PAD{i}]".encode("utf8") + text = bytearray(pad_token) - tokens.append(text) + tokens.append(text) - gguf_writer.add_token_list(tokens) +gguf_writer.add_token_list(tokens) -special_vocab = gguf.SpecialVocab(Path(dir_model), load_merges = True) +special_vocab = gguf.SpecialVocab(dir_model, load_merges = True) special_vocab.add_to_gguf(gguf_writer) # TENSORS @@ -168,6 +174,8 @@ def count_model_parts(dir_model: str) -> int: ) for part_name in part_names: + if args.vocab_only: + break print("gguf: loading model part '" + part_name + "'") model_part = torch.load(f"{dir_model}/{part_name}", map_location="cpu") @@ -216,10 +224,11 @@ def count_model_parts(dir_model: str) -> int: gguf_writer.write_header_to_file() print("gguf: write metadata") gguf_writer.write_kv_data_to_file() -print("gguf: write tensors") -gguf_writer.write_tensors_to_file() +if not args.vocab_only: + print("gguf: write tensors") + gguf_writer.write_tensors_to_file() gguf_writer.close() -print("gguf: model successfully exported to '" + fname_out + "'") +print(f"gguf: model successfully exported to '{fname_out}'") print("") diff --git a/convert-llama-7b-pth-to-gguf.py b/convert-llama-7b-pth-to-gguf.py index 1400d770e95fe..6e973a116144e 100755 --- a/convert-llama-7b-pth-to-gguf.py +++ b/convert-llama-7b-pth-to-gguf.py @@ -10,6 +10,7 @@ import json import numpy as np import torch +import argparse from typing import Any, List, TypeAlias from pathlib import Path @@ -20,7 +21,7 @@ NDArray: 'TypeAlias' = 'np.ndarray[Any, Any]' -def count_model_parts(dir_model: str) -> int: +def count_model_parts(dir_model: Path) -> int: num_parts = 0 for filename in os.listdir(dir_model): if filename.startswith("consolidated."): @@ -31,18 +32,21 @@ def count_model_parts(dir_model: str) -> int: return num_parts -if len(sys.argv) < 3: - print(f"Usage: python {sys.argv[0]} dir-model ftype\n") - print(" ftype == 0 -> float32") - print(" ftype == 1 -> float16") +def parse_args() -> argparse.Namespace: + parser = argparse.ArgumentParser(description="Convert a PyTorch 7B LLaMA model to a GGML compatible file") + parser.add_argument("--vocab-only", action="store_true", help="extract only the vocab") + parser.add_argument("--outfile", type=Path, help="path to write to; default: based on input") + parser.add_argument("model", type=Path, help="directory containing model file, or model file itself (*.bin)") + parser.add_argument("ftype", type=int, choices=[0, 1], help="output format - use 0 for float32, 1 for float16", default = 1) + return parser.parse_args() - sys.exit(1) - - -# output in the same directory as the model -dir_model = sys.argv[1] -last_dir = os.path.basename(os.path.normpath(dir_model)) +args = parse_args() +dir_model = args.model +ftype = args.ftype +if not dir_model.is_dir(): + print(f'Error: {args.model} is not a directory', file = sys.stderr) + sys.exit(1) # possible tensor data types # ftype == 0 -> float32 @@ -51,19 +55,15 @@ def count_model_parts(dir_model: str) -> int: # map from ftype to 
string ftype_str = ["f32", "f16"] -ftype = 1 -if len(sys.argv) > 2: - ftype = int(sys.argv[2]) - if ftype < 0 or ftype > 1: - print("Invalid ftype: " + str(ftype)) - - sys.exit(1) - -fname_out = sys.argv[1] + "/ggml-model-" + ftype_str[ftype] + ".gguf" +if args.outfile is not None: + fname_out = args.outfile +else: + # output in the same directory as the model by default + fname_out = dir_model / f'ggml-model-{ftype_str[ftype]}.gguf' -print("gguf: loading model "+last_dir) +print("gguf: loading model "+dir_model.name) -with open(dir_model + "/config.json", "r", encoding="utf-8") as f: +with open(dir_model / "config.json", "r", encoding="utf-8") as f: hparams = json.load(f) if hparams["architectures"][0] != "LlamaForCausalLM": @@ -107,7 +107,7 @@ def count_model_parts(dir_model: str) -> int: sys.exit() -gguf_writer.add_name(last_dir) +gguf_writer.add_name(dir_model.name) gguf_writer.add_source_hf_repo(hf_repo) gguf_writer.add_tensor_data_layout("Meta AI original pth") gguf_writer.add_context_length(ctx_length) @@ -133,54 +133,59 @@ def count_model_parts(dir_model: str) -> int: scores: List[float] = [] toktypes: List[int] = [] -if Path(dir_model + "/tokenizer.model").is_file(): - # vocab type sentencepiece - print("gguf: get sentencepiece tokenizer vocab and scores") +tokenizer_model_file = dir_model / 'tokenizer.model' +if not tokenizer_model_file.is_file(): + print(f'Error: Missing {tokenizer_model_file}', file = sys.stderr) + sys.exit(1) - tokenizer = SentencePieceProcessor(dir_model + "/tokenizer.model") +# vocab type sentencepiece +print("gguf: get sentencepiece tokenizer vocab and scores") - for i in range(tokenizer.vocab_size()): - text: bytes - score: float +tokenizer = SentencePieceProcessor(str(tokenizer_model_file)) - piece = tokenizer.id_to_piece(i) - text = piece.encode("utf-8") - score = tokenizer.get_score(i) +for i in range(tokenizer.vocab_size()): + text: bytes + score: float - toktype = 1 # defualt to normal token type - if tokenizer.is_unknown(i): - toktype = 2 - if tokenizer.is_control(i): - toktype = 3 + piece = tokenizer.id_to_piece(i) + text = piece.encode("utf-8") + score = tokenizer.get_score(i) - # toktype = 4 is user-defined = tokens from added_tokens.json + toktype = 1 # defualt to normal token type + if tokenizer.is_unknown(i): + toktype = 2 + if tokenizer.is_control(i): + toktype = 3 - if tokenizer.is_unused(i): - toktype = 5 - if tokenizer.is_byte(i): - toktype = 6 + # toktype = 4 is user-defined = tokens from added_tokens.json - tokens.append(text) - scores.append(score) - toktypes.append(toktype) + if tokenizer.is_unused(i): + toktype = 5 + if tokenizer.is_byte(i): + toktype = 6 - if Path(dir_model + "/added_tokens.json").is_file(): - with open(dir_model + "/added_tokens.json", "r", encoding="utf-8") as f: - addtokens_json = json.load(f) + tokens.append(text) + scores.append(score) + toktypes.append(toktype) - print("gguf: get added tokens") +added_tokens_file = dir_model / 'added_tokens.json' +if added_tokens_file.is_file(): + with open(added_tokens_file, "r", encoding="utf-8") as f: + addtokens_json = json.load(f) - for key in addtokens_json: - tokens.append( key.encode("utf-8") ) - scores.append(-1000.0) - toktypes.append(4) # user-defined token type + print("gguf: get added tokens") - gguf_writer.add_tokenizer_model("llama") - gguf_writer.add_token_list(tokens) - gguf_writer.add_token_scores(scores) - gguf_writer.add_token_types(toktypes) + for key in addtokens_json: + tokens.append( key.encode("utf-8") ) + scores.append(-1000.0) + toktypes.append(4) # 
user-defined token type -special_vocab = gguf.SpecialVocab(Path(dir_model)) +gguf_writer.add_tokenizer_model("llama") +gguf_writer.add_token_list(tokens) +gguf_writer.add_token_scores(scores) +gguf_writer.add_token_types(toktypes) + +special_vocab = gguf.SpecialVocab(dir_model) special_vocab.add_to_gguf(gguf_writer) # TENSORS @@ -193,6 +198,8 @@ def count_model_parts(dir_model: str) -> int: part_names = (f"consolidated.{n:02}.pth" for n in range(0, num_parts)) for part_name in part_names: + if args.vocab_only: + break print("gguf: loading model part '" + part_name + "'") model_part = torch.load(f"{dir_model}/{part_name}", map_location="cpu") @@ -241,11 +248,11 @@ def count_model_parts(dir_model: str) -> int: gguf_writer.write_header_to_file() print("gguf: write metadata") gguf_writer.write_kv_data_to_file() -print("gguf: write tensors") -gguf_writer.write_tensors_to_file() +if not args.vocab_only: + print("gguf: write tensors") + gguf_writer.write_tensors_to_file() gguf_writer.close() - -print("gguf: model successfully exported to '" + fname_out + "'") +print(f"gguf: model successfully exported to '{fname_out}'") print("") diff --git a/convert-llama-hf-to-gguf.py b/convert-llama-hf-to-gguf.py index 472f707071dd4..ab94b5eab695d 100755 --- a/convert-llama-hf-to-gguf.py +++ b/convert-llama-hf-to-gguf.py @@ -8,6 +8,7 @@ import json import numpy as np import torch +import argparse from typing import Any, List, Optional, TypeAlias from pathlib import Path @@ -43,40 +44,38 @@ def count_model_parts(dir_model: str) -> int: return num_parts -if len(sys.argv) < 3: - print(f"Usage: python {sys.argv[0]} dir-model ftype\n") - print(" ftype == 0 -> float32") - print(" ftype == 1 -> float16") +def parse_args() -> argparse.Namespace: + parser = argparse.ArgumentParser(description="Convert a HuggingFace LLaMA model to a GGML compatible file") + parser.add_argument("--vocab-only", action="store_true", help="extract only the vocab") + parser.add_argument("--outfile", type=Path, help="path to write to; default: based on input") + parser.add_argument("model", type=Path, help="directory containing model file, or model file itself (*.bin)") + parser.add_argument("ftype", type=int, choices=[0, 1], help="output format - use 0 for float32, 1 for float16", default = 1) + return parser.parse_args() - sys.exit(1) - - -# output in the same directory as the model -dir_model = sys.argv[1] -last_dir = os.path.basename(os.path.normpath(dir_model)) +args = parse_args() +dir_model = args.model +ftype = args.ftype +if not dir_model.is_dir(): + print(f'Error: {args.model} is not a directory', file = sys.stderr) + sys.exit(1) # possible tensor data types # ftype == 0 -> float32 # ftype == 1 -> float16 - # map from ftype to string ftype_str = ["f32", "f16"] -ftype = 1 -if len(sys.argv) > 2: - ftype = int(sys.argv[2]) - if ftype < 0 or ftype > 1: - print("Invalid ftype: " + str(ftype)) - - sys.exit(1) - -fname_out = sys.argv[1] + "/ggml-model-" + ftype_str[ftype] + ".gguf" +if args.outfile is not None: + fname_out = args.outfile +else: + # output in the same directory as the model by default + fname_out = dir_model / f'ggml-model-{ftype_str[ftype]}.gguf' -print("gguf: loading model "+last_dir) +print("gguf: loading model "+dir_model.name) -with open(dir_model + "/config.json", "r", encoding="utf-8") as f: +with open(dir_model / "config.json", "r", encoding="utf-8") as f: hparams = json.load(f) if hparams["architectures"][0] != "LlamaForCausalLM": @@ -115,7 +114,7 @@ def count_model_parts(dir_model: str) -> int: sys.exit() 
-gguf_writer.add_name(last_dir) +gguf_writer.add_name(dir_model.name) gguf_writer.add_source_hf_repo(hf_repo) gguf_writer.add_tensor_data_layout("Meta AI original pth") gguf_writer.add_context_length(ctx_length) @@ -141,55 +140,60 @@ def count_model_parts(dir_model: str) -> int: scores: List[float] = [] toktypes: List[int] = [] -if Path(dir_model + "/tokenizer.model").is_file(): - # vocab type sentencepiece - print("gguf: get sentencepiece tokenizer vocab, scores and token types") +tokenizer_model_file = dir_model / 'tokenizer.model' +if not tokenizer_model_file.is_file(): + print(f'Error: Missing {tokenizer_model_file}', file = sys.stderr) + sys.exit(1) - tokenizer = SentencePieceProcessor(dir_model + "/tokenizer.model") +# vocab type sentencepiece +print("gguf: get sentencepiece tokenizer vocab, scores and token types") - for i in range(tokenizer.vocab_size()): - text: bytes - score: float +tokenizer = SentencePieceProcessor(str(tokenizer_model_file)) - piece = tokenizer.id_to_piece(i) - text = piece.encode("utf-8") - score = tokenizer.get_score(i) +for i in range(tokenizer.vocab_size()): + text: bytes + score: float - toktype = 1 # defualt to normal token type - if tokenizer.is_unknown(i): - toktype = 2 - if tokenizer.is_control(i): - toktype = 3 + piece = tokenizer.id_to_piece(i) + text = piece.encode("utf-8") + score = tokenizer.get_score(i) - # toktype = 4 is user-defined = tokens from added_tokens.json + toktype = 1 # defualt to normal token type + if tokenizer.is_unknown(i): + toktype = 2 + if tokenizer.is_control(i): + toktype = 3 - if tokenizer.is_unused(i): - toktype = 5 - if tokenizer.is_byte(i): - toktype = 6 + # toktype = 4 is user-defined = tokens from added_tokens.json - tokens.append(text) - scores.append(score) - toktypes.append(toktype) + if tokenizer.is_unused(i): + toktype = 5 + if tokenizer.is_byte(i): + toktype = 6 - if Path(dir_model + "/added_tokens.json").is_file(): - with open(dir_model + "/added_tokens.json", "r", encoding="utf-8") as f: - addtokens_json = json.load(f) + tokens.append(text) + scores.append(score) + toktypes.append(toktype) - print("gguf: get added tokens") +added_tokens_file = dir_model / 'added_tokens.json' +if added_tokens_file.is_file(): + with open(added_tokens_file, "r", encoding="utf-8") as f: + addtokens_json = json.load(f) - for key in addtokens_json: - tokens.append( key.encode("utf-8") ) - scores.append(-1000.0) - toktypes.append(4) # user-defined token type + print("gguf: get added tokens") + for key in addtokens_json: + tokens.append( key.encode("utf-8") ) + scores.append(-1000.0) + toktypes.append(4) # user-defined token type - gguf_writer.add_tokenizer_model("llama") - gguf_writer.add_token_list(tokens) - gguf_writer.add_token_scores(scores) - gguf_writer.add_token_types(toktypes) -special_vocab = gguf.SpecialVocab(Path(dir_model)) +gguf_writer.add_tokenizer_model("llama") +gguf_writer.add_token_list(tokens) +gguf_writer.add_token_scores(scores) +gguf_writer.add_token_types(toktypes) + +special_vocab = gguf.SpecialVocab(dir_model) special_vocab.add_to_gguf(gguf_writer) # TENSORS @@ -207,6 +211,8 @@ def count_model_parts(dir_model: str) -> int: ) for part_name in part_names: + if args.vocab_only: + break print("gguf: loading model part '" + part_name + "'") model_part = torch.load(f"{dir_model}/{part_name}", map_location="cpu") @@ -261,11 +267,11 @@ def count_model_parts(dir_model: str) -> int: gguf_writer.write_header_to_file() print("gguf: write metadata") gguf_writer.write_kv_data_to_file() -print("gguf: write tensors") 
-gguf_writer.write_tensors_to_file() +if not args.vocab_only: + print("gguf: write tensors") + gguf_writer.write_tensors_to_file() gguf_writer.close() - -print("gguf: model successfully exported to '" + fname_out + "'") +print(f"gguf: model successfully exported to '{fname_out}'") print("") From ce005285aadd87403f4f6c69f1516ef2801d48ae Mon Sep 17 00:00:00 2001 From: KerfuffleV2 Date: Tue, 29 Aug 2023 12:01:59 -0600 Subject: [PATCH 16/17] convert.py: Set gpt2 as tokenizer model when using BPE --- convert.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/convert.py b/convert.py index 27d187daca9fa..448b6f0f35ba7 100755 --- a/convert.py +++ b/convert.py @@ -846,7 +846,12 @@ def add_meta_vocab(self, vocab: Vocab) -> None: scores.append(score) toktypes.append(toktype) - self.gguf.add_tokenizer_model("llama") + if isinstance(vocab, SentencePieceVocab): + self.gguf.add_tokenizer_model("llama") + elif isinstance(vocab, BpeVocab): + self.gguf.add_tokenizer_model("gpt2") + else: + raise ValueError(f'Unknown vocab type: Not BpeVocab or SentencePieceVocab') self.gguf.add_token_list(tokens) self.gguf.add_token_scores(scores) self.gguf.add_token_types(toktypes) From d77b74b41e096af3be970f94672d4e8dcbb0a455 Mon Sep 17 00:00:00 2001 From: KerfuffleV2 Date: Tue, 29 Aug 2023 14:01:05 -0600 Subject: [PATCH 17/17] Squish last type warning in gguf.py - yay! --- gguf-py/gguf/gguf.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/gguf-py/gguf/gguf.py b/gguf-py/gguf/gguf.py index c6f4895acdd89..de3edbc993700 100644 --- a/gguf-py/gguf/gguf.py +++ b/gguf-py/gguf/gguf.py @@ -9,6 +9,7 @@ from pathlib import Path from enum import IntEnum, auto +from io import BufferedWriter from typing import Any, BinaryIO, Callable, IO, Dict, List, Optional, Sequence, Tuple, Union # @@ -422,7 +423,7 @@ def get_type(val): class GGUFWriter: - fout: BinaryIO + fout: BufferedWriter arch: str offset_tensor = 0 data_alignment = GGUF_DEFAULT_ALIGNMENT
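
Taken together, the series leaves the example converters with a common shape for
vocab handling. A minimal closing sketch (paths and the architecture string are
placeholders, not taken from any one script):

    import gguf
    from pathlib import Path

    dir_model = Path("models/example")               # hypothetical model directory
    writer = gguf.GGUFWriter(dir_model / "ggml-model-f16.gguf", "falcon")

    writer.add_tokenizer_model("gpt2")               # BPE-style vocab; "llama" for sentencepiece
    # ... add_token_list / add_token_scores / add_token_types ...

    # Merges are now only loaded when explicitly requested (BPE tokenizers).
    special_vocab = gguf.SpecialVocab(dir_model, load_merges = True)
    special_vocab.add_to_gguf(writer)

    writer.write_header_to_file()
    writer.write_kv_data_to_file()
    # Tensor writing is skipped entirely in --vocab-only mode.
    writer.close()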