git.djapps.eu Git - pkg/ggml/sources/llama.cpp/commitdiff
convert : remove AWQ remnants (#8320)
author: Georgi Gerganov <redacted>
Fri, 5 Jul 2024 07:15:36 +0000 (10:15 +0300)
committer: GitHub <redacted>
Fri, 5 Jul 2024 07:15:36 +0000 (10:15 +0300)
convert_hf_to_gguf.py

index ed5490593204404cfe94142f9d492ee5feaa61bc..455eea883b93b25fd0f114386b49dacc45a3972f 100755 (executable)
@@ -2445,7 +2445,7 @@ class Gemma2Model(Model):
             raise ValueError("query_pre_attn_scalar must be equal to n_embd / n_head")
 
     def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
-        del bid  # unusem
+        del bid  # unused
 
         # lm_head is not used in llama.cpp, while autoawq will include this tensor in model
         # To prevent errors, skip loading lm_head.weight.
@@ -3225,10 +3225,6 @@ def parse_args() -> argparse.Namespace:
         "--vocab-only", action="store_true",
         help="extract only the vocab",
     )
-    parser.add_argument(
-        "--awq-path", type=Path, default=None,
-        help="Path to scale awq cache file",
-    )
     parser.add_argument(
         "--outfile", type=Path,
         help="path to write to; default: based on input. {ftype} will be replaced by the outtype.",
@@ -3306,19 +3302,6 @@ def main() -> None:
 
     dir_model = args.model
 
-    if args.awq_path:
-        sys.path.insert(1, str(Path(__file__).parent / 'awq-py'))
-        from awq.apply_awq import add_scale_weights  # type: ignore[import-not-found]
-        tmp_model_path = args.model / "weighted_model"
-        dir_model = tmp_model_path
-        if tmp_model_path.is_dir():
-            logger.info(f"{tmp_model_path} exists as a weighted model.")
-        else:
-            tmp_model_path.mkdir(parents=True, exist_ok=True)
-            logger.info("Saving new weighted model ...")
-            add_scale_weights(str(args.model), str(args.awq_path), str(tmp_model_path))
-            logger.info(f"Saved weighted model at {tmp_model_path}.")
-
     if not dir_model.is_dir():
         logger.error(f'Error: {args.model} is not a directory')
         sys.exit(1)