git.djapps.eu Git - pkg/ggml/sources/llama.cpp/commitdiff
cmake : install targets (#2256)
author wzy <redacted>
Wed, 19 Jul 2023 07:01:11 +0000 (15:01 +0800)
committer GitHub <redacted>
Wed, 19 Jul 2023 07:01:11 +0000 (10:01 +0300)
fix #2252

17 files changed:
CMakeLists.txt
convert-lora-to-ggml.py [changed mode: 0644->0755]
convert.py [changed mode: 0644->0755]
examples/baby-llama/CMakeLists.txt
examples/benchmark/CMakeLists.txt
examples/embd-input/CMakeLists.txt
examples/embedding/CMakeLists.txt
examples/main/CMakeLists.txt
examples/metal/CMakeLists.txt
examples/perplexity/CMakeLists.txt
examples/quantize-stats/CMakeLists.txt
examples/quantize/CMakeLists.txt
examples/save-load-state/CMakeLists.txt
examples/server/CMakeLists.txt
examples/simple/CMakeLists.txt
examples/train-text-from-scratch/CMakeLists.txt
tests/CMakeLists.txt

index d9381dae1a10ae23dc5015a9bb64c09e47f5e7ba..abc96814d632df0bdf66ffdce4cb6164b99c9c0f 100644 (file)
@@ -512,6 +512,7 @@ if (BUILD_SHARED_LIBS)
     set_target_properties(ggml PROPERTIES POSITION_INDEPENDENT_CODE ON)
     add_library(ggml_shared SHARED $<TARGET_OBJECTS:ggml>)
     target_link_libraries(ggml_shared PUBLIC Threads::Threads ${LLAMA_EXTRA_LIBS})
+    install(TARGETS ggml_shared LIBRARY)
 endif()
 
 add_library(llama
@@ -533,8 +534,32 @@ if (BUILD_SHARED_LIBS)
     if (LLAMA_METAL)
         set_target_properties(llama PROPERTIES RESOURCE "${CMAKE_CURRENT_SOURCE_DIR}/ggml-metal.metal")
     endif()
+    install(TARGETS llama LIBRARY)
 endif()
 
+include(GNUInstallDirs)
+install(
+    FILES convert.py
+    PERMISSIONS
+        OWNER_READ
+        OWNER_WRITE
+        OWNER_EXECUTE
+        GROUP_READ
+        GROUP_EXECUTE
+        WORLD_READ
+        WORLD_EXECUTE
+    DESTINATION ${CMAKE_INSTALL_BINDIR})
+install(
+    FILES convert-lora-to-ggml.py
+    PERMISSIONS
+        OWNER_READ
+        OWNER_WRITE
+        OWNER_EXECUTE
+        GROUP_READ
+        GROUP_EXECUTE
+        WORLD_READ
+        WORLD_EXECUTE
+    DESTINATION ${CMAKE_INSTALL_BINDIR})
 
 #
 # programs, examples and tests
old mode 100644 (file)
new mode 100755 (executable)
index f43c836..b4999ff
@@ -1,3 +1,4 @@
+#!/usr/bin/env python
 import json
 import os
 import re
old mode 100644 (file)
new mode 100755 (executable)
index 7a2705e..e3f1096
@@ -1,3 +1,4 @@
+#!/usr/bin/env python
 import argparse
 import concurrent.futures
 import copy
index d2ce36367474ff6429fcc0d9fe8ae2b69f348c91..7b70227a525e1a158cd775e089674b00c8c4d8d1 100644 (file)
@@ -1,4 +1,5 @@
 set(TARGET baby-llama)
 add_executable(${TARGET} baby-llama.cpp)
+install(TARGETS ${TARGET} RUNTIME)
 target_link_libraries(${TARGET} PRIVATE common llama ${CMAKE_THREAD_LIBS_INIT})
 target_compile_features(${TARGET} PRIVATE cxx_std_11)
index 0376961945ad7e4656ddab95acaebd7a045754a9..3f3415350919c99058ace62fa61ebc20418e4c6f 100644 (file)
@@ -1,5 +1,6 @@
 set(TARGET benchmark)
 add_executable(${TARGET} benchmark-matmult.cpp)
+install(TARGETS ${TARGET} RUNTIME)
 target_link_libraries(${TARGET} PRIVATE common llama ${CMAKE_THREAD_LIBS_INIT})
 target_compile_features(${TARGET} PRIVATE cxx_std_11)
 if(TARGET BUILD_INFO)
index 2b623953e80619451619e66d8724ae77962ac83c..5bbb1ea02a3c6aafd54d618aa33a003c0cea532a 100644 (file)
@@ -1,5 +1,6 @@
 set(TARGET embdinput)
 add_library(${TARGET} embd-input-lib.cpp embd-input.h)
+install(TARGETS ${TARGET} LIBRARY)
 target_link_libraries(${TARGET} PRIVATE common llama ${CMAKE_THREAD_LIBS_INIT})
 target_compile_features(${TARGET} PRIVATE cxx_std_11)
 if(TARGET BUILD_INFO)
@@ -8,6 +9,7 @@ endif()
 
 set(TARGET embd-input-test)
 add_executable(${TARGET} embd-input-test.cpp)
+install(TARGETS ${TARGET} RUNTIME)
 target_link_libraries(${TARGET} PRIVATE common llama embdinput ${CMAKE_THREAD_LIBS_INIT})
 target_compile_features(${TARGET} PRIVATE cxx_std_11)
 if(TARGET BUILD_INFO)
index db73b6b44f07f479161ed292385fb021df2ae6f2..0c752c7bbb59f847876471a9ca26c88e5eb13dcc 100644 (file)
@@ -1,5 +1,6 @@
 set(TARGET embedding)
 add_executable(${TARGET} embedding.cpp)
+install(TARGETS ${TARGET} RUNTIME)
 target_link_libraries(${TARGET} PRIVATE common llama ${CMAKE_THREAD_LIBS_INIT})
 target_compile_features(${TARGET} PRIVATE cxx_std_11)
 if(TARGET BUILD_INFO)
index c364242fbadb425f37cf2e18e17816484d8c80f7..cc188894804bab4da86c67df7b5e152acab82305 100644 (file)
@@ -1,5 +1,6 @@
 set(TARGET main)
 add_executable(${TARGET} main.cpp)
+install(TARGETS ${TARGET} RUNTIME)
 target_link_libraries(${TARGET} PRIVATE common llama ${CMAKE_THREAD_LIBS_INIT})
 target_compile_features(${TARGET} PRIVATE cxx_std_11)
 if(TARGET BUILD_INFO)
index a8c4284a53642edad16696a5edc4dd4e3c8b9269..f16d491655948e72322016d85bca9ad464be8c0f 100644 (file)
@@ -1,3 +1,4 @@
 set(TEST_TARGET metal)
 add_executable(${TEST_TARGET} metal.cpp)
+install(TARGETS ${TEST_TARGET} RUNTIME)
 target_link_libraries(${TEST_TARGET} PRIVATE ggml)
index 61b17b828dd1bc9efc0b4aee2d15457f229671d7..af00b4e16501626eff836f769339daa3b9935018 100644 (file)
@@ -1,5 +1,6 @@
 set(TARGET perplexity)
 add_executable(${TARGET} perplexity.cpp)
+install(TARGETS ${TARGET} RUNTIME)
 target_link_libraries(${TARGET} PRIVATE common llama ${CMAKE_THREAD_LIBS_INIT})
 target_compile_features(${TARGET} PRIVATE cxx_std_11)
 if(TARGET BUILD_INFO)
index 7bebc11a124b05b8675184f65e096b6c18e37cbf..c5c394058ced81642e6a503f86a87b448f88d474 100644 (file)
@@ -1,4 +1,5 @@
 set(TARGET quantize-stats)
 add_executable(${TARGET} quantize-stats.cpp)
+install(TARGETS ${TARGET} RUNTIME)
 target_link_libraries(${TARGET} PRIVATE llama ${CMAKE_THREAD_LIBS_INIT})
 target_compile_features(${TARGET} PRIVATE cxx_std_11)
index 475fc8be885a640c093e02ccf92eae031a9584ba..47d0be72ecc0fa15c353af8af0fbf1d81fa65fa3 100644 (file)
@@ -1,5 +1,6 @@
 set(TARGET quantize)
 add_executable(${TARGET} quantize.cpp)
+install(TARGETS ${TARGET} RUNTIME)
 target_link_libraries(${TARGET} PRIVATE llama ${CMAKE_THREAD_LIBS_INIT})
 target_compile_features(${TARGET} PRIVATE cxx_std_11)
 if(TARGET BUILD_INFO)
index 08dbe5c2b3edf12b27d8de70cb2c12601e432f42..eadd13cdf7930a09bdf6e1790c596a8b0a652ff5 100644 (file)
@@ -1,5 +1,6 @@
 set(TARGET save-load-state)
 add_executable(${TARGET} save-load-state.cpp)
+install(TARGETS ${TARGET} RUNTIME)
 target_link_libraries(${TARGET} PRIVATE common llama ${CMAKE_THREAD_LIBS_INIT})
 target_compile_features(${TARGET} PRIVATE cxx_std_11)
 if(TARGET BUILD_INFO)
index 07ba76ad35bbdb558beb3ecc0930b910c9b45c43..812a24b095cdbb4257fc636cc9fb5d7a171edc79 100644 (file)
@@ -2,6 +2,7 @@ set(TARGET server)
 option(LLAMA_SERVER_VERBOSE "Build verbose logging option for Server" ON)
 include_directories(${CMAKE_CURRENT_SOURCE_DIR})
 add_executable(${TARGET} server.cpp json.hpp httplib.h)
+install(TARGETS ${TARGET} RUNTIME)
 target_compile_definitions(${TARGET} PRIVATE
     SERVER_VERBOSE=$<BOOL:${LLAMA_SERVER_VERBOSE}>
 )
index 1568f7364184ac7ad6f59fd5f0d944a97de733f0..0ac9cb03a8eca4494f38e6c57efc4909d63ffb9f 100644 (file)
@@ -1,5 +1,6 @@
 set(TARGET simple)
 add_executable(${TARGET} simple.cpp)
+install(TARGETS ${TARGET} RUNTIME)
 target_link_libraries(${TARGET} PRIVATE common llama ${CMAKE_THREAD_LIBS_INIT})
 target_compile_features(${TARGET} PRIVATE cxx_std_11)
 if(TARGET BUILD_INFO)
index 1a44c4961c0845ee3ce224eb7598a5f85217c0f0..4459516d093d62bab42be9e80bf1aa65f3e660d5 100644 (file)
@@ -1,4 +1,5 @@
 set(TARGET train-text-from-scratch)
 add_executable(${TARGET} train-text-from-scratch.cpp)
+install(TARGETS ${TARGET} RUNTIME)
 target_link_libraries(${TARGET} PRIVATE common llama ${CMAKE_THREAD_LIBS_INIT})
 target_compile_features(${TARGET} PRIVATE cxx_std_11)
index 1acf050a743e4c38638fa4364412ce019f40a449..11ec6c7252f46e8f8e8d29f327b7654a24f7f094 100644 (file)
@@ -1,6 +1,7 @@
 function(llama_add_test source)
     get_filename_component(TEST_TARGET ${source} NAME_WE)
     add_executable(${TEST_TARGET} ${source})
+    install(TARGETS ${TEST_TARGET} RUNTIME)
     target_link_libraries(${TEST_TARGET} PRIVATE llama)
     add_test(NAME ${TEST_TARGET} COMMAND $<TARGET_FILE:${TEST_TARGET}> ${ARGN})
 endfunction()