# Source: tinyrocs/docs/_source/_static/apps/build-llamacpp.sh
# (page metadata from the original listing: 32 lines, 1.0 KiB, Bash)
#!/usr/bin/env bash
# Build llama.cpp with ROCm/HIP acceleration for an RDNA3 GPU (gfx1100) on Ubuntu.
# Requires: git, cmake, ninja, and a ROCm installation under /opt/rocm.
set -euo pipefail

# Clone only if the checkout doesn't exist yet, so the script is re-runnable
# (a bare 'git clone' errors out when the target directory is present).
[[ -d llama.cpp ]] || git clone --recursive https://github.com/ggerganov/llama.cpp
cd llama.cpp/ || exit 1

# Always configure from a clean build tree.
rm -rf build

# NOTE(review): the C++ compiler is plain clang++ while the C compiler is
# hipcc — HIP device code is C++, so the usual setup is the reverse (or hipcc
# for both); confirm this combination is intentional.
# NOTE(review): tool paths mix /opt/rocm/llvm/bin and /opt/rocm/lib/llvm/bin;
# on current ROCm installs these resolve to the same LLVM, but verify locally.
cmake -B build -G Ninja \
  -DCMAKE_BUILD_TYPE=Release \
  -DCMAKE_CXX_COMPILER=/opt/rocm/llvm/bin/clang++ \
  -DCMAKE_C_COMPILER=hipcc \
  -DCMAKE_C_COMPILER_AR=/opt/rocm/lib/llvm/bin/llvm-ar \
  -DCMAKE_AR=/opt/rocm/lib/llvm/bin/llvm-ar \
  -DCMAKE_ADDR2LINE=/opt/rocm/lib/llvm/bin/llvm-addr2line \
  -DCMAKE_C_COMPILER_RANLIB=/opt/rocm/lib/llvm/bin/llvm-ranlib \
  -DCMAKE_LINKER=/opt/rocm/lib/llvm/bin/ld.lld \
  -DCMAKE_NM=/opt/rocm/lib/llvm/bin/llvm-nm \
  -DCMAKE_OBJCOPY=/opt/rocm/lib/llvm/bin/llvm-objcopy \
  -DCMAKE_OBJDUMP=/opt/rocm/lib/llvm/bin/llvm-objdump \
  -DCMAKE_RANLIB=/opt/rocm/lib/llvm/bin/llvm-ranlib \
  -DCMAKE_READELF=/opt/rocm/lib/llvm/bin/llvm-readelf \
  -DCMAKE_STRIP=/opt/rocm/lib/llvm/bin/llvm-strip \
  -DLLAMA_AVX2=ON \
  -DLLAMA_AVX=ON \
  -DLLAMA_HIPBLAS=ON \
  -DLLAMA_FMA=ON \
  -DLLAMA_LTO=ON \
  -DLLAMA_HIP_UMA=OFF \
  -DLLAMA_QKK_64=OFF \
  -DLLAMA_VULKAN=OFF \
  -DLLAMA_F16C=ON \
  -DAMDGPU_TARGETS=gfx1100

# Build with ninja; fails the script (via set -e) on any compile error.
ninja -C build