author | CoprDistGit <infra@openeuler.org> | 2024-07-27 07:06:16 +0000 |
---|---|---|
committer | CoprDistGit <infra@openeuler.org> | 2024-07-27 07:06:16 +0000 |
commit | 0f3d344c4930f3bbb3a370b65316a1430c69b237 (patch) | |
tree | c39eb400c177a696e1161625a963b292108c1d70 | |
parent | f40bb03c14896b7596923b38b118f1c6544eab30 (diff) | |
automatic import of ollama
-rw-r--r-- | ollama.spec | 31 |
1 file changed, 24 insertions, 7 deletions
```diff
diff --git a/ollama.spec b/ollama.spec
index c3138fb..e55a093 100644
--- a/ollama.spec
+++ b/ollama.spec
@@ -16,6 +16,7 @@ BuildRequires: cmake
 BuildRequires: golang
 BuildRequires: gcc-c++
 BuildRequires: git
+BuildRequires: dnf
 
 %description
 
@@ -28,11 +29,24 @@ into production.
 
 %prep
 %autosetup -p1 -n %{name}-%{version}
 
-# Set the CUDA nvcc path, default is /usr/local/cuda/bin/nvcc.
+
+%if "%{_arch}" == "x86_64"
+dnf config-manager --add-repo https://developer.download.nvidia.com/compute/cuda/repos/rhel8/x86_64/cuda-rhel8.repo
+%endif
+
+%if "%{_arch}" == "aarch64"
+dnf config-manager --add-repo https://developer.download.nvidia.com/compute/cuda/repos/rhel8/sbsa/cuda-rhel8.repo
+%endif
+
+dnf clean all
+dnf -y install cuda-toolkit-12-5
+
+# Set the CUDA nvcc path, default is /usr/local/cuda/bin/nvcc
 # export CUDACXX=/usr/local/cuda/bin/nvcc
-# Set the CUDA_LIB_DIR
+# Set the CUDA_LIB_DIR, default is /usr/local/cuda/lib64
 # CUDA_LIB_DIR=/usr/local/cuda/lib64
+
 tar -xzf %{SOURCE2}
 cd llm
 tar -xzf %{SOURCE1}
@@ -52,8 +66,8 @@ export GOPROXY=https://goproxy.cn
 
 # Skip the initialization of the submodule llama.cpp.
 export OLLAMA_SKIP_PATCHING=1
-# Set ReleaseMode
-sed -i 's/var mode string = gin.DebugMode/var mode string = gin.ReleaseMode/' server/routes.go
+# Set Version and ReleaseMode
+export GOFLAGS="'-ldflags=-w -s \"-X=github.com/ollama/ollama/version.Version=%{version}\" \"-X=github.com/ollama/ollama/server.mode=release\"'"
 
 # Advanced CPU Settings
 # By default, it will compile for lowest-common-denominator, AVX, and AVX2 separately.
@@ -121,12 +135,15 @@ groupdel ollama
 
 
 %changelog
-* Tue Jul 23 2024 Xenochou <xenochou@qq.com> - 0.2.7-0
+* Sat Jul 27 2024 Xenochou <xenochou@qq.com> - 0.2.7-2
+- Try add cuda build
+
+* Tue Jul 23 2024 Xenochou <xenochou@qq.com> - 0.2.7-1
 - Update ollama to 0.2.7
 - Add ollama user
 
-* Mon Jul 15 2024 Xenochou <xenochou@qq.com> - 0.2.6-0
+* Mon Jul 15 2024 Xenochou <xenochou@qq.com> - 0.2.6-1
 - Update ollama to 0.2.6
 
-* Fri Jun 21 2024 wujing <wujing50@huawei.com> - 0.1.44-0
+* Fri Jun 21 2024 wujing <wujing50@huawei.com> - 0.1.44-1
 - Package init
```
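The GOFLAGS change replaces the earlier `sed` edit of `server/routes.go` with Go's link-time variable injection: each `-X=importpath.name=value` linker flag overwrites a package-level string variable, which is how the spec now stamps `version.Version` and switches `server.mode` to release without patching source. The extra single quotes keep the multi-word `-ldflags` value together when GOFLAGS is split on whitespace. Below is a minimal sketch of the mechanism; it uses a hypothetical `main.Version` variable rather than ollama's real packages.

```go
// versiondemo.go - minimal sketch of the -ldflags "-X" injection used above.
// Build with a value supplied at link time, e.g.:
//
//	go build -ldflags "-X main.Version=0.2.7 -w -s" -o versiondemo versiondemo.go
//
// Running ./versiondemo then prints "version: 0.2.7" instead of the placeholder.
package main

import "fmt"

// Version is a placeholder; the linker's -X flag overwrites it at build time.
var Version = "0.0.0"

func main() {
	fmt.Println("version:", Version)
}
```

The same mechanism works for unexported variables such as `server.mode` in the diff; the target only needs to be a package-level string variable that is not initialized by a function call.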