Diffstat (limited to 'ollama.spec')
-rw-r--r--  ollama.spec  21
1 file changed, 4 insertions(+), 17 deletions(-)
diff --git a/ollama.spec b/ollama.spec
index e9c0da8..f343d5c 100644
--- a/ollama.spec
+++ b/ollama.spec
@@ -1,14 +1,14 @@
 %global debug_package %{nil}
 
 Name: ollama
-Version: 0.2.7
+Version: 0.3.0
 Release: 1%{?dist}
 Summary: Get up and running with Llama 3, Mistral, Gemma 2, and other large language models.
 License: MIT
 URL: https://ollama.com
 Source0: https://github.com/ollama/ollama/archive/refs/tags/v%{version}.tar.gz
 # ollma references a specific commit of llama.cpp as a submodule through git.
-Source1: https://github.com/ggerganov/llama.cpp/archive/refs/tags/b3340.tar.gz
+Source1: https://github.com/ggerganov/llama.cpp/archive/refs/tags/b3440.tar.gz
 Source2: vendor.tar.gz
 Source3: ollama.service
 
@@ -16,8 +16,6 @@ BuildRequires: cmake
 BuildRequires: golang
 BuildRequires: gcc-c++
 BuildRequires: git
-BuildRequires: dnf
-BuildRequires: dnf-command(config-manager)
 
 
 %description
@@ -31,17 +29,6 @@ into production.
 %prep
 %autosetup -p1 -n %{name}-%{version}
 
-%if "%{_arch}" == "x86_64"
-sudo dnf config-manager --add-repo https://developer.download.nvidia.com/compute/cuda/repos/rhel8/x86_64/cuda-rhel8.repo
-%endif
-
-%if "%{_arch}" == "aarch64"
-sudo dnf config-manager --add-repo https://developer.download.nvidia.com/compute/cuda/repos/rhel8/sbsa/cuda-rhel8.repo
-%endif
-
-sudo dnf clean all
-sudo dnf -y install cuda-toolkit-12-5
-
 # Set the CUDA nvcc path, default is /usr/local/cuda/bin/nvcc
 # export CUDACXX=/usr/local/cuda/bin/nvcc
@@ -136,8 +123,8 @@ groupdel ollama
 
 
 %changelog
-* Sat Jul 27 2024 Xenochou <xenochou@qq.com> - 0.2.7-2
-- Try add cuda build
+* Sat Jul 27 2024 Xenochou <xenochou@qq.com> - 0.3.0-1
+- Update ollama to 0.3.0
 * Tue Jul 23 2024 Xenochou <xenochou@qq.com> - 0.2.7-1
 - Update ollama to 0.2.7
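
Note on the dropped %prep block: those lines installed the CUDA toolkit with sudo dnf while the package was being built, and this revision removes them, so the toolkit now has to be present in the build environment before rpmbuild runs. A minimal sketch of provisioning such a build host by hand, reusing the repo URL and package name from the removed lines (whether the packager actually prepares the environment this way is an assumption):

# Assumed one-time setup on an x86_64, RHEL 8 compatible build host,
# mirroring the steps that were removed from %prep.
sudo dnf config-manager --add-repo \
    https://developer.download.nvidia.com/compute/cuda/repos/rhel8/x86_64/cuda-rhel8.repo
sudo dnf clean all
sudo dnf -y install cuda-toolkit-12-5
# nvcc then lives under /usr/local/cuda/bin, matching the CUDACXX hint kept in the spec.
export CUDACXX=/usr/local/cuda/bin/nvcc

On aarch64 the sbsa repo from the removed block would be added instead of the x86_64 one.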