author     CoprDistGit <infra@openeuler.org>    2024-07-27 08:20:50 +0000
committer  CoprDistGit <infra@openeuler.org>    2024-07-27 08:20:50 +0000
commit     8f63606371f06afa714e60b331adb3c013a9f8c6 (patch)
tree       39862cd43070c5339f9f11821e7aa8ce69e2ec51
parent     a058adcf35aeed6a5b566bd7a71ec8f612522fc4 (diff)
automatic import of ollama
-rw-r--r--  .gitignore   |  2
-rw-r--r--  ollama.spec  | 21
-rw-r--r--  sources      |  4
3 files changed, 8 insertions, 19 deletions
diff --git a/.gitignore b/.gitignore
--- a/.gitignore
+++ b/.gitignore
@@ -1,3 +1,5 @@
 /b3340.tar.gz
 /v0.2.7.tar.gz
 /vendor.tar.gz
+/b3440.tar.gz
+/v0.3.0.tar.gz
diff --git a/ollama.spec b/ollama.spec
index e9c0da8..f343d5c 100644
--- a/ollama.spec
+++ b/ollama.spec
@@ -1,14 +1,14 @@
 %global debug_package %{nil}
 
 Name: ollama
-Version: 0.2.7
+Version: 0.3.0
 Release: 1%{?dist}
 Summary: Get up and running with Llama 3, Mistral, Gemma 2, and other large language models.
 
 License: MIT
 URL: https://ollama.com
 Source0: https://github.com/ollama/ollama/archive/refs/tags/v%{version}.tar.gz
 # ollma references a specific commit of llama.cpp as a submodule through git.
-Source1: https://github.com/ggerganov/llama.cpp/archive/refs/tags/b3340.tar.gz
+Source1: https://github.com/ggerganov/llama.cpp/archive/refs/tags/b3440.tar.gz
 Source2: vendor.tar.gz
 Source3: ollama.service
@@ -16,8 +16,6 @@
 BuildRequires: cmake
 BuildRequires: golang
 BuildRequires: gcc-c++
 BuildRequires: git
-BuildRequires: dnf
-BuildRequires: dnf-command(config-manager)
 
 %description
@@ -31,17 +29,6 @@
 into production.
 
 %prep
 %autosetup -p1 -n %{name}-%{version}
-%if "%{_arch}" == "x86_64"
-sudo dnf config-manager --add-repo https://developer.download.nvidia.com/compute/cuda/repos/rhel8/x86_64/cuda-rhel8.repo
-%endif
-
-%if "%{_arch}" == "aarch64"
-sudo dnf config-manager --add-repo https://developer.download.nvidia.com/compute/cuda/repos/rhel8/sbsa/cuda-rhel8.repo
-%endif
-
-sudo dnf clean all
-sudo dnf -y install cuda-toolkit-12-5
-
 # Set the CUDA nvcc path, default is /usr/local/cuda/bin/nvcc
 # export CUDACXX=/usr/local/cuda/bin/nvcc
@@ -136,8 +123,8 @@
 groupdel ollama
 
 %changelog
-* Sat Jul 27 2024 Xenochou <xenochou@qq.com> - 0.2.7-2
-- Try add cuda build
+* Sat Jul 27 2024 Xenochou <xenochou@qq.com> - 0.3.0-1
+- Update ollama to 0.3.0
 
 * Tue Jul 23 2024 Xenochou <xenochou@qq.com> - 0.2.7-1
 - Update ollama to 0.2.7
diff --git a/sources b/sources
--- a/sources
+++ b/sources
@@ -1,3 +1,3 @@
-dcc899ecc57ae0b86fafa48df94c338c b3340.tar.gz
-19d5dea1e9b9658fc0cba5b8e809b12d v0.2.7.tar.gz
+35870a52a735fdf30a63e11081a3a806 b3440.tar.gz
+818e0ff098c908fb49e2d5ae85646f99 v0.3.0.tar.gz
 8c4cb7e94d9b72819625422317fa6f9b vendor.tar.gz
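
Note: the sources file pairs each source tarball with its MD5 checksum, one "<md5sum> <filename>" entry per line, which matches the output format of md5sum. Assuming the tarballs named in the updated spec (Source0, Source1, Source2) have already been downloaded into the dist-git checkout, the new entries could be regenerated with something like the sketch below; the file names come from this diff, but the command itself is not part of the commit.

    # sketch: regenerate checksum entries for the updated tarballs (not part of this commit)
    md5sum b3440.tar.gz v0.3.0.tar.gz vendor.tar.gz > sources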