author    CoprDistGit <infra@openeuler.org>  2024-07-23 13:15:12 +0000
committer CoprDistGit <infra@openeuler.org>  2024-07-23 13:15:12 +0000
commit    8a107d4d2804f59a49d994f9b8095184736a7c7c (patch)
tree      de5770e899e27915bd8ed92c8ae894a18c713fcb
parent    b90f6a086168dd4ca2229c71a43bf9f4500cac9e (diff)
automatic import of ollama
-rw-r--r--  .gitignore        3
-rw-r--r--  ollama.service   13
-rw-r--r--  ollama.spec     129
-rw-r--r--  sources           3
4 files changed, 127 insertions(+), 21 deletions(-)
diff --git a/.gitignore b/.gitignore
index e69de29..4e610c8 100644
--- a/.gitignore
+++ b/.gitignore
@@ -0,0 +1,3 @@
+/b3340.tar.gz
+/v0.2.7.tar.gz
+/vendor.tar.gz
diff --git a/ollama.service b/ollama.service
new file mode 100644
index 0000000..5ddc9b1
--- /dev/null
+++ b/ollama.service
@@ -0,0 +1,13 @@
+[Unit]
+Description=Ollama Service
+After=network-online.target
+
+[Service]
+ExecStart=/usr/bin/ollama serve
+User=ollama
+Group=ollama
+Restart=always
+RestartSec=3
+
+[Install]
+WantedBy=default.target
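+
+# A typical way to activate the service once the package is installed
+# (assuming a systemd-based host):
+#   systemctl daemon-reload
+#   systemctl enable --now ollama.service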
diff --git a/ollama.spec b/ollama.spec
index da52d23..64130ab 100644
--- a/ollama.spec
+++ b/ollama.spec
@@ -1,42 +1,129 @@
-%global _empty_manifest_terminate_build 0
-%global _unpackaged_files_terminate_build 0
-Name: ollama
-Version: 0.2.5
+%global debug_package %{nil}
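+# (Disabled because the Go build produces no usable separate debuginfo,
+# which would otherwise fail debuginfo extraction.)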
+
+Name: ollama
+Version: 0.2.7
Release: 1%{?dist}
Summary: Get up and running with Llama 3, Mistral, Gemma 2, and other large language models.
+License: MIT
+URL: https://ollama.com
+Source0: https://github.com/ollama/ollama/archive/refs/tags/v%{version}.tar.gz
+# ollama pins a specific commit of llama.cpp as a git submodule.
+Source1: https://github.com/ggerganov/llama.cpp/archive/refs/tags/b3340.tar.gz
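+# vendor.tar.gz carries the pre-fetched Go module dependencies
+# (as produced by `go mod vendor`), so the build needs no network access.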
+Source2: vendor.tar.gz
+Source3: ollama.service
-License: MIT license
-URL: https://github.com/ollama/%{name}
-# Source0: https://github.com/ollama/%{name}/archive/refs/tags/v%{version}.tar.gz
-
-BuildRequires: golang
BuildRequires: cmake
-BuildRequires: gcc
-BuildRequires: g++
+BuildRequires: golang
+BuildRequires: gcc-c++
BuildRequires: git
+
%description
-Get up and running with Llama 3, Mistral, Gemma 2, and other large language models.
+Ollama is a framework for running and serving large language models
+locally. It is efficient, scalable, and easy to use, making it an
+attractive option for developers and organizations that want to
+deploy AI models in production.
+
%prep
-%setup -c -T
-git clone https://gitee.com/mirrors/ollama.git
+%autosetup -p1 -n %{name}-%{version}
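+# %autosetup unpacks Source0 and applies any patches with -p1.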
+# Set the CUDA nvcc path; the default is /usr/local/cuda/bin/nvcc.
+# export CUDACXX=/usr/local/cuda/bin/nvcc
+
+# Set the CUDA_LIB_DIR
+# CUDA_LIB_DIR=/usr/local/cuda/lib64
+tar -xzf %{SOURCE2}
+cd llm
+tar -xzf %{SOURCE1}
+mv llama.cpp-*/* llama.cpp/
+rm -rf llama.cpp-*/
+cd llama.cpp
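+# ollama's generate scripts expect llama.cpp to be a git checkout, so
+# wrap the unpacked sources in a throwaway repository.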
+git init
+git add .
+# `git commit` needs an identity in a clean build chroot; any local
+# throwaway values work for this one-off commit.
+git -c user.name=builder -c user.email=builder@localhost commit -a -s -m 'init'
+cd ../../
+
%build
-cd ollama
-sed -i 's|https://github.com/ggerganov/llama.cpp.git|https://gitee.com/cxunmz/llama.cpp.git|' .gitmodules
-export GOPROXY=https://goproxy.cn
+# Skip the initialization of the submodule llama.cpp.
+export OLLAMA_SKIP_PATCHING=1
+
+# Set ReleaseMode
+sed -i 's/var mode string = gin.DebugMode/var mode string = gin.ReleaseMode/' server/routes.go
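+# (gin.ReleaseMode disables gin's per-request debug logging.)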
+
+# Advanced CPU Settings
+# By default, it will compile for lowest-common-denominator, AVX, and AVX2 separately.
+# Darwin Rosetta x86 emulation does NOT support AVX, AVX2, AVX512
+# -DGGML_AVX -- 2011 Intel Sandy Bridge & AMD Bulldozer
+# -DGGML_F16C -- 2012 Intel Ivy Bridge & AMD 2011 Bulldozer (No significant improvement over just AVX)
+# -DGGML_AVX2 -- 2013 Intel Haswell & 2015 AMD Excavator / 2017 AMD Zen
+# -DGGML_FMA (FMA3) -- 2013 Intel Haswell & 2012 AMD Piledriver
+# Note: the following seem to yield slower results than AVX2 - ymmv
+# -DGGML_AVX512 -- 2017 Intel Skylake and High End DeskTop (HEDT)
+# -DGGML_AVX512_VBMI -- 2018 Intel Cannon Lake
+# -DGGML_AVX512_VNNI -- 2021 Intel Alder Lake
+# export OLLAMA_CUSTOM_CPU_DEFS="-DGGML_AVX=on -DGGML_AVX2=on -DGGML_F16C=on -DGGML_FMA=on"
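+# The instruction sets supported by the build host can be listed with e.g.:
+#   grep -o 'avx[0-9a-z_]*\|f16c\|fma' /proc/cpuinfo | sort -u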
+
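+# `go generate ./...` runs the scripts under llm/generate, which
+# cmake-build the bundled llama.cpp runners; `go build .` then links
+# them into the single ollama binary.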
go generate ./...
go build .
+
%install
-cd ollama
mkdir -p %{buildroot}%{_bindir}
install -m 0755 %{name} %{buildroot}%{_bindir}/%{name}
+install -D -m 0644 %{SOURCE3} %{buildroot}%{_unitdir}/%{name}.service
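+# %{_unitdir} resolves to the systemd unit directory, normally
+# /usr/lib/systemd/system.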
+
+
+%post
+if ! id ollama >/dev/null 2>&1; then
+ echo "Creating ollama user..."
+ /usr/sbin/useradd -r -s /bin/false -U -m -d /usr/share/ollama ollama
+fi
+
+if getent group render >/dev/null 2>&1; then
+ echo "Adding ollama user to render group..."
+ /usr/sbin/usermod -a -G render ollama
+fi
+
+if getent group video >/dev/null 2>&1; then
+ echo "Adding ollama user to video group..."
+ /usr/sbin/usermod -a -G video ollama
+fi
+
+echo "Adding current user to ollama group..."
+/usr/sbin/usermod -a -G ollama $(whoami)
+
+echo "Creating ollama systemd service..."
+%systemd_post %{name}.service
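+# (%systemd_post applies the distribution's systemd preset policy to the
+# unit on initial installation.)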
+
+
+%preun
+%systemd_preun %{name}.service
+
+
+%postun
+%systemd_postun %{name}.service
+# Remove the account only on full removal ($1 == 0), never on upgrade.
+if [ "$1" -eq 0 ]; then
+    echo "Deleting ollama user..."
+    userdel ollama || :
+    echo "Deleting ollama group..."
+    groupdel ollama 2>/dev/null || :
+fi
+
%files
-%{_bindir}/ollama
+%license LICENSE
+%doc README.md
+%{_bindir}/%{name}
+%{_unitdir}/%{name}.service
+
%changelog
-* Mon Jul 15 2024 Xenochou <xenochou@qq.com> - 1
-- support ollama 0.2.5
+* Tue Jul 23 2024 Xenochou <xenochou@qq.com> - 0.2.7-1
+- Update ollama to 0.2.7
+- Add ollama user
+
+* Mon Jul 15 2024 Xenochou <xenochou@qq.com> - 0.2.6-0
+- Update ollama to 0.2.6
+
+* Fri Jun 21 2024 wujing <wujing50@huawei.com> - 0.1.44-0
+- Package init
diff --git a/sources b/sources
index e69de29..181d7ec 100644
--- a/sources
+++ b/sources
@@ -0,0 +1,3 @@
+dcc899ecc57ae0b86fafa48df94c338c b3340.tar.gz
+19d5dea1e9b9658fc0cba5b8e809b12d v0.2.7.tar.gz
+8c4cb7e94d9b72819625422317fa6f9b vendor.tar.gz