Diffstat (limited to 'ollama.spec')
 ollama.spec | 147 +++++++++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 147 insertions(+), 0 deletions(-)
diff --git a/ollama.spec b/ollama.spec
new file mode 100644
index 0000000..4fa869d
--- /dev/null
+++ b/ollama.spec
@@ -0,0 +1,147 @@
+%global debug_package %{nil}
+
+Name: ollama
+Version: 0.3.6
+Release: 1
+Summary: Get up and running with Llama 3, Mistral, Gemma 2, and other large language models
+License: MIT
+URL: https://ollama.com
+Source0: https://github.com/ollama/ollama/archive/refs/tags/v%{version}.tar.gz
+# Ollama references a specific commit of llama.cpp as a git submodule.
+Source1: https://github.com/ggerganov/llama.cpp/archive/refs/tags/b3536.tar.gz
+Source2: vendor.tar.gz
+Source3: %{name}.service
+Source4: %{name}-ui.service
+Source5: https://github.com/ollama-ui/ollama-ui/archive/refs/heads/main.tar.gz
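+# Source2 is not fetched from a URL; it is assumed to be pre-generated from
+# the ollama source tree, roughly:
+#   cd ollama-0.3.6 && go mod vendor && tar -czf vendor.tar.gz vendor/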
+
+BuildRequires: cmake
+BuildRequires: golang
+BuildRequires: gcc-c++
+BuildRequires: git
+
+Requires: python3
+
+%description
+Ollama is a framework for serving large language models. It is
+designed to be efficient, scalable, and easy to use, making it an
+attractive option for developers and organizations that want to
+deploy AI models in production.
+
+
+%prep
+%autosetup -p1 -n %{name}-%{version}
+
+# Set the CUDA nvcc path, default is /usr/local/cuda/bin/nvcc
+# export CUDACXX=/usr/local/cuda/bin/nvcc
+
+# Set the CUDA_LIB_DIR, default is /usr/local/cuda/lib64
+# CUDA_LIB_DIR=/usr/local/cuda/lib64
+
+tar -xzf %{SOURCE2}
+cd llm
+tar -xzf %{SOURCE1}
+mv llama.cpp-*/* llama.cpp/
+rm -rf llama.cpp-*/
+cd llama.cpp
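+# The generate scripts appear to expect llama.cpp to be a git work tree, so
+# fabricate a minimal throwaway repository around the unpacked sources.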
+git init
+git config user.email "tmp@example.com"
+git config user.name "tmp"
+git add .
+git commit -a -s -m 'init'
+cd ../../
+tar -xzf %{SOURCE5}
+mv ollama-ui-main ollama-ui
+
+
+%build
+export GOPROXY=https://goproxy.cn
+# Skip llama.cpp submodule initialization and patching; the sources were
+# already unpacked and committed during prep.
+export OLLAMA_SKIP_PATCHING=1
+
+# Set Version and ReleaseMode
+export GOFLAGS="'-ldflags=-w -s \"-X=github.com/ollama/ollama/version.Version=%{version}\" \"-X=github.com/ollama/ollama/server.mode=release\"'"
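+# The nested quoting keeps the whole -ldflags value together as a single
+# flag when the go tool splits GOFLAGS on spaces. After the build, the
+# embedded version can be verified with:
+#   ./ollama --version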
+
+# Advanced CPU Settings
+# By default, it will compile for lowest-common-denominator, AVX, and AVX2 separately.
+# Darwin Rosetta x86 emulation does NOT support AVX, AVX2, AVX512
+# -DGGML_AVX -- 2011 Intel Sandy Bridge & AMD Bulldozer
+# -DGGML_F16C -- 2012 Intel Ivy Bridge & AMD 2011 Bulldozer (No significant improvement over just AVX)
+# -DGGML_AVX2 -- 2013 Intel Haswell & 2015 AMD Excavator / 2017 AMD Zen
+# -DGGML_FMA (FMA3) -- 2013 Intel Haswell & 2012 AMD Piledriver
+# Note: the following seem to yield slower results than AVX2 - ymmv
+# -DGGML_AVX512 -- 2017 Intel Skylake and High End DeskTop (HEDT)
+# -DGGML_AVX512_VBMI -- 2018 Intel Cannon Lake
+# -DGGML_AVX512_VNNI -- 2021 Intel Alder Lake
+# export OLLAMA_CUSTOM_CPU_DEFS="-DGGML_AVX=on -DGGML_AVX2=on -DGGML_F16C=on -DGGML_FMA=on"
+
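+# go generate runs the scripts under llm/generate/, which configure and
+# build the vendored llama.cpp with cmake (this is where the CPU defs above
+# take effect); go build then produces the ollama binary itself.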
+go generate ./...
+go build .
+
+
+%install
+install -D -m 0755 %{name} %{buildroot}%{_bindir}/%{name}
+install -D -m 0644 %{SOURCE3} %{buildroot}%{_unitdir}/%{name}.service
+install -D -m 0644 %{SOURCE4} %{buildroot}%{_unitdir}/%{name}-ui.service
+mkdir -p %{buildroot}%{_datadir}/%{name}/
+cp -r %{name}-ui/ %{buildroot}%{_datadir}/%{name}/
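+# Resulting layout: the CLI/server at /usr/bin/ollama, both units under the
+# systemd unit directory, and the static web UI at
+# /usr/share/ollama/ollama-ui (presumably served by ollama-ui.service).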
+
+
+%post
+if ! id ollama >/dev/null 2>&1; then
+ echo "Creating ollama user..."
+ /usr/sbin/useradd -r -s /bin/false -U -m -d /usr/share/ollama ollama
+fi
+
+if getent group render >/dev/null 2>&1; then
+ echo "Adding ollama user to render group..."
+ /usr/sbin/usermod -a -G render ollama
+fi
+
+if getent group video >/dev/null 2>&1; then
+ echo "Adding ollama user to video group..."
+ /usr/sbin/usermod -a -G video ollama
+fi
+
+echo "Adding current user to ollama group..."
+/usr/sbin/usermod -a -G ollama $(whoami)
+
+echo "Creating ollama systemd service..."
+%systemd_post %{name}.service
+%systemd_post %{name}-ui.service
+
+chown ollama /usr/share/ollama
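+
+# Neither unit is started here; a typical first start after installation is:
+#   systemctl enable --now ollama.service
+#   systemctl enable --now ollama-ui.service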
+
+
+%preun
+%systemd_preun %{name}.service
+%systemd_preun %{name}-ui.service
+
+
+%postun
+%systemd_postun %{name}.service
+%systemd_postun %{name}-ui.service
+
+# Remove the account only on a full erase ($1 == 0), not on an upgrade.
+if [ "$1" -eq 0 ]; then
+    echo "Deleting ollama user..."
+    userdel ollama
+    echo "Deleting ollama group..."
+    groupdel ollama >/dev/null 2>&1 || :
+fi
+
+
+%files
+%license LICENSE
+%doc README.md
+%{_bindir}/%{name}
+%{_unitdir}/%{name}.service
+%{_unitdir}/%{name}-ui.service
+%{_datadir}/%{name}/%{name}-ui
+
+
+%changelog
+* Tue Aug 20 2024 Xenochou <xenochou@qq.com> - 0.3.6-1
+- Update ollama to 0.3.6
+- Add ollama user
+- Add ollama-ui
+
+* Fri Jun 21 2024 wujing <wujing50@huawei.com> - 0.1.44-1
+- Package init