path: root/llama.cpp.spec
author    CoprDistGit <infra@openeuler.org>    2024-11-06 12:53:48 +0000
committer CoprDistGit <infra@openeuler.org>    2024-11-06 12:53:48 +0000
commit    1c1f0f3551767b14e132be4832d6e34f2e115542 (patch)
tree      23552ef9ed433999cf304c4232dfc6838661473d /llama.cpp.spec
parent    d3811f78647af293daa4d82f1225f2c817c96ef5 (diff)
automatic import of llama.cpp
Diffstat (limited to 'llama.cpp.spec')
-rw-r--r--  llama.cpp.spec  63
1 file changed, 63 insertions, 0 deletions
diff --git a/llama.cpp.spec b/llama.cpp.spec
new file mode 100644
index 0000000..7c77318
--- /dev/null
+++ b/llama.cpp.spec
@@ -0,0 +1,63 @@
+%define debug_package %{nil}
+%global llama_commitid b4013
+
+Name: llama.cpp
+Version: 20241102
+Release: 2
+License: MIT
+Summary: Port of the LLaMA large language model implemented in C/C++
+
+URL: https://github.com/ggerganov/llama.cpp
+Source0: https://github.com/ggerganov/llama.cpp/archive/refs/tags/%{llama_commitid}.tar.gz
+
+
+BuildRequires: gcc, gcc-c++, cmake
+
+%description
+Port of the LLaMA large language model implemented in C/C++;
+it can be used for model dialogue locally on a laptop.
+
+%prep
+%autosetup -b 0 -n %{name}-%{llama_commitid} -p1
+
+%build
+mkdir llama_builddir
+pushd llama_builddir
+cmake ..
+%make_build
+popd
+
+%install
+pushd llama_builddir
+%make_install
+#mv %{buildroot}%{_prefix}/local/bin/main %{buildroot}%{_prefix}/local/bin/llama_cpp_main
+mv %{buildroot}%{_prefix}/local/bin/convert_hf_to_gguf.py %{buildroot}%{_prefix}/local/bin/llama_convert_hf_to_gguf.py
+mv %{buildroot}%{_prefix}/local/* %{buildroot}%{_prefix}
+mv %{buildroot}%{_prefix}/lib/pkgconfig/llama.pc %{buildroot}%{_prefix}
+popd
+
+%files
+%{_bindir}/*
+%{_includedir}/*
+%{_libdir}/*
+%{_prefix}/llama.pc
+
+%changelog
+* Wed Aug 28 2024 zhoupengcheng <zhoupengcheng11@huawei.com> - 20240531-2
+- fix CVE-2024-42477, CVE-2024-42478, CVE-2024-42479, CVE-2024-41130
+
+* Fri Jun 21 2024 zhoupengcheng <zhoupengcheng11@huawei.com> - 20240531-1
+- update llama.cpp to b3051
+
+* Tue May 14 2024 wangshuo <wangshuo@kylinos.cn> - 20230815-4
+- add loongarch64 support
+
+* Wed Sep 20 2023 zhoupengcheng <zhoupengcheng11@huawei.com> - 20230815-3
+- rename /usr/bin/convert.py
+- update long-term yum.repo in dockerfile
+
+* Tue Sep 19 2023 zhoupengcheng <zhoupengcheng11@huawei.com> - 20230815-2
+- add dockerfile
+
+* Wed Aug 16 2023 zhoupengcheng <zhoupengcheng11@huawei.com> - 20230815-1
+- Init package
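
For reference, a minimal local rebuild sketch for this spec (a sketch only, assuming an rpmdevtools/dnf-plugins-core setup on an openEuler or Fedora-like host; the model path and output file names are placeholders, and the converter options mirror upstream convert_hf_to_gguf.py at the time of writing):

rpmdev-setuptree                                  # create the ~/rpmbuild directory layout
cp llama.cpp.spec ~/rpmbuild/SPECS/
spectool -g -R ~/rpmbuild/SPECS/llama.cpp.spec    # download Source0 (tag b4013) into SOURCES
dnf builddep ~/rpmbuild/SPECS/llama.cpp.spec      # install gcc, gcc-c++ and cmake
rpmbuild -ba ~/rpmbuild/SPECS/llama.cpp.spec      # build the binary and source RPMs

# After installing the resulting package, the converter renamed in %install
# can be invoked roughly like this (hypothetical model path and output name):
python3 /usr/bin/llama_convert_hf_to_gguf.py /path/to/hf-model --outfile model.gguf --outtype f16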