# Do not generate a debuginfo subpackage
%define debug_package %{nil}
# Upstream release tag used for Source0 and the unpacked source directory
%global llama_commitid b4016
Name: llama.cpp
Version: 20240531
Release: 2
License: MIT
Summary: Port of the LLaMA large language model implemented in C/C++
URL: https://github.com/ggerganov/llama.cpp
Source0: https://github.com/ggerganov/llama.cpp/archive/refs/tags/%{llama_commitid}.tar.gz
Patch6000: backport-CVE-2024-41130.patch
Patch6001: backport-CVE-2024-42477-CVE-2024-42478-CVE-2024-42479.patch
BuildRequires: gcc, gcc-c++, cmake
%description
Port of the LLaMA large language model implemented in C/C++;
it can be used to run model inference and dialogue locally on a laptop.
%prep
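# Unpack Source0 into the llama.cpp-<tag> directory and apply the backported CVE patches with -p1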
%autosetup -b 0 -n %{name}-%{llama_commitid} -p1
%build
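# Out-of-tree CMake build; upstream's default install prefix is /usr/local, relocated to /usr during install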
mkdir llama_builddir
pushd llama_builddir
cmake ..
%make_build
popd
%install
pushd llama_builddir
%make_install
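# Give the generically named tools llama-specific names to avoid conflicts in the final bindir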
mv %{buildroot}%{_prefix}/local/bin/main %{buildroot}%{_prefix}/local/bin/llama_cpp_main
mv %{buildroot}%{_prefix}/local/bin/convert-hf-to-gguf.py %{buildroot}%{_prefix}/local/bin/llama_convert-hf-to-gguf.py
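# Move everything installed under /usr/local in the buildroot up to /usr so the files section matches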
mv %{buildroot}%{_prefix}/local/* %{buildroot}%{_prefix}
popd
%files
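# Package all installed binaries, headers and libraries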
%{_bindir}/*
%{_includedir}/*
%{_libdir}/*
%changelog
* Wed Aug 28 2024 zhoupengcheng <zhoupengcheng11@huawei.com> - 20240531-2
- fix CVE-2024-42477, CVE-2024-42478, CVE-2024-42479 and CVE-2024-41130
* Fri Jun 21 2024 zhoupengcheng <zhoupengcheng11@huawei.com> - 20240531-1
- update llama.cpp to b3051
* Tue May 14 2024 wangshuo <wangshuo@kylinos.cn> - 20230815-4
- add loongarch64 support
* Wed Sep 20 2023 zhoupengcheng <zhoupengcheng11@huawei.com> - 20230815-3
- rename /usr/bin/convert.py
- update long-term yum.repo in dockerfile
* Tue Sep 19 2023 zhoupengcheng <zhoupengcheng11@huawei.com> - 20230815-2
- add dockerfile
* Wed Aug 16 2023 zhoupengcheng <zhoupengcheng11@huawei.com> - 20230815-1
- Init package