diff --git a/b6602.tar.gz b/b6602.tar.gz
new file mode 100644
index 0000000000000000000000000000000000000000..f4ac243156c0f8c792ec58325ef5b09c67323e09
Binary files /dev/null and b/b6602.tar.gz differ
diff --git a/llama.cpp.spec b/llama.cpp.spec
index 01defcfe9ed52729733d3c2ad0467d561e548410..7368747f64654c343e74f831ae41e52526c4ac14 100644
--- a/llama.cpp.spec
+++ b/llama.cpp.spec
@@ -1,9 +1,9 @@
 %define debug_package %{nil}
-%global llama_commitid master-3ebb009
+%global llama_commitid b6602
 
 Name: llama.cpp
-Version: 20230815
-Release: 3
+Version: 20250928
+Release: 5
 License: MIT
 Summary: Port of English lagre model LLaMA implemented based on C/C++
 
@@ -16,29 +16,68 @@ BuildRequires: gcc,gcc-c++,cmake
 Port of English lagre model LLaMA implemented based on C/C++,
 it can be used for model dialogue based on local laptops.
 
+%package devel
+Summary: Port of Facebook's LLaMA model in C/C++
+Requires: %{name}%{?_isa} = %{version}-%{release}
+
+%description devel
+Port of the English large language model LLaMA implemented in C/C++;
+it can be used for model dialogue on local laptops.
+
 %prep
 %autosetup -b 0 -n %{name}-%{llama_commitid} -p1
 
 %build
-mkdir llama_builddir
-pushd llama_builddir
-cmake ..
-%make_build
-popd
+%cmake -DCMAKE_INSTALL_PREFIX=%{_prefix} \
+       -DCMAKE_INSTALL_LIBDIR=%{_libdir} \
+       -DCMAKE_INSTALL_BINDIR=%{_bindir} \
+       -DCMAKE_INSTALL_INCLUDEDIR=%{_includedir}
+%cmake_build
 
 %install
-pushd llama_builddir
-%make_install
-mv %{buildroot}%{_prefix}/local/bin/main %{buildroot}%{_prefix}/local/bin/llama_cpp_main
-mv %{buildroot}%{_prefix}/local/bin/convert.py %{buildroot}%{_prefix}/local/bin/llama_convert.py
-mv %{buildroot}%{_prefix}/local/* %{buildroot}%{_prefix}
-popd
+%cmake_install
+
 %files
 %{_bindir}/*
-%{_libdir}/libembdinput.a
+%{_libdir}/*.so
+
+%files devel
+%dir %{_libdir}/cmake/llama
+%doc README.md
+%{_includedir}/ggml.h
+%{_includedir}/ggml-*.h
+%{_includedir}/llama.h
+%{_includedir}/llama-*.h
+%{_libdir}/cmake/llama/*.cmake
+%{_exec_prefix}/lib/pkgconfig/llama.pc
+
 %changelog
+* Sat Sep 27 2025 StephenCurry - 20250928-5
+- Upgrade to llama.cpp release b6602
+
+* Mon Jul 21 2025 PshySimon - 20241210-4
+- fix CVE-2025-53630
+
+* Fri Jul 4 2025 PshySimon - 20241210-3
+- fix CVE-2025-52566
+
+* Wed Jul 2 2025 PshySimon - 20241210-2
+- fix CVE-2025-49847
+
+* Tue Dec 10 2024 misaka00251 - 20241210-1
+- Upgrade to 20241210
+- Split headers into devel package
+
+* Wed Aug 28 2024 zhoupengcheng - 20240531-2
+- fix CVE-2024-42477, CVE-2024-42478, CVE-2024-42479, CVE-2024-41130
+
+* Fri Jun 21 2024 zhoupengcheng - 20240531-1
+- update llama.cpp to b3051
+
+* Tue May 14 2024 wangshuo - 20230815-4
+- add loongarch64 support
+
 * Wed Sep 20 2023 zhoupengcheng - 20230815-3
 - rename /usr/bin/convert.py
 - update long-term yum.repo in dockerfile
diff --git a/master-3ebb009.tar.gz b/master-3ebb009.tar.gz
deleted file mode 100644
index 356831c48e7b03dea92a6d97b2f5c5b9d5d1c257..0000000000000000000000000000000000000000
Binary files a/master-3ebb009.tar.gz and /dev/null differ