# Generated by abuild 3.17.0_rc1-r2
# using fakeroot version 1.37.2
pkgname = llama.cpp-cpu
pkgver = 0.0.8871-r0
pkgdesc = LLM inference in C/C++ (CPU backend)
url = https://github.com/ggml-org/llama.cpp
builddate = 1776805854
packager = Buildozer <alpine-devel@lists.alpinelinux.org>
size = 7162392
arch = aarch64
origin = llama.cpp
commit = b3d9e195749ddaca4d0e3c473b2b95c0ee8a5bc5
maintainer = Hugo Osvaldo Barrera <hugo@whynothugo.nl>
license = MIT
install_if = llama.cpp=0.0.8871-r0
# automatically detected:
provides = so:llama.cpp:libggml-cpu-armv8.0_1.so=0
provides = so:llama.cpp:libggml-cpu-armv8.2_1.so=0
provides = so:llama.cpp:libggml-cpu-armv8.2_2.so=0
provides = so:llama.cpp:libggml-cpu-armv8.2_3.so=0
provides = so:llama.cpp:libggml-cpu-armv8.6_1.so=0
provides = so:llama.cpp:libggml-cpu-armv8.6_2.so=0
provides = so:llama.cpp:libggml-cpu-armv9.2_1.so=0
provides = so:llama.cpp:libggml-cpu-armv9.2_2.so=0
depend = so:libc.musl-aarch64.so.1
depend = so:libgcc_s.so.1
depend = so:libgomp.so.1
depend = so:libstdc++.so.6
depend = so:llama.cpp:libggml-base.so.0
datahash = 91a385bbdcc6f5016d3d5fea2909c02c847dd280c044a7612c97ccc24c93fade
