b4013.tar.gz
llama.cpp.spec
/usr/bin/llama-baby-llama
/usr/bin/llama-batched
/usr/bin/llama-batched-bench
/usr/bin/llama-bench
/usr/bin/llama-cli
/usr/bin/llama-convert-llama2c-to-ggml
/usr/bin/llama-cvector-generator
/usr/bin/llama-embedding
/usr/bin/llama-eval-callback
/usr/bin/llama-export-lora
/usr/bin/llama-gbnf-validator
/usr/bin/llama-gguf
/usr/bin/llama-gguf-hash
/usr/bin/llama-gguf-split
/usr/bin/llama-gritlm
/usr/bin/llama-imatrix
/usr/bin/llama-infill
/usr/bin/llama-llava-cli
/usr/bin/llama-lookahead
/usr/bin/llama-lookup
/usr/bin/llama-lookup-create
/usr/bin/llama-lookup-merge
/usr/bin/llama-lookup-stats
/usr/bin/llama-minicpmv-cli
/usr/bin/llama-parallel
/usr/bin/llama-passkey
/usr/bin/llama-perplexity
/usr/bin/llama-quantize
/usr/bin/llama-quantize-stats
/usr/bin/llama-retrieval
/usr/bin/llama-save-load-state
/usr/bin/llama-server
/usr/bin/llama-simple
/usr/bin/llama-simple-chat
/usr/bin/llama-speculative
/usr/bin/llama-tokenize
/usr/bin/llama_convert_hf_to_gguf.py
/usr/bin/test-arg-parser
/usr/bin/test-autorelease
/usr/bin/test-backend-ops
/usr/bin/test-barrier
/usr/bin/test-chat-template
/usr/bin/test-grad0
/usr/bin/test-grammar-integration
/usr/bin/test-grammar-parser
/usr/bin/test-json-schema-to-grammar
/usr/bin/test-llama-grammar
/usr/bin/test-log
/usr/bin/test-model-load-cancel
/usr/bin/test-quantize-fns
/usr/bin/test-quantize-perf
/usr/bin/test-rope
/usr/bin/test-sampling
/usr/bin/test-tokenizer-0
/usr/bin/test-tokenizer-1-bpe
/usr/bin/test-tokenizer-1-spm
/usr/include/ggml-alloc.h
/usr/include/ggml-backend.h
/usr/include/ggml-blas.h
/usr/include/ggml-cann.h
/usr/include/ggml-cuda.h
/usr/include/ggml-kompute.h
/usr/include/ggml-metal.h
/usr/include/ggml-rpc.h
/usr/include/ggml-sycl.h
/usr/include/ggml-vulkan.h
/usr/include/ggml.h
/usr/include/llama.h
/usr/lib64/cmake
/usr/lib64/cmake/llama
/usr/lib64/cmake/llama/llama-config.cmake
/usr/lib64/cmake/llama/llama-version.cmake
/usr/lib64/libggml.so
/usr/lib64/libllama.so
/usr/lib64/libllava_shared.so
/usr/llama.pc
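
The development files listed above (/usr/include/llama.h, /usr/lib64/libllama.so, and the CMake package config under /usr/lib64/cmake/llama) are what a downstream program builds against. As a minimal sketch, assuming only a few llama.h entry points that have been stable across recent releases (llama_backend_init, llama_print_system_info, llama_backend_free), a smoke test for the installed library might look like this:

    /* smoke.c - hypothetical smoke test for the packaged libllama.
     * Only assumes llama_backend_init(), llama_print_system_info(),
     * and llama_backend_free() from the installed llama.h. */
    #include <stdio.h>
    #include <llama.h>

    int main(void) {
        llama_backend_init();                          /* initialize ggml backends        */
        printf("%s\n", llama_print_system_info());     /* report detected CPU/GPU features */
        llama_backend_free();                          /* release backend resources        */
        return 0;
    }

Because the header installs to /usr/include and the shared library to /usr/lib64, both on the default search paths, something like `cc smoke.c -lllama -o smoke` should suffice to build it; a CMake-based consumer would instead locate the package through the installed llama-config.cmake.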