From 4bb2db7bd8c83cb5fdf04700b5a1fd209b494cf5 Mon Sep 17 00:00:00 2001
From: Pavel Tomanek
Date: Tue, 3 Dec 2024 13:19:39 +0100
Subject: [PATCH] adding easyconfigs: llama-cpp-python-0.3.2-gfbf-2023a.eb,
 llama-cpp-python-0.3.2-gfbf-2023a-CUDA-12.1.1.eb

---
 ...cpp-python-0.3.2-gfbf-2023a-CUDA-12.1.1.eb | 46 +++++++++++++++++++
 .../llama-cpp-python-0.3.2-gfbf-2023a.eb      | 44 ++++++++++++++++++
 2 files changed, 90 insertions(+)
 create mode 100644 easybuild/easyconfigs/l/llama-cpp-python/llama-cpp-python-0.3.2-gfbf-2023a-CUDA-12.1.1.eb
 create mode 100644 easybuild/easyconfigs/l/llama-cpp-python/llama-cpp-python-0.3.2-gfbf-2023a.eb

diff --git a/easybuild/easyconfigs/l/llama-cpp-python/llama-cpp-python-0.3.2-gfbf-2023a-CUDA-12.1.1.eb b/easybuild/easyconfigs/l/llama-cpp-python/llama-cpp-python-0.3.2-gfbf-2023a-CUDA-12.1.1.eb
new file mode 100644
index 00000000000..b2772ea22c5
--- /dev/null
+++ b/easybuild/easyconfigs/l/llama-cpp-python/llama-cpp-python-0.3.2-gfbf-2023a-CUDA-12.1.1.eb
@@ -0,0 +1,46 @@
+easyblock = 'PythonBundle'
+
+name = 'llama-cpp-python'
+version = '0.3.2'
+versionsuffix = '-CUDA-%(cudaver)s'
+
+homepage = "https://github.com/abetlen/llama-cpp-python/"
+description = """The main goal of llama.cpp is to enable LLM inference with minimal setup and state-of-the-art
+performance on a wide range of hardware - locally and in the cloud. Simple Python bindings for llama.cpp."""
+
+toolchain = {'name': 'gfbf', 'version': '2023a'}
+
+builddependencies = [
+    ('CMake', '3.26.3'),
+    ('scikit-build-core', '0.9.3'),
+]
+
+dependencies = [
+    ('CUDA', '12.1.1', '', SYSTEM),
+    ('Python', '3.11.3'),
+    ('Python-bundle-PyPI', '2023.06'),
+    ('SciPy-bundle', '2023.07'),
+    ('PyYAML', '6.0'),
+    ('tqdm', '4.66.1'),
+]
+
+use_pip = True
+sanity_pip_check = True
+
+exts_list = [
+    ('diskcache', '5.6.3', {
+        'checksums': ['2c3a3fa2743d8535d832ec61c2054a1641f41775aa7c556758a109941e33e4fc'],
+    }),
+    ('huggingface-hub', '0.26.3', {
+        'sources': ['huggingface_hub-%(version)s.tar.gz'],
+        'checksums': ['90e1fe62ffc26757a073aaad618422b899ccf9447c2bba8c902a90bef5b42e1d'],
+    }),
+    (name, version, {
+        'modulename': 'llama_cpp',
+        'preinstallopts': 'export CMAKE_ARGS="-DGGML_CUDA=on" && ',
+        'source_tmpl': 'llama_cpp_python-%(version)s.tar.gz',
+        'checksums': ['8fbf246a55a999f45015ed0d48f91b4ae04ae959827fac1cd6ac6ec65aed2e2f'],
+    }),
+]
+
+moduleclass = 'tools'
diff --git a/easybuild/easyconfigs/l/llama-cpp-python/llama-cpp-python-0.3.2-gfbf-2023a.eb b/easybuild/easyconfigs/l/llama-cpp-python/llama-cpp-python-0.3.2-gfbf-2023a.eb
new file mode 100644
index 00000000000..578011ea5c3
--- /dev/null
+++ b/easybuild/easyconfigs/l/llama-cpp-python/llama-cpp-python-0.3.2-gfbf-2023a.eb
@@ -0,0 +1,44 @@
+easyblock = 'PythonBundle'
+
+name = 'llama-cpp-python'
+version = '0.3.2'
+
+homepage = "https://github.com/abetlen/llama-cpp-python/"
+description = """The main goal of llama.cpp is to enable LLM inference with minimal setup and state-of-the-art
+performance on a wide range of hardware - locally and in the cloud. Simple Python bindings for llama.cpp."""
+
+toolchain = {'name': 'gfbf', 'version': '2023a'}
+
+builddependencies = [
+    ('CMake', '3.26.3'),
+    ('scikit-build-core', '0.9.3'),
+]
+
+dependencies = [
+    ('Python', '3.11.3'),
+    ('Python-bundle-PyPI', '2023.06'),
+    ('SciPy-bundle', '2023.07'),
+    ('PyYAML', '6.0'),
+    ('tqdm', '4.66.1'),
+]
+
+use_pip = True
+sanity_pip_check = True
+
+exts_list = [
+    ('diskcache', '5.6.3', {
+        'checksums': ['2c3a3fa2743d8535d832ec61c2054a1641f41775aa7c556758a109941e33e4fc'],
+    }),
+    ('huggingface-hub', '0.26.3', {
+        'sources': ['huggingface_hub-%(version)s.tar.gz'],
+        'checksums': ['90e1fe62ffc26757a073aaad618422b899ccf9447c2bba8c902a90bef5b42e1d'],
+    }),
+    (name, version, {
+        'modulename': 'llama_cpp',
+        'source_tmpl': 'llama_cpp_python-%(version)s.tar.gz',
+        'preinstallopts': 'export CMAKE_ARGS="-DGGML_BLAS=ON -DGGML_BLAS_VENDOR=FlexiBLAS" && ',
+        'checksums': ['8fbf246a55a999f45015ed0d48f91b4ae04ae959827fac1cd6ac6ec65aed2e2f'],
+    }),
+]
+
+moduleclass = 'tools'
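
Note for testers: beyond the pip sanity check, a quick functional check of the
bindings could look like the minimal sketch below. It assumes the module built
from this easyconfig is loaded (e.g. "module load
llama-cpp-python/0.3.2-gfbf-2023a"), and the GGUF model path is a hypothetical
placeholder, not something shipped by this PR.

    # minimal smoke test for the llama_cpp bindings;
    # /path/to/model.gguf is a hypothetical placeholder, any GGUF model works
    from llama_cpp import Llama

    llm = Llama(model_path="/path/to/model.gguf", n_ctx=512, verbose=False)
    out = llm("Q: What does llama.cpp do? A:", max_tokens=32, stop=["\n"])
    print(out["choices"][0]["text"])

For the CUDA variant, passing n_gpu_layers=-1 to Llama() should offload all
model layers to the GPU, which can be checked in the loader output when
verbose=True.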