Commit 3f040de

Merge pull request easybuilders#21959 from pavelToman/20241203131937_new_pr_llama-cpp-python032

{tools}[gfbf/2023a] llama-cpp-python v0.3.2 w/ CUDA 12.1.1
boegel authored Dec 15, 2024
2 parents 81cf52a + 4bb2db7 commit 3f040de
Showing 2 changed files with 90 additions and 0 deletions.
@@ -0,0 +1,46 @@
easyblock = 'PythonBundle'

name = 'llama-cpp-python'
version = '0.3.2'
versionsuffix = '-CUDA-%(cudaver)s'

homepage = "https://github.com/abetlen/llama-cpp-python/"
description = """The main goal of llama.cpp is to enable LLM inference with minimal setup and state-of-the-art
performance on a wide range of hardware - locally and in the cloud. Simple Python bindings."""

toolchain = {'name': 'gfbf', 'version': '2023a'}

builddependencies = [
    ('CMake', '3.26.3'),
    ('scikit-build-core', '0.9.3'),
]

dependencies = [
    ('CUDA', '12.1.1', '', SYSTEM),
    ('Python', '3.11.3'),
    ('Python-bundle-PyPI', '2023.06'),
    ('SciPy-bundle', '2023.07'),
    ('PyYAML', '6.0'),
    ('tqdm', '4.66.1'),
]

use_pip = True
sanity_pip_check = True

exts_list = [
    ('diskcache', '5.6.3', {
        'checksums': ['2c3a3fa2743d8535d832ec61c2054a1641f41775aa7c556758a109941e33e4fc'],
    }),
    ('huggingface-hub', '0.26.3', {
        'sources': ['huggingface_hub-%(version)s.tar.gz'],
        'checksums': ['90e1fe62ffc26757a073aaad618422b899ccf9447c2bba8c902a90bef5b42e1d'],
    }),
    (name, version, {
        'modulename': 'llama_cpp',
        'preinstallopts': 'export CMAKE_ARGS="-DGGML_CUDA=on" && ',
        'source_tmpl': 'llama_cpp_python-%(version)s.tar.gz',
        'checksums': ['8fbf246a55a999f45015ed0d48f91b4ae04ae959827fac1cd6ac6ec65aed2e2f'],
    }),
]

moduleclass = 'tools'
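
The preinstallopts line above exports CMAKE_ARGS="-DGGML_CUDA=on" so that pip compiles the bundled llama.cpp with CUDA support during installation. Once a module built from this easyconfig is loaded, a quick check could look like the sketch below; "model.gguf" is a placeholder for any locally available GGUF model, not a file shipped by this PR.

# Minimal GPU smoke test (a sketch, not part of this PR).
# "model.gguf" is a placeholder: point it at any GGUF model available locally.
from llama_cpp import Llama

llm = Llama(
    model_path="model.gguf",
    n_gpu_layers=-1,  # offload all layers to the GPU; requires a visible CUDA device
)
result = llm("Q: What is llama.cpp? A:", max_tokens=32)
print(result["choices"][0]["text"])
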
@@ -0,0 +1,44 @@
easyblock = 'PythonBundle'

name = 'llama-cpp-python'
version = '0.3.2'

homepage = "https://github.com/abetlen/llama-cpp-python/"
description = """The main goal of llama.cpp is to enable LLM inference with minimal setup and state-of-the-art
performance on a wide range of hardware - locally and in the cloud. Simple Python bindings."""

toolchain = {'name': 'gfbf', 'version': '2023a'}

builddependencies = [
    ('CMake', '3.26.3'),
    ('scikit-build-core', '0.9.3'),
]

dependencies = [
    ('Python', '3.11.3'),
    ('Python-bundle-PyPI', '2023.06'),
    ('SciPy-bundle', '2023.07'),
    ('PyYAML', '6.0'),
    ('tqdm', '4.66.1'),
]

use_pip = True
sanity_pip_check = True

exts_list = [
    ('diskcache', '5.6.3', {
        'checksums': ['2c3a3fa2743d8535d832ec61c2054a1641f41775aa7c556758a109941e33e4fc'],
    }),
    ('huggingface-hub', '0.26.3', {
        'sources': ['huggingface_hub-%(version)s.tar.gz'],
        'checksums': ['90e1fe62ffc26757a073aaad618422b899ccf9447c2bba8c902a90bef5b42e1d'],
    }),
    (name, version, {
        'modulename': 'llama_cpp',
        'source_tmpl': 'llama_cpp_python-%(version)s.tar.gz',
        'preinstallopts': 'export CMAKE_ARGS="-DGGML_BLAS=ON -DGGML_BLAS_VENDOR=FlexiBLAS" && ',
        'checksums': ['8fbf246a55a999f45015ed0d48f91b4ae04ae959827fac1cd6ac6ec65aed2e2f'],
    }),
]

moduleclass = 'tools'
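
In this non-CUDA variant, preinstallopts instead sets CMAKE_ARGS="-DGGML_BLAS=ON -DGGML_BLAS_VENDOR=FlexiBLAS", linking the bundled llama.cpp against the FlexiBLAS backend provided by the gfbf/2023a toolchain. A corresponding CPU-only check, again as a sketch with a placeholder model path:

# Minimal CPU smoke test for the FlexiBLAS build (a sketch, not part of this PR).
# "model.gguf" is a placeholder for any local GGUF model.
from llama_cpp import Llama

llm = Llama(model_path="model.gguf", n_ctx=512)  # small context keeps the test light
result = llm("Q: What is llama.cpp? A:", max_tokens=32)
print(result["choices"][0]["text"])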
