Fix locally compiled llama-cpp-python failing to import

oobabooga 2024-10-14 13:24:13 -07:00
parent c9a9f63d1b
commit bb62e796eb


@@ -9,10 +9,11 @@ from modules import shared
 from modules.cache_utils import process_llamacpp_cache
 
 imported_module = None
+not_available_modules = set()
 
 
 def llama_cpp_lib():
-    global imported_module
+    global imported_module, not_available_modules
 
     # Determine the platform
     is_macos = platform.system() == 'Darwin'
@@ -31,6 +32,9 @@ def llama_cpp_lib():
     ]
 
     for arg, lib_name in lib_names:
+        if lib_name in not_available_modules:
+            continue
+
         should_import = (arg is None or getattr(shared.args, arg))
 
         if should_import:
@@ -44,6 +48,7 @@ def llama_cpp_lib():
                 monkey_patch_llama_cpp_python(return_lib)
                 return return_lib
             except ImportError:
+                not_available_modules.add(lib_name)
                 continue
 
     return None
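
The change memoizes import failures: any lib_name that raises ImportError is added to not_available_modules, and later calls to llama_cpp_lib() skip it instead of retrying the import. Below is a minimal, self-contained sketch of that skip-known-failures pattern using importlib; the helper name and the candidate module names are placeholders for illustration, not the exact code in the webui.

    import importlib

    # Modules that already failed to import; consulted before every retry.
    _not_available = set()


    def load_first_available(candidates):
        """Return the first importable module from candidates, skipping known failures."""
        for name in candidates:
            if name in _not_available:
                continue  # this module already failed once; don't retry the import
            try:
                return importlib.import_module(name)
            except ImportError:
                _not_available.add(name)  # remember the failure for subsequent calls
        return None


    # Example: prefer a GPU build, fall back to the CPU build (placeholder names).
    lib = load_first_available(['llama_cpp_cuda', 'llama_cpp'])

Because _not_available persists across calls, a variant that is not installed costs one failed import attempt for the whole session rather than one per call.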