mirror of
https://github.com/oobabooga/text-generation-webui.git
synced 2024-11-22 08:07:56 +01:00
Force only 1 llama-cpp-python version at a time for now
This commit is contained in:
parent
f243b4ca9c
commit
a47de06088
@ -7,30 +7,47 @@ from modules import shared
|
|||||||
from modules.cache_utils import process_llamacpp_cache
|
from modules.cache_utils import process_llamacpp_cache
|
||||||
|
|
||||||
|
|
||||||
|
# Name of the llama-cpp-python flavor already imported in this process
# ('llama_cpp', 'llama_cpp_cuda', or 'llama_cpp_cuda_tensorcores'), or
# None if none has been imported yet. Only one flavor may be loaded per
# process (see llama-cpp-python issue #1575), so this acts as a latch.
imported_module = None
|
||||||
|
|
||||||
|
|
||||||
def _try_llamacpp_import(name):
    """Try to import the llama-cpp-python flavor `name`.

    Returns the imported module, or None if the import fails (e.g. the
    wheel for that flavor is not installed). On success, records the
    flavor in the module-level `imported_module` latch.

    Raises:
        Exception: if a *different* flavor was already imported in this
            process — mixing flavors is unsupported (llama-cpp-python
            issue #1575) and requires a server restart.
    """
    global imported_module

    if imported_module and imported_module != name:
        raise Exception(f"Cannot import '{name}' because '{imported_module}' is already imported. See issue #1575 in llama-cpp-python. Please restart the server before attempting to use a different version of llama-cpp-python.")

    try:
        lib = importlib.import_module(name)
    except Exception:
        # Best-effort: the flavor may simply not be installed, or its
        # native libraries (e.g. CUDA) may fail to load. Fall through to
        # the next candidate.
        return None

    imported_module = name
    return lib


def llama_cpp_lib():
    """Return the llama-cpp-python module matching the current settings.

    Candidates are tried in priority order derived from `shared.args`:
    CPU mode tries 'llama_cpp' first; tensorcores mode tries
    'llama_cpp_cuda_tensorcores' first; otherwise 'llama_cpp_cuda' is
    preferred, with 'llama_cpp' as the final fallback for non-CPU modes.
    Returns None if no flavor can be imported.

    Only one flavor may be loaded per process; asking for a different
    one after a successful import raises (see `_try_llamacpp_import`).
    """
    return_lib = None

    if shared.args.cpu:
        return_lib = _try_llamacpp_import('llama_cpp')

    if shared.args.tensorcores and return_lib is None:
        return_lib = _try_llamacpp_import('llama_cpp_cuda_tensorcores')

    if return_lib is None:
        return_lib = _try_llamacpp_import('llama_cpp_cuda')

    if return_lib is None and not shared.args.cpu:
        return_lib = _try_llamacpp_import('llama_cpp')

    # NOTE(review): the scraped diff ends here without a visible return
    # statement; upstream may perform extra setup (e.g. cache handling)
    # before returning — confirm against the full file.
    return return_lib
|
Loading…
Reference in New Issue
Block a user