mirror of https://github.com/ggerganov/llama.cpp.git
45abe0f74e
* server : replace behave with pytest
* fix test on windows
* misc
* add more tests
* more tests
* styling
* log less, fix embd test
* added all sequential tests
* fix coding style
* fix save slot test
* add parallel completion test
* fix parallel test
* remove feature files
* update test docs
* no cache_prompt for some tests
* add test_cache_vs_nocache_prompt
16 lines · 449 B · Python
import pytest

from utils import *


# ref: https://stackoverflow.com/questions/22627659/run-code-before-and-after-each-test-in-py-test
@pytest.fixture(autouse=True)
def stop_server_after_each_test():
    # do nothing before each test
    yield
    # stop all servers after each test
    instances = set(
        server_instances
    )  # copy the set to prevent 'Set changed size during iteration'
    for server in instances:
        server.stop()
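
The fixture relies on `from utils import *` bringing in a module-level `server_instances` set that is populated as servers are started, so every test gets its servers torn down automatically. The following self-contained sketch illustrates the same autouse-teardown pattern; `FakeServer`, `registry`, and `start_fake_server` are stand-in names for illustration only and are not part of the llama.cpp test utilities:

# Sketch of the autouse-teardown pattern used by the conftest above.
# FakeServer and registry are stand-ins, not llama.cpp's real utils API.
import pytest

registry: set = set()  # analogous to the shared server_instances set


class FakeServer:
    def __init__(self, name: str):
        self.name = name
        self.running = True

    def stop(self):
        self.running = False


def start_fake_server(name: str) -> FakeServer:
    # register on start, the way the real utilities track running servers
    server = FakeServer(name)
    registry.add(server)
    return server


@pytest.fixture(autouse=True)
def stop_fake_servers_after_each_test():
    yield
    # iterate over a copy so the registry can be mutated safely during teardown
    for server in set(registry):
        server.stop()
        registry.discard(server)


def test_server_is_stopped_automatically():
    server = start_fake_server("s1")
    assert server.running  # the fixture stops it after the test returns

Because the fixture is `autouse=True`, no test has to request it explicitly; any server left in the registry at the end of a test is stopped before the next one starts, which is what keeps sequential and parallel server tests from leaking processes.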