Mirror of https://github.com/oobabooga/text-generation-webui.git, synced 2024-11-24
Lint

commit 510a01ef46
parent 923c8e25fb
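The hunks below only regroup and reorder imports; no behavior changes. The new order matches what an import sorter such as isort produces: standard-library imports first, then third-party packages, then project-local modules, each group separated by a blank line, with plain import statements ahead of from-imports. The commit itself does not name a tool, so the following is only a minimal sketch of how the same grouping could be reproduced with isort's Python API; the known_first_party value is an assumption for illustration, not taken from the repository's configuration.

import isort

# A small unsorted import block, similar in shape to the ones touched below.
messy = (
    "import traceback\n"
    "from threading import Thread\n"
    "import uvicorn\n"
    "from modules import shared\n"
)

# isort.code() returns the source with imports grouped and alphabetized.
# known_first_party is assumed here so that "modules" lands in the local group.
print(isort.code(messy, known_first_party=["modules", "extensions"]))

Running this prints the block with uvicorn split off from the standard-library imports and the modules import moved into its own trailing group, which is the same reshuffling the diffs below apply.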
@@ -21,7 +21,6 @@ import tqdm
 from requests.adapters import HTTPAdapter
 from tqdm.contrib.concurrent import thread_map
 
-
 base = "https://huggingface.co"
 
 
@@ -5,6 +5,8 @@ from collections import deque
 import tiktoken
 import torch
 import torch.nn.functional as F
+from transformers import LogitsProcessor, LogitsProcessorList
+
 from extensions.openai.errors import InvalidRequestError
 from extensions.openai.utils import debug_msg
 from modules import shared
@@ -15,7 +17,6 @@ from modules.chat import (
 )
 from modules.presets import load_preset_memoized
 from modules.text_generation import decode, encode, generate_reply
-from transformers import LogitsProcessor, LogitsProcessorList
 
 
 class LogitsBiasProcessor(LogitsProcessor):
@@ -1,6 +1,7 @@
 import os
 
 import numpy as np
+
 from extensions.openai.errors import ServiceUnavailableError
 from extensions.openai.utils import debug_msg, float_list_to_base64
 from modules.logging_colors import logger
@@ -2,6 +2,7 @@ import os
 import time
 
 import requests
+
 from extensions.openai.errors import ServiceUnavailableError
 
 
@@ -1,9 +1,10 @@
 import time
 
 import numpy as np
-from extensions.openai.embeddings import get_embeddings
 from numpy.linalg import norm
 
+from extensions.openai.embeddings import get_embeddings
+
 moderations_disabled = False  # return 0/false
 category_embeddings = None
 antonym_embeddings = None
@@ -4,26 +4,27 @@ import os
 import traceback
 from threading import Thread
 
+import speech_recognition as sr
+import uvicorn
+from fastapi import Depends, FastAPI, Header, HTTPException
+from fastapi.middleware.cors import CORSMiddleware
+from fastapi.requests import Request
+from fastapi.responses import JSONResponse
+from pydub import AudioSegment
+from sse_starlette import EventSourceResponse
+
 import extensions.openai.completions as OAIcompletions
 import extensions.openai.embeddings as OAIembeddings
 import extensions.openai.images as OAIimages
 import extensions.openai.models as OAImodels
 import extensions.openai.moderations as OAImoderations
-import speech_recognition as sr
-import uvicorn
 from extensions.openai.errors import ServiceUnavailableError
 from extensions.openai.tokens import token_count, token_decode, token_encode
 from extensions.openai.utils import _start_cloudflared
-from fastapi import Depends, FastAPI, Header, HTTPException
-from fastapi.middleware.cors import CORSMiddleware
-from fastapi.requests import Request
-from fastapi.responses import JSONResponse
 from modules import shared
 from modules.logging_colors import logger
 from modules.models import unload_model
 from modules.text_generation import stop_everything_event
-from pydub import AudioSegment
-from sse_starlette import EventSourceResponse
 
 from .typing import (
     ChatCompletionRequest,
@@ -1,4 +1,5 @@
 from pathlib import Path
+
 from modules.logging_colors import logger
 
 if Path('../webui.py').exists():