
Add local models to gui, Fix You Provider, add AsyncClient
hlohaus committed Apr 7, 2024
1 parent 674ba8f commit b35dfcd
Showing 37 changed files with 685 additions and 564 deletions.
2 changes: 1 addition & 1 deletion g4f/Provider/Aura.py
@@ -18,7 +18,7 @@ async def create_async_generator(
         messages: Messages,
         proxy: str = None,
         temperature: float = 0.5,
-        max_tokens: int = 8192.
+        max_tokens: int = 8192,
         webdriver: WebDriver = None,
         **kwargs
     ) -> AsyncResult:
70 changes: 20 additions & 50 deletions g4f/Provider/DeepInfra.py
@@ -1,89 +1,59 @@
 from __future__ import annotations
 
-import json
 import requests
 from ..typing import AsyncResult, Messages
-from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
-from ..requests import StreamSession, raise_for_status
+from .needs_auth.Openai import Openai
 
-class DeepInfra(AsyncGeneratorProvider, ProviderModelMixin):
+class DeepInfra(Openai):
     url = "https://deepinfra.com"
     working = True
     needs_auth = False
     supports_stream = True
     supports_message_history = True
     default_model = 'meta-llama/Llama-2-70b-chat-hf'
 
     @classmethod
     def get_models(cls):
         if not cls.models:
             url = 'https://api.deepinfra.com/models/featured'
             models = requests.get(url).json()
-            cls.models = [model['model_name'] for model in models]
+            cls.models = [model['model_name'] for model in models if model["type"] == "text-generation"]
         return cls.models
 
     @classmethod
-    async def create_async_generator(
+    def create_async_generator(
         cls,
         model: str,
         messages: Messages,
         stream: bool,
-        proxy: str = None,
-        timeout: int = 120,
-        auth: str = None,
+        api_base: str = "https://api.deepinfra.com/v1/openai",
+        temperature: float = 0.7,
+        max_tokens: int = 1028,
         **kwargs
     ) -> AsyncResult:
         headers = {
             'Accept-Encoding': 'gzip, deflate, br',
             'Accept-Language': 'en-US',
             'Connection': 'keep-alive',
-            'Content-Type': 'application/json',
+            'Content-Type': None,
             'Origin': 'https://deepinfra.com',
             'Referer': 'https://deepinfra.com/',
             'Sec-Fetch-Dest': 'empty',
             'Sec-Fetch-Mode': 'cors',
             'Sec-Fetch-Site': 'same-site',
             'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/119.0.0.0 Safari/537.36',
             'X-Deepinfra-Source': 'web-embed',
-            'accept': 'text/event-stream',
+            'Accept': None,
             'sec-ch-ua': '"Google Chrome";v="119", "Chromium";v="119", "Not?A_Brand";v="24"',
             'sec-ch-ua-mobile': '?0',
             'sec-ch-ua-platform': '"macOS"',
         }
-        if auth:
-            headers['Authorization'] = f"bearer {auth}"
-
-        async with StreamSession(headers=headers,
-            timeout=timeout,
-            proxies={"https": proxy},
-            impersonate="chrome110"
-        ) as session:
-            json_data = {
-                'model' : cls.get_model(model),
-                'messages': messages,
-                'temperature': kwargs.get("temperature", 0.7),
-                'max_tokens': kwargs.get("max_tokens", 512),
-                'stop': kwargs.get("stop", []),
-                'stream' : True
-            }
-            async with session.post('https://api.deepinfra.com/v1/openai/chat/completions',
-                json=json_data) as response:
-                await raise_for_status(response)
-                first = True
-                async for line in response.iter_lines():
-                    if not line.startswith(b"data: "):
-                        continue
-                    try:
-                        json_line = json.loads(line[6:])
-                        choices = json_line.get("choices", [{}])
-                        finish_reason = choices[0].get("finish_reason")
-                        if finish_reason:
-                            break
-                        token = choices[0].get("delta", {}).get("content")
-                        if token:
-                            if first:
-                                token = token.lstrip()
-                                if token:
-                                    first = False
-                            yield token
-                    except Exception:
-                        raise RuntimeError(f"Response: {line}")
+        return super().create_async_generator(
+            model, messages,
+            stream=stream,
+            api_base=api_base,
+            temperature=temperature,
+            max_tokens=max_tokens,
+            headers=headers,
+            **kwargs
+        )
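After this refactor, DeepInfra no longer parses the SSE stream itself: it only supplies its endpoint, model defaults, and browser-style headers, then defers to Openai.create_async_generator. A minimal usage sketch, assuming g4f is installed and the DeepInfra endpoint is reachable (the model name is the default_model from the diff above):

import asyncio
from g4f.Provider import DeepInfra

async def main():
    # create_async_generator now simply returns the async generator built by
    # the Openai base class; DeepInfra contributes headers and defaults only.
    async for chunk in DeepInfra.create_async_generator(
        model="meta-llama/Llama-2-70b-chat-hf",
        messages=[{"role": "user", "content": "Say hello"}],
        stream=True,
    ):
        # The base class can also yield a FinishReason sentinel at the end,
        # so only print plain text chunks.
        if isinstance(chunk, str):
            print(chunk, end="", flush=True)

asyncio.run(main())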
2 changes: 1 addition & 1 deletion g4f/Provider/GeminiPro.py
@@ -76,7 +76,7 @@ async def create_async_generator(
                 if not response.ok:
                     data = await response.json()
                     data = data[0] if isinstance(data, list) else data
-                    raise RuntimeError(f"Response {response.status}: {data["error"]["message"]}")
+                    raise RuntimeError(f"Response {response.status}: {data['error']['message']}")
                 if stream:
                     lines = []
                     async for chunk in response.content:
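Context for the one-character fix above: before Python 3.12 (PEP 701), an f-string cannot reuse the quote character that delimits it, so the nested double quotes were a hard SyntaxError on the interpreters g4f supports. A standalone illustration with made-up data:

data = {"error": {"message": "quota exceeded"}}
status = 429

# SyntaxError on Python < 3.12: the inner double quotes end the f-string early.
# message = f"Response {status}: {data["error"]["message"]}"

# Portable form used in the patch: single quotes inside the f-string.
message = f"Response {status}: {data['error']['message']}"
print(message)  # Response 429: quota exceeded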
42 changes: 42 additions & 0 deletions g4f/Provider/Local.py
@@ -0,0 +1,42 @@
+from __future__ import annotations
+
+from ..locals.models import get_models
+try:
+    from ..locals.provider import LocalProvider
+    has_requirements = True
+except ModuleNotFoundError:
+    has_requirements = False
+
+from ..typing import Messages, CreateResult
+from ..providers.base_provider import AbstractProvider, ProviderModelMixin
+from ..errors import MissingRequirementsError
+
+class Local(AbstractProvider, ProviderModelMixin):
+    working = True
+    supports_message_history = True
+    supports_system_message = True
+    supports_stream = True
+
+    @classmethod
+    def get_models(cls):
+        if not cls.models:
+            cls.models = list(get_models())
+            cls.default_model = cls.models[0]
+        return cls.models
+
+    @classmethod
+    def create_completion(
+        cls,
+        model: str,
+        messages: Messages,
+        stream: bool,
+        **kwargs
+    ) -> CreateResult:
+        if not has_requirements:
+            raise MissingRequirementsError('Install "gpt4all" package | pip install -U g4f[local]')
+        return LocalProvider.create_completion(
+            cls.get_model(model),
+            messages,
+            stream,
+            **kwargs
+        )
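A minimal sketch of the new provider's surface, assuming the optional local dependency is installed (pip install -U g4f[local]) and at least one GPT4All-compatible model is visible to get_models():

from g4f.Provider import Local

# Model discovery populates Local.models and Local.default_model on first call.
print(Local.get_models())

# create_completion yields text chunks when stream=True; without the
# "gpt4all" extra it raises MissingRequirementsError instead.
for chunk in Local.create_completion(
    model=Local.default_model,
    messages=[{"role": "user", "content": "Hello"}],
    stream=True,
):
    print(chunk, end="", flush=True)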
8 changes: 6 additions & 2 deletions g4f/Provider/You.py
@@ -17,6 +17,8 @@
 from ..requests import StreamSession, raise_for_status
 from ..errors import MissingRequirementsError
 
+from .you.har_file import get_dfp_telemetry_id
+
 class You(AsyncGeneratorProvider, ProviderModelMixin):
     url = "https://you.com"
     working = True
@@ -45,6 +47,7 @@ async def create_async_generator(
         cls,
         model: str,
         messages: Messages,
+        stream: bool = True,
         image: ImageType = None,
         image_name: str = None,
         proxy: str = None,
@@ -56,7 +59,7 @@ async def create_async_generator(
         if image is not None:
             chat_mode = "agent"
         elif not model or model == cls.default_model:
-            chat_mode = "default"
+            ...
         elif model.startswith("dall-e"):
             chat_mode = "create"
         else:
@@ -108,7 +111,7 @@ async def create_async_generator(
                     data = json.loads(line[6:])
                     if event == "youChatToken" and event in data:
                         yield data[event]
-                    elif event == "youChatUpdate" and "t" in data:
+                    elif event == "youChatUpdate" and "t" in data and data["t"] is not None:
                         match = re.search(r"!\[fig\]\((.+?)\)", data["t"])
                         if match:
                             yield ImageResponse(match.group(1), messages[-1]["content"])
@@ -177,6 +180,7 @@ async def create_cookies(cls, client: StreamSession) -> Cookies:
                 "X-SDK-Parent-Host": cls.url
             },
             json={
+                "dfp_telemetry_id": await get_dfp_telemetry_id(),
                 "email": f"{user_uuid}@gmail.com",
                 "password": f"{user_uuid}#{user_uuid}",
                 "session_duration_minutes": 129600
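The extra data["t"] is not None guard above is the core of the "Fix You Provider" part of this commit: you.com can send a youChatUpdate event whose "t" field is present but null, and passing None to re.search raises a TypeError. A small self-contained sketch of the guarded parsing (the event payloads are illustrative, not captured traffic):

import re

# Hypothetical youChatUpdate payloads: the second shape previously crashed
# the parser because "t" exists but is null.
events = [
    {"t": "![fig](https://example.com/image.png)"},
    {"t": None},
]

for data in events:
    # Without the None check, re.search(..., data["t"]) raises
    # "TypeError: expected string or bytes-like object".
    if "t" in data and data["t"] is not None:
        match = re.search(r"!\[fig\]\((.+?)\)", data["t"])
        if match:
            print("image url:", match.group(1))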
1 change: 1 addition & 0 deletions g4f/Provider/__init__.py
@@ -33,6 +33,7 @@
 from .Koala import Koala
 from .Liaobots import Liaobots
 from .Llama2 import Llama2
+from .Local import Local
 from .PerplexityLabs import PerplexityLabs
 from .Pi import Pi
 from .Vercel import Vercel
1 change: 0 additions & 1 deletion g4f/Provider/deprecated/OpenAssistant.py
@@ -8,7 +8,6 @@
 from ..base_provider import AsyncGeneratorProvider
 from ..helper import format_prompt, get_cookies
 
-
 class OpenAssistant(AsyncGeneratorProvider):
     url = "https://open-assistant.io/chat"
     needs_auth = True
3 changes: 2 additions & 1 deletion g4f/Provider/deprecated/__init__.py
@@ -31,4 +31,5 @@
 from .GeekGpt import GeekGpt
 from .GPTalk import GPTalk
 from .Hashnode import Hashnode
-from .Ylokh import Ylokh
+from .Ylokh import Ylokh
+from .OpenAssistant import OpenAssistant
2 changes: 1 addition & 1 deletion g4f/Provider/needs_auth/Gemini.py
@@ -19,7 +19,7 @@
 from ...typing import Messages, Cookies, ImageType, AsyncResult
 from ..base_provider import AsyncGeneratorProvider
 from ..helper import format_prompt, get_cookies
-from requests.raise_for_status import raise_for_status
+from ...requests.raise_for_status import raise_for_status
 from ...errors import MissingAuthError, MissingRequirementsError
 from ...image import to_bytes, ImageResponse
 from ...webdriver import get_browser, get_driver_cookies
96 changes: 65 additions & 31 deletions g4f/Provider/needs_auth/Openai.py
@@ -3,10 +3,10 @@
 import json
 
 from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin, FinishReason
-from ...typing import AsyncResult, Messages
+from ...typing import Union, Optional, AsyncResult, Messages
 from ...requests.raise_for_status import raise_for_status
 from ...requests import StreamSession
-from ...errors import MissingAuthError
+from ...errors import MissingAuthError, ResponseError
 
 class Openai(AsyncGeneratorProvider, ProviderModelMixin):
     url = "https://openai.com"
@@ -27,48 +27,82 @@ async def create_async_generator(
         temperature: float = None,
         max_tokens: int = None,
         top_p: float = None,
-        stop: str = None,
+        stop: Union[str, list[str]] = None,
         stream: bool = False,
+        headers: dict = None,
+        extra_data: dict = {},
         **kwargs
     ) -> AsyncResult:
-        if api_key is None:
+        if cls.needs_auth and api_key is None:
             raise MissingAuthError('Add a "api_key"')
         async with StreamSession(
             proxies={"all": proxy},
-            headers=cls.get_headers(api_key),
+            headers=cls.get_headers(stream, api_key, headers),
             timeout=timeout
         ) as session:
-            data = {
-                "messages": messages,
-                "model": cls.get_model(model),
-                "temperature": temperature,
-                "max_tokens": max_tokens,
-                "top_p": top_p,
-                "stop": stop,
-                "stream": stream,
-            }
+            data = filter_none(
+                messages=messages,
+                model=cls.get_model(model),
+                temperature=temperature,
+                max_tokens=max_tokens,
+                top_p=top_p,
+                stop=stop,
+                stream=stream,
+                **extra_data
+            )
             async with session.post(f"{api_base.rstrip('/')}/chat/completions", json=data) as response:
                 await raise_for_status(response)
-                async for line in response.iter_lines():
-                    if line.startswith(b"data: ") or not stream:
-                        async for chunk in cls.read_line(line[6:] if stream else line, stream):
-                            yield chunk
+                if not stream:
+                    data = await response.json()
+                    choice = data["choices"][0]
+                    if "content" in choice["message"]:
+                        yield choice["message"]["content"].strip()
+                    finish = cls.read_finish_reason(choice)
+                    if finish is not None:
+                        yield finish
+                else:
+                    first = True
+                    async for line in response.iter_lines():
+                        if line.startswith(b"data: "):
+                            chunk = line[6:]
+                            if chunk == b"[DONE]":
+                                break
+                            data = json.loads(chunk)
+                            if "error_message" in data:
+                                raise ResponseError(data["error_message"])
+                            choice = data["choices"][0]
+                            if "content" in choice["delta"] and choice["delta"]["content"]:
+                                delta = choice["delta"]["content"]
+                                if first:
+                                    delta = delta.lstrip()
+                                if delta:
+                                    first = False
+                                yield delta
+                            finish = cls.read_finish_reason(choice)
+                            if finish is not None:
+                                yield finish
 
     @staticmethod
-    async def read_line(line: str, stream: bool):
-        if line == b"[DONE]":
-            return
-        choice = json.loads(line)["choices"][0]
-        if stream and "content" in choice["delta"] and choice["delta"]["content"]:
-            yield choice["delta"]["content"]
-        elif not stream and "content" in choice["message"]:
-            yield choice["message"]["content"]
+    def read_finish_reason(choice: dict) -> Optional[FinishReason]:
         if "finish_reason" in choice and choice["finish_reason"] is not None:
-            yield FinishReason(choice["finish_reason"])
+            return FinishReason(choice["finish_reason"])
 
-    @staticmethod
-    def get_headers(api_key: str) -> dict:
+    @classmethod
+    def get_headers(cls, stream: bool, api_key: str = None, headers: dict = None) -> dict:
         return {
-            "Authorization": f"Bearer {api_key}",
+            "Accept": "text/event-stream" if stream else "application/json",
             "Content-Type": "application/json",
-        }
+            **(
+                {"Authorization": f"Bearer {api_key}"}
+                if cls.needs_auth and api_key is not None
+                else {}
+            ),
+            **({} if headers is None else headers)
+        }
+
+def filter_none(**kwargs) -> dict:
+    return {
+        key: value
+        for key, value in kwargs.items()
+        if value is not None
+    }
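The filter_none helper added above keeps None-valued parameters out of the request body, so optional settings the caller never touched are not sent to the API at all. A quick demonstration of the helper in isolation:

def filter_none(**kwargs) -> dict:
    # Drop every keyword whose value is None; keep everything else as-is.
    return {
        key: value
        for key, value in kwargs.items()
        if value is not None
    }

payload = filter_none(model="gpt-4", temperature=None, max_tokens=100, stop=None)
assert payload == {"model": "gpt-4", "max_tokens": 100}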
