pyproject.toml (2 changes: 1 addition, 1 deletion)
@@ -28,7 +28,7 @@ dependencies = [
     'Mako == 1.3.2',
     'requests == 2.32.0',
     'rich == 13.7.1',
-    'tiktoken == 0.6.0',
+    'tiktoken == 0.8.0',
     'instructor == 1.3.5',
     'PyYAML == 6.0.1',
     'python-dotenv == 1.0.1',
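Context for the version bump: tiktoken maps model names to encodings, and tiktoken 0.6.0 predates gpt-4o, so its model registry cannot resolve the newer model names introduced below. A minimal token-counting sketch (standalone, not code from this repo):

    import tiktoken

    # "gpt-4o" resolves to the o200k_base encoding in recent tiktoken
    # releases; the previously pinned 0.6.0 predates the model and
    # fails to map it to a tokenizer.
    enc = tiktoken.encoding_for_model("gpt-4o")
    print(len(enc.encode("How many tokens is this?")))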
src/hackingBuddyGPT/utils/openai/openai_llm.py (45 changes: 45 additions, 0 deletions)
@@ -7,6 +7,10 @@
 from hackingBuddyGPT.utils.configurable import configurable, parameter
 from hackingBuddyGPT.utils.llm_util import LLMResult, LLM
 
+# Uncomment the following to log debug output
+# import logging
+# logging.basicConfig(level=logging.DEBUG)
+
 @configurable("openai-compatible-llm-api", "OpenAI-compatible LLM API")
 @dataclass
 class OpenAIConnection(LLM):
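Note on the commented-out logging above: logging.basicConfig(level=logging.DEBUG) configures the root logger, so DEBUG output from requests/urllib3 will appear alongside the payload logs added further down. A sketch of a module-scoped alternative (not what this PR does):

    import logging

    # A logger scoped to this module; urllib3's DEBUG chatter stays
    # suppressed because only this logger's level and handler are touched.
    logger = logging.getLogger("hackingBuddyGPT.utils.openai.openai_llm")
    logger.setLevel(logging.DEBUG)
    logger.addHandler(logging.StreamHandler())

    logger.debug("request/response debugging enabled for this module only")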
@@ -36,9 +40,22 @@ def get_response(self, prompt, *, retry: int = 0, **kwargs) -> LLMResult:
         headers = {"Authorization": f"Bearer {self.api_key}"}
         data = {'model': self.model, 'messages': [{'role': 'user', 'content': prompt}]}
 
+        # Log the request payload
+        #
+        # Uncomment the following to log debug output
+        # logging.debug(f"Request payload: {data}")
+
         try:
             tic = time.perf_counter()
             response = requests.post(f'{self.api_url}{self.api_path}', headers=headers, json=data, timeout=self.api_timeout)
+
+            # Log response headers, status, and body
+            #
+            # Uncomment the following to log debug output
+            # logging.debug(f"Response Headers: {response.headers}")
+            # logging.debug(f"Response Status: {response.status_code}")
+            # logging.debug(f"Response Body: {response.text}")
+
             if response.status_code == 429:
                 print(f"[RestAPI-Connector] running into rate-limits, waiting for {self.api_backoff} seconds")
                 time.sleep(self.api_backoff)
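For reference, a standalone sketch of the 429-handling pattern this hunk extends (names and defaults are illustrative; the real get_response also measures timing via time.perf_counter and delegates retries through the retry parameter):

    import time
    import requests

    def post_with_backoff(url, headers, data, timeout=240, backoff=60, max_retries=3):
        """Retry a POST on HTTP 429 with a fixed backoff, as get_response does above."""
        for _ in range(max_retries + 1):
            response = requests.post(url, headers=headers, json=data, timeout=timeout)
            if response.status_code != 429:
                return response
            print(f"[RestAPI-Connector] running into rate-limits, waiting for {backoff} seconds")
            time.sleep(backoff)
        raise RuntimeError("still rate-limited after retries")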
@@ -95,3 +112,31 @@ class GPT4(OpenAIConnection):
 class GPT4Turbo(OpenAIConnection):
     model: str = "gpt-4-turbo-preview"
     context_size: int = 128000
+
+
+@configurable("openai/gpt-4o", "OpenAI GPT-4o")
+@dataclass
+class GPT4o(OpenAIConnection):
+    model: str = "gpt-4o"
+    context_size: int = 128000
+
+
+@configurable("openai/gpt-4o-mini", "OpenAI GPT-4o-mini")
+@dataclass
+class GPT4oMini(OpenAIConnection):
+    model: str = "gpt-4o-mini"
+    context_size: int = 128000
+
+
+@configurable("openai/o1-preview", "OpenAI o1-preview")
+@dataclass
+class O1Preview(OpenAIConnection):
+    model: str = "o1-preview"
+    context_size: int = 128000
+
+
+@configurable("openai/o1-mini", "OpenAI o1-mini")
+@dataclass
+class O1Mini(OpenAIConnection):
+    model: str = "o1-mini"
+    context_size: int = 128000
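Finally, a hypothetical usage sketch for one of the new configurables. The connection fields (api_key, api_url, api_path, api_timeout, api_backoff) are inferred from the get_response hunk above; the real constructor signature, defaults supplied via the imported parameter helper, and the LLMResult fields may differ:

    from hackingBuddyGPT.utils.openai.openai_llm import GPT4oMini

    # Illustrative values only; the real class may provide defaults.
    llm = GPT4oMini(
        api_key="sk-...",
        api_url="https://api.openai.com",
        api_path="/v1/chat/completions",
        api_timeout=240,
        api_backoff=60,
    )
    result = llm.get_response("Reply with a single word.")  # returns an LLMResult
    print(result)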