|
1 | 1 | from aicodebot import version as aicodebot_version |
2 | | -from aicodebot.agents import get_agent |
3 | 2 | from aicodebot.helpers import exec_and_get_output, get_token_length, git_diff_context |
| 3 | +from aicodebot.prompts import generate_sidekick_prompt |
4 | 4 | from dotenv import load_dotenv |
5 | 5 | from langchain.callbacks.base import BaseCallbackHandler |
6 | 6 | from langchain.chains import LLMChain |
7 | 7 | from langchain.chat_models import ChatOpenAI |
| 8 | +from langchain.memory import ConversationTokenBufferMemory |
8 | 9 | from langchain.prompts import load_prompt |
9 | 10 | from openai.api_resources import engine |
10 | 11 | from pathlib import Path |
@@ -264,31 +265,53 @@ def review(commit, verbose): |
264 | 265 |
|
265 | 266 |
|
@cli.command
@click.option("--request", "-r", help="What to ask your sidekick to do")
@click.option("-v", "--verbose", count=True)
def sidekick(request, verbose):
    """EXPERIMENTAL: Coding help from your AI sidekick"""
    console.print("This is an experimental feature. Play with it, but don't count on it.", style=warning_style)

    setup_environment()

    # Generate the prompt with the appropriate context, then size the model to
    # the token length of the prompt template itself.
    prompt = generate_sidekick_prompt(request)
    model = get_llm_model(get_token_length(prompt.template))

    llm = ChatOpenAI(
        model=model,
        temperature=PRECISE_TEMPERATURE,
        max_tokens=DEFAULT_MAX_TOKENS * 2,
        verbose=verbose,
        streaming=True,  # stream tokens so the Live display can update incrementally
    )

    # Set up the chain with a token-bounded conversation memory so the rolling
    # history stays within the model's context window.
    chain = LLMChain(
        llm=llm,
        prompt=prompt,
        memory=ConversationTokenBufferMemory(llm=llm, max_token_limit=DEFAULT_MAX_TOKENS),
        verbose=verbose,
    )

    while True:  # continuous loop for multiple questions
        if request:
            user_input = request
            request = None  # clear the command line request once we've handled it
        else:
            user_input = click.prompt(
                "Enter a question OR (q) quit, OR (e) edit for entering a question in your editor\n>>>",
                prompt_suffix="",
            )
            if user_input.lower() == "q":
                break
            elif user_input.lower() == "e":
                user_input = click.edit()
                if user_input is None:
                    # Editor was closed without saving — ask again instead of
                    # sending None to the chain (which would raise).
                    continue
        if not user_input.strip():
            # Nothing to ask; don't waste a round trip on empty input.
            continue

        # Re-render the streamed Markdown response live as tokens arrive.
        with Live(Markdown(""), auto_refresh=True) as live:
            callback = RichLiveCallbackHandler(live)
            callback.buffer = []
            llm.callbacks = [callback]
            chain.run(user_input)
292 | 315 |
|
293 | 316 |
|
294 | 317 | # ---------------------------------------------------------------------------- # |
@@ -393,7 +416,7 @@ def __init__(self, live): |
393 | 416 |
|
394 | 417 | def on_llm_new_token(self, token, **kwargs): |
395 | 418 | self.buffer.append(token) |
396 | | - self.live.update(Markdown("".join(self.buffer))) |
| 419 | + self.live.update(Markdown("".join(self.buffer), style=bot_style)) |
397 | 420 |
|
398 | 421 |
|
399 | 422 | if __name__ == "__main__": |
|
0 commit comments