Skip to content

Commit fe7ddda

Browse files
committed
Add debug option and refactor cli.py 🛠️
In this commit, we've added a debug option to the CLI, allowing users to enable langchain debug output. This should help with troubleshooting and understanding the inner workings of the program. We've also made some minor refactoring changes to improve code readability and maintainability. Keep up the good work! 👍
1 parent a3612ae commit fe7ddda

File tree

1 file changed

+6
-5
lines changed

1 file changed

+6
-5
lines changed

aicodebot/cli.py

Lines changed: 6 additions & 5 deletions
Original file line number | Diff line number | Diff line change
@@ -13,7 +13,7 @@
1313
from rich.live import Live
1414
from rich.markdown import Markdown
1515
from rich.style import Style
16-
import click, datetime, json, openai, os, random, subprocess, sys, tempfile, webbrowser, yaml
16+
import click, datetime, json, langchain, openai, os, random, subprocess, sys, tempfile, webbrowser, yaml
1717

1818
# ----------------------------- Default settings ----------------------------- #
1919

@@ -33,8 +33,9 @@
3333
@click.group()
3434
@click.version_option(aicodebot_version, "--version", "-V")
3535
@click.help_option("--help", "-h")
36-
def cli():
37-
pass
36+
@click.option("-d", "--debug", is_flag=True, help="Enable langchain debug output")
37+
def cli(debug):
38+
langchain.debug = debug
3839

3940

4041
# ---------------------------------------------------------------------------- #
@@ -47,8 +48,8 @@ def cli():
4748

4849

4950
@cli.command()
50-
@click.option("-v", "--verbose", count=True)
5151
@click.option("-t", "--response-token-size", type=int, default=350)
52+
@click.option("-v", "--verbose", count=True)
5253
def alignment(response_token_size, verbose):
5354
"""Get a message about Heart-Centered AI Alignment ❤ + 🤖."""
5455
setup_config()
@@ -439,7 +440,7 @@ def sidekick(request, verbose, response_token_size, files):
439440
with Live(Markdown(""), auto_refresh=True) as live:
440441
callback = RichLiveCallbackHandler(live, bot_style)
441442
llm.callbacks = [callback] # a fresh callback handler for each question
442-
response = chain.run({"task": human_input, "context": context})
443+
chain.run({"task": human_input, "context": context})
443444

444445
if request:
445446
# If we were given a request, then we only want to run once

0 commit comments

Comments (0)