diff --git a/docs/source/users/index.md b/docs/source/users/index.md
index db9880d4d..52c758c15 100644
--- a/docs/source/users/index.md
+++ b/docs/source/users/index.md
@@ -542,3 +542,9 @@ produced the following Python error:
Write a new version of this code that does not produce that error.
```
+As a shortcut for explaining errors, you can use the `%ai error` command, which will explain the most recent error using the model of your choice.
+
+```
+%ai error anthropic:claude-v1.2
+```
+
diff --git a/examples/commands.ipynb b/examples/commands.ipynb
index 347f5f1d9..76634cee8 100644
--- a/examples/commands.ipynb
+++ b/examples/commands.ipynb
@@ -41,32 +41,95 @@
"metadata": {
"tags": []
},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "Usage: %%ai [OPTIONS] MODEL_ID\n",
+ "\n",
+ " Invokes a language model identified by MODEL_ID, with the prompt being\n",
+ " contained in all lines after the first. Both local model IDs and global\n",
+ " model IDs (with the provider ID explicitly prefixed, followed by a colon)\n",
+ " are accepted.\n",
+ "\n",
+ " To view available language models, please run `%ai list`.\n",
+ "\n",
+ "Options:\n",
+ " -f, --format [code|html|image|json|markdown|math|md|text]\n",
+ " IPython display to use when rendering\n",
+ " output. [default=\"markdown\"]\n",
+ " -r, --reset Clears the conversation transcript used when\n",
+ " interacting with an OpenAI chat model\n",
+ " provider. Does nothing with other providers.\n",
+ " --help Show this message and exit.\n",
+ "------------------------------------------------------------------------------\n",
+ "Usage: %%ai [OPTIONS] MODEL_ID\n",
+ "\n",
+ " Invokes a language model identified by MODEL_ID, with the prompt being\n",
+ " contained in all lines after the first. Both local model IDs and global\n",
+ " model IDs (with the provider ID explicitly prefixed, followed by a colon)\n",
+ " are accepted.\n",
+ "\n",
+ " To view available language models, please run `%ai list`.\n",
+ "\n",
+ "Options:\n",
+ " -f, --format [code|html|image|json|markdown|math|md|text]\n",
+ " IPython display to use when rendering\n",
+ " output. [default=\"markdown\"]\n",
+ " -r, --reset Clears the conversation transcript used when\n",
+ " interacting with an OpenAI chat model\n",
+ " provider. Does nothing with other providers.\n",
+ " --help Show this message and exit.\n",
+ "------------------------------------------------------------------------------\n",
+ "Usage: %ai [OPTIONS] COMMAND [ARGS]...\n",
+ "\n",
+ " Invokes a subcommand.\n",
+ "\n",
+ "Options:\n",
+ " --help Show this message and exit.\n",
+ "\n",
+ "Commands:\n",
+ " error Explains the most recent error.\n",
+ " help Show this message and exit.\n",
+ " list List language models. See `%ai list --help` for options.\n",
+ "\n"
+ ]
+ }
+ ],
+ "source": [
+ "%ai help"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 3,
+ "id": "e1f2b767-0834-4b21-b132-093730efaffb",
+ "metadata": {
+ "tags": []
+ },
"outputs": [
{
"data": {
"text/markdown": [
- "| Command | Description |\n",
- "| ------- | ----------- |\n",
- "| `help` | Display a list of supported commands|\n",
- "| `list` | Display a list of models that you can use (optionally, for a single provider)|\n"
+ "There have been no errors since the kernel started."
],
"text/plain": [
- "help - Display a list of supported commands\n",
- "list - Display a list of models that you can use (optionally, for a single provider)\n"
+ "There have been no errors since the kernel started."
]
},
- "execution_count": 2,
+ "execution_count": 3,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
- "%ai help"
+ "%ai error chatgpt"
]
},
{
"cell_type": "code",
- "execution_count": 3,
+ "execution_count": 4,
"id": "0f073caa-265d-40d6-b537-d025b8df9f41",
"metadata": {},
"outputs": [
@@ -75,19 +138,19 @@
"text/markdown": [
"Cannot determine model provider from model ID `foo`.\n",
"\n",
- "To see a list of models you can use, run `%ai list`.\n",
+ "To see a list of models you can use, run `%ai list`\n",
"\n",
"If you were trying to run a command, run `%ai help` to see a list of commands."
],
"text/plain": [
"Cannot determine model provider from model ID 'foo'.\n",
"\n",
- "To see a list of models you can use, run '%ai list'.\n",
+ "To see a list of models you can use, run '%ai list'\n",
"\n",
"If you were trying to run a command, run '%ai help' to see a list of commands."
]
},
- "execution_count": 3,
+ "execution_count": 4,
"metadata": {},
"output_type": "execute_result"
}
@@ -98,7 +161,7 @@
},
{
"cell_type": "code",
- "execution_count": 4,
+ "execution_count": 5,
"id": "bad2d8a8-6141-4247-9af7-7583426c59a6",
"metadata": {},
"outputs": [
@@ -113,6 +176,7 @@
"| `huggingface_hub` | `HUGGINGFACEHUB_API_TOKEN` | ✅ | This provider does not define a list of models. |\n",
"| `openai` | `OPENAI_API_KEY` | ✅ | `openai:text-davinci-003`, `openai:text-davinci-002`, `openai:text-curie-001`, `openai:text-babbage-001`, `openai:text-ada-001`, `openai:davinci`, `openai:curie`, `openai:babbage`, `openai:ada` |\n",
"| `openai-chat` | `OPENAI_API_KEY` | ✅ | `openai-chat:gpt-4`, `openai-chat:gpt-4-0314`, `openai-chat:gpt-4-32k`, `openai-chat:gpt-4-32k-0314`, `openai-chat:gpt-3.5-turbo`, `openai-chat:gpt-3.5-turbo-0301` |\n",
+ "| `openai-chat-new` | `OPENAI_API_KEY` | ✅ | `openai-chat-new:gpt-4`, `openai-chat-new:gpt-4-0314`, `openai-chat-new:gpt-4-32k`, `openai-chat-new:gpt-4-32k-0314`, `openai-chat-new:gpt-3.5-turbo`, `openai-chat-new:gpt-3.5-turbo-0301` |\n",
"| `sagemaker-endpoint` | Not applicable. | N/A | This provider does not define a list of models. |\n"
],
"text/plain": [
@@ -166,12 +230,21 @@
"* openai-chat:gpt-3.5-turbo\n",
"* openai-chat:gpt-3.5-turbo-0301\n",
"\n",
+ "openai-chat-new\n",
+ "Requires environment variable OPENAI_API_KEY (set)\n",
+ "* openai-chat-new:gpt-4\n",
+ "* openai-chat-new:gpt-4-0314\n",
+ "* openai-chat-new:gpt-4-32k\n",
+ "* openai-chat-new:gpt-4-32k-0314\n",
+ "* openai-chat-new:gpt-3.5-turbo\n",
+ "* openai-chat-new:gpt-3.5-turbo-0301\n",
+ "\n",
"sagemaker-endpoint\n",
"* This provider does not define a list of models.\n",
"\n"
]
},
- "execution_count": 4,
+ "execution_count": 5,
"metadata": {},
"output_type": "execute_result"
}
@@ -182,7 +255,7 @@
},
{
"cell_type": "code",
- "execution_count": 5,
+ "execution_count": 6,
"id": "4d84fcac-7348-4c02-9ec3-34b300ec8459",
"metadata": {},
"outputs": [
@@ -208,7 +281,7 @@
"\n"
]
},
- "execution_count": 5,
+ "execution_count": 6,
"metadata": {},
"output_type": "execute_result"
}
@@ -219,9 +292,118 @@
},
{
"cell_type": "code",
- "execution_count": null,
+ "execution_count": 7,
"id": "b56ff0e3-42c2-4927-affd-be6a089dfa43",
"metadata": {},
+ "outputs": [
+ {
+ "ename": "SyntaxError",
+ "evalue": "Missing parentheses in call to 'print'. Did you mean print(...)? (1142230402.py, line 1)",
+ "output_type": "error",
+ "traceback": [
+ "\u001b[0;36m Cell \u001b[0;32mIn[7], line 1\u001b[0;36m\u001b[0m\n\u001b[0;31m print 'foo'\u001b[0m\n\u001b[0m ^\u001b[0m\n\u001b[0;31mSyntaxError\u001b[0m\u001b[0;31m:\u001b[0m Missing parentheses in call to 'print'. Did you mean print(...)?\n"
+ ]
+ }
+ ],
+ "source": [
+ "print 'foo'"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 8,
+ "id": "63581a74-9237-4f11-aff2-48612c97cb27",
+ "metadata": {
+ "tags": []
+ },
+ "outputs": [
+ {
+ "data": {
+ "text/markdown": [
+ "The error message \"SyntaxError: Missing parentheses in call to 'print'. Did you mean print(...)?\", occurs when attempting to use the print function without parentheses in Python 3, which is not syntactically valid. In Python 2, it was possible to use the print statement without parentheses, but in Python 3 it has become a function and its use requires parentheses. The error message suggests that the print statement should be replaced with print() to rectify the error."
+ ],
+ "text/plain": [
+ ""
+ ]
+ },
+ "execution_count": 8,
+ "metadata": {
+ "text/markdown": {
+ "jupyter_ai": {
+ "model_id": "gpt-3.5-turbo",
+ "provider_id": "openai-chat"
+ }
+ }
+ },
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "%ai error chatgpt"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 9,
+ "id": "1afe4536-f908-4bd7-aec4-f8d1cc3bf01f",
+ "metadata": {
+ "tags": []
+ },
+ "outputs": [
+ {
+ "name": "stderr",
+ "output_type": "stream",
+ "text": [
+ "/opt/miniconda3/envs/jupyter-ai/lib/python3.10/site-packages/langchain/llms/anthropic.py:134: UserWarning: This Anthropic LLM is deprecated. Please use `from langchain.chat_models import ChatAnthropic` instead\n",
+ " warnings.warn(\n"
+ ]
+ },
+ {
+ "data": {
+ "text/markdown": [
+ "\n",
+ "The error `Cell In[7], line 1 print 'foo' ^ SyntaxError: Missing parentheses in call to 'print' . Did you mean print(...)?` \n",
+ "is occurring because in Python 3, the `print` statement has changed. \n",
+ "\n",
+ "In Python 2, you could simply do:\n",
+ "`print 'foo'`\n",
+ "\n",
+ "to print the string `foo`. \n",
+ "\n",
+ "However, in Python 3, the `print` function requires parentheses:\n",
+ "`print('foo')`\n",
+ "\n",
+ "So the error is telling you that you're trying to use the Python 2 `print` statement in Python 3 code. \n",
+ "It's suggesting that you likely meant to call the `print()` function instead, with parentheses: \n",
+ "`print('foo')`.\n",
+ "\n",
+ "Adding the parentheses will fix the error."
+ ],
+ "text/plain": [
+ ""
+ ]
+ },
+ "execution_count": 9,
+ "metadata": {
+ "text/markdown": {
+ "jupyter_ai": {
+ "model_id": "claude-v1.2",
+ "provider_id": "anthropic"
+ }
+ }
+ },
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "%ai error anthropic:claude-v1.2"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "d47aa5ec-31e0-4670-b1d1-6b4cd7f75832",
+ "metadata": {},
"outputs": [],
"source": []
}
@@ -242,7 +424,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
- "version": "3.9.16"
+ "version": "3.10.8"
}
},
"nbformat": 4,
diff --git a/examples/errors.ipynb b/examples/errors.ipynb
index ea86c71e4..d6d733ea5 100644
--- a/examples/errors.ipynb
+++ b/examples/errors.ipynb
@@ -70,17 +70,21 @@
{
"data": {
"text/markdown": [
- "```\n",
- "# Explanation:\n",
- "The error occurred because the code attempted to divide the number 2 by 0, which is undefined in mathematics and programming. This resulted in a ZeroDivisionError. It is important to avoid division by zero in your programs to ensure they run smoothly.\n",
- "```"
+ "The error is a `ZeroDivisionError`. It occurred because the code attempted to divide a number by zero, which is mathematically undefined. This error message specifically states \"division by zero\", indicating that there was an attempt to divide by zero."
],
"text/plain": [
""
]
},
"execution_count": 4,
- "metadata": {},
+ "metadata": {
+ "text/markdown": {
+ "jupyter_ai": {
+ "model_id": "gpt-3.5-turbo",
+ "provider_id": "openai-chat"
+ }
+ }
+ },
"output_type": "execute_result"
}
],
@@ -90,6 +94,16 @@
" {Err[3]}"
]
},
+ {
+ "cell_type": "markdown",
+ "id": "686afba1-6a0d-4046-9125-ab78e5414b70",
+ "metadata": {},
+ "source": [
+ "---\n",
+ "\n",
+ "You can also use the `%ai error` command, which takes the same parameters as the basic `%%ai` command, to explain the most recent error."
+ ]
+ },
{
"cell_type": "code",
"execution_count": 5,
@@ -146,24 +160,28 @@
{
"data": {
"text/markdown": [
- "```\n",
- "# Explanation:\n",
- "The code attempted to use the `print` statement without enclosing the printed text in parentheses. In Python 2, `print` is a statement and does not require parentheses. However, in Python 3, `print` is a function and requires parentheses even when printing a single value or string. The error message suggests using `print()` instead of `print` to solve the SyntaxError.\n",
- "```"
+ "The error is a `SyntaxError`. It occurred because the code uses Python 2 syntax to print a string without parentheses, but the notebook is running in Python 3, which requires parentheses for `print` statements. \n",
+ "\n",
+ "The error message specifies that the issue is \"Missing parentheses in call to 'print'\". The message also suggests a possible solution by stating \"Did you mean print(...)?\" which indicates that enclosing the string \"foo\" in parentheses will resolve the syntax error."
],
"text/plain": [
""
]
},
"execution_count": 7,
- "metadata": {},
+ "metadata": {
+ "text/markdown": {
+ "jupyter_ai": {
+ "model_id": "gpt-3.5-turbo",
+ "provider_id": "openai-chat"
+ }
+ }
+ },
"output_type": "execute_result"
}
],
"source": [
- "%%ai chatgpt\n",
- "Explain the following Python error:\n",
- " {Err[5]}"
+ "%ai error chatgpt"
]
},
{
@@ -171,7 +189,28 @@
"execution_count": 8,
"id": "f32009ae-c7d5-4649-aa13-852e193ec604",
"metadata": {},
- "outputs": [],
+ "outputs": [
+ {
+ "data": {
+ "text/html": [
+ "AI generated code inserted below ⬇️"
+ ],
+ "text/plain": [
+ ""
+ ]
+ },
+ "execution_count": 8,
+ "metadata": {
+ "text/html": {
+ "jupyter_ai": {
+ "model_id": "gpt-3.5-turbo",
+ "provider_id": "openai-chat"
+ }
+ }
+ },
+ "output_type": "execute_result"
+ }
+ ],
"source": [
"%%ai chatgpt --format code\n",
"The following Python code:\n",
@@ -188,7 +227,7 @@
{
"cell_type": "code",
"execution_count": null,
- "id": "41d043ab-54b1-411a-9449-7d4c7232988f",
+ "id": "8b10227e-53b3-48e2-bba0-104cc222ffc5",
"metadata": {},
"outputs": [],
"source": [
@@ -220,7 +259,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
- "version": "3.10.9"
+ "version": "3.10.8"
}
},
"nbformat": 4,
diff --git a/packages/jupyter-ai-magics/jupyter_ai_magics/magics.py b/packages/jupyter-ai-magics/jupyter_ai_magics/magics.py
index 01e47196e..bc1f9b362 100644
--- a/packages/jupyter-ai-magics/jupyter_ai_magics/magics.py
+++ b/packages/jupyter-ai-magics/jupyter_ai_magics/magics.py
@@ -12,7 +12,7 @@
from jupyter_ai_magics.utils import decompose_model_id, load_providers
from .providers import BaseProvider
-from .parsers import cell_magic_parser, line_magic_parser, CellArgs, HelpArgs, ListArgs
+from .parsers import cell_magic_parser, line_magic_parser, CellArgs, ErrorArgs, HelpArgs, ListArgs
MODEL_ID_ALIASES = {
@@ -72,6 +72,15 @@ def _repr_mimebundle_(self, include=None, exclude=None):
PROVIDER_NO_MODELS = 'This provider does not define a list of models.'
+CANNOT_DETERMINE_MODEL_TEXT = """Cannot determine model provider from model ID '{0}'.
+
+To see a list of models you can use, run '%ai list'"""
+
+CANNOT_DETERMINE_MODEL_MARKDOWN = """Cannot determine model provider from model ID `{0}`.
+
+To see a list of models you can use, run `%ai list`"""
+
+
PROMPT_TEMPLATES_BY_FORMAT = {
"code": '{prompt}\n\nProduce output as source code only, with no text or explanation before or after it.',
"html": '{prompt}\n\nProduce output in HTML format only, with no markup before or afterward.',
@@ -83,10 +92,7 @@ def _repr_mimebundle_(self, include=None, exclude=None):
"text": '{prompt}' # No customization
}
-AI_COMMANDS = {
- "help": "Display a list of supported commands",
- "list": "Display a list of models that you can use (optionally, for a single provider)"
-}
+AI_COMMANDS = { "error", "help", "list" }
class FormatDict(dict):
"""Subclass of dict to be passed to str#format(). Suppresses KeyError and
@@ -212,6 +218,36 @@ def _ai_list_command_text(self, single_provider=None):
return output
+ def handle_error(self, args: ErrorArgs):
+ no_errors = "There have been no errors since the kernel started."
+
+ # Find the most recent error.
+ ip = get_ipython()
+ if ('Err' not in ip.user_ns):
+ return TextOrMarkdown(no_errors, no_errors)
+
+ err = ip.user_ns['Err']
+ # Start from the previous execution count
+ excount = ip.execution_count - 1
+ last_error = None
+ while (excount >= 0 and last_error is None):
+            if (excount in err):
+ last_error = err[excount]
+ else:
+                excount = excount - 1
+
+ if (last_error is None):
+ return TextOrMarkdown(no_errors, no_errors)
+
+ prompt = f"Explain the following error:\n\n{last_error}"
+ # Set CellArgs based on ErrorArgs
+ cell_args = CellArgs(
+ type="root",
+ model_id=args.model_id,
+ format=args.format,
+ reset=False)
+ return self.run_ai_cell(cell_args, prompt)
+
def _append_exchange_openai(self, prompt: str, output: str):
"""Appends a conversational exchange between user and an OpenAI Chat
model to a transcript that will be included in future exchanges."""
@@ -247,41 +283,7 @@ def handle_list(self, args: ListArgs):
self._ai_list_command_markdown(args.provider_id)
)
- @line_cell_magic
- def ai(self, line, cell=None):
- raw_args = line.split(' ')
- if cell:
- args = cell_magic_parser(raw_args, prog_name="%%ai", standalone_mode=False)
- else:
- args = line_magic_parser(raw_args, prog_name="%ai", standalone_mode=False)
-
- if args == 0:
- # this happens when `--help` is called on the root command, in which
- # case we want to exit early.
- return
-
- if args.type == "help":
- return self.handle_help(args)
- if args.type == "list":
- return self.handle_list(args)
-
- # hint to the IDE that this object must be of type `RootArgs`
- args: CellArgs = args
-
- if not cell:
- raise CellMagicError(
- """[0.8+]: To invoke a language model, you must use the `%%ai`
- cell magic. The `%ai` line magic is only for use with
- subcommands."""
- )
-
- prompt = cell.strip()
-
- # If the user is attempting to run a command, run the command separately.
- if (args.model_id in AI_COMMANDS):
- # The "prompt" is a list of arguments to the command, whitespace-delimited
- return self._ai_command(args.model_id, prompt)
-
+ def run_ai_cell(self, args: CellArgs, prompt: str):
# Apply a prompt template.
prompt = PROMPT_TEMPLATES_BY_FORMAT[args.format].format(prompt = prompt)
@@ -290,11 +292,9 @@ def ai(self, line, cell=None):
Provider = self._get_provider(provider_id)
if Provider is None:
return TextOrMarkdown(
- f"Cannot determine model provider from model ID '{args.model_id}'.\n\n"
- + "To see a list of models you can use, run '%ai list'.\n\n"
+ CANNOT_DETERMINE_MODEL_TEXT.format(args.model_id) + "\n\n"
+ "If you were trying to run a command, run '%ai help' to see a list of commands.",
- f"Cannot determine model provider from model ID `{args.model_id}`.\n\n"
- + "To see a list of models you can use, run `%ai list`.\n\n"
+ CANNOT_DETERMINE_MODEL_MARKDOWN.format(args.model_id) + "\n\n"
+ "If you were trying to run a command, run `%ai help` to see a list of commands."
)
@@ -314,10 +314,6 @@ def ai(self, line, cell=None):
f"Please specify it via `%env {auth_strategy.name}=token`. "
) from None
- # interpolate user namespace into prompt
- ip = get_ipython()
- prompt = prompt.format_map(FormatDict(ip.user_ns))
-
# configure and instantiate provider
provider_params = { "model_id": local_model_id }
if provider_id == "openai-chat":
@@ -353,6 +349,7 @@ def ai(self, line, cell=None):
text=output,
replace=False,
)
+ ip = get_ipython()
ip.payload_manager.write_payload(new_cell_payload)
return HTML('AI generated code inserted below ⬇️', metadata=md);
@@ -365,3 +362,41 @@ def ai(self, line, cell=None):
# finally, display output display
return output_display
+
+ @line_cell_magic
+ def ai(self, line, cell=None):
+ raw_args = line.split(' ')
+ if cell:
+ args = cell_magic_parser(raw_args, prog_name="%%ai", standalone_mode=False)
+ else:
+ args = line_magic_parser(raw_args, prog_name="%ai", standalone_mode=False)
+
+ if args == 0:
+ # this happens when `--help` is called on the root command, in which
+ # case we want to exit early.
+ return
+
+ if args.type == "error":
+ return self.handle_error(args)
+ if args.type == "help":
+ return self.handle_help(args)
+ if args.type == "list":
+ return self.handle_list(args)
+
+ # hint to the IDE that this object must be of type `RootArgs`
+ args: CellArgs = args
+
+ if not cell:
+ raise CellMagicError(
+ """[0.8+]: To invoke a language model, you must use the `%%ai`
+ cell magic. The `%ai` line magic is only for use with
+ subcommands."""
+ )
+
+ prompt = cell.strip()
+
+ # interpolate user namespace into prompt
+ ip = get_ipython()
+ prompt = prompt.format_map(FormatDict(ip.user_ns))
+
+ return self.run_ai_cell(args, prompt)
diff --git a/packages/jupyter-ai-magics/jupyter_ai_magics/parsers.py b/packages/jupyter-ai-magics/jupyter_ai_magics/parsers.py
index 5847cb5a3..c4820f5f4 100644
--- a/packages/jupyter-ai-magics/jupyter_ai_magics/parsers.py
+++ b/packages/jupyter-ai-magics/jupyter_ai_magics/parsers.py
@@ -4,6 +4,7 @@
FORMAT_CHOICES_TYPE = Literal["code", "html", "image", "json", "markdown", "math", "md", "text"]
FORMAT_CHOICES = list(get_args(FORMAT_CHOICES_TYPE))
+FORMAT_HELP = """IPython display to use when rendering output. [default="markdown"]"""
class CellArgs(BaseModel):
type: Literal["root"] = "root"
@@ -11,6 +12,12 @@ class CellArgs(BaseModel):
format: FORMAT_CHOICES_TYPE
reset: bool
+# Should match CellArgs, but without "reset"
+class ErrorArgs(BaseModel):
+ type: Literal["error"] = "error"
+ model_id: str
+ format: FORMAT_CHOICES_TYPE
+
class HelpArgs(BaseModel):
type: Literal["help"] = "help"
@@ -33,7 +40,7 @@ def get_help(self, ctx):
@click.option('-f', '--format',
type=click.Choice(FORMAT_CHOICES, case_sensitive=False),
default="markdown",
- help="""IPython display to use when rendering output. [default="markdown"]"""
+ help=FORMAT_HELP
)
@click.option('-r', '--reset', is_flag=True,
help="""Clears the conversation transcript used when interacting with an
@@ -56,6 +63,20 @@ def line_magic_parser():
Invokes a subcommand.
"""
+@line_magic_parser.command(name='error')
+@click.argument('model_id')
+@click.option('-f', '--format',
+ type=click.Choice(FORMAT_CHOICES, case_sensitive=False),
+ default="markdown",
+ help=FORMAT_HELP
+)
+def error_subparser(**kwargs):
+ """
+ Explains the most recent error. Takes the same options (except -r) as
+ the basic `%%ai` command.
+ """
+ return ErrorArgs(**kwargs)
+
@line_magic_parser.command(name='help')
def help_subparser():
"""Show this message and exit."""