Skip to content

Commit

Permalink
Adds %ai error command, documents it
Browse files Browse the repository at this point in the history
  • Loading branch information
JasonWeill committed May 12, 2023
1 parent 088e480 commit e6f291e
Show file tree
Hide file tree
Showing 2 changed files with 216 additions and 16 deletions.
169 changes: 158 additions & 11 deletions examples/commands.ipynb
Original file line number Diff line number Diff line change
Expand Up @@ -48,11 +48,13 @@
"| Command | Description |\n",
"| ------- | ----------- |\n",
"| `help` | Display a list of supported commands|\n",
"| `list` | Display a list of models that you can use (optionally, for a single provider)|\n"
"| `list` | Display a list of models that you can use (optionally, for a single provider)|\n",
"| `error` | Explain the last error received. Takes a model, as %%ai does.|\n"
],
"text/plain": [
"help - Display a list of supported commands\n",
"list - Display a list of models that you can use (optionally, for a single provider)\n"
"list - Display a list of models that you can use (optionally, for a single provider)\n",
"error - Explain the last error received. Takes a model, as %%ai does.\n"
]
},
"execution_count": 2,
Expand All @@ -67,6 +69,32 @@
{
"cell_type": "code",
"execution_count": 3,
"id": "e1f2b767-0834-4b21-b132-093730efaffb",
"metadata": {
"tags": []
},
"outputs": [
{
"data": {
"text/markdown": [
"There have been no errors since the kernel started."
],
"text/plain": [
"There have been no errors since the kernel started."
]
},
"execution_count": 3,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"%ai error chatgpt"
]
},
{
"cell_type": "code",
"execution_count": 4,
"id": "0f073caa-265d-40d6-b537-d025b8df9f41",
"metadata": {},
"outputs": [
Expand All @@ -75,19 +103,19 @@
"text/markdown": [
"Cannot determine model provider from model ID `foo`.\n",
"\n",
"To see a list of models you can use, run `%ai list`.\n",
"To see a list of models you can use, run `%ai list`\n",
"\n",
"If you were trying to run a command, run `%ai help` to see a list of commands."
],
"text/plain": [
"Cannot determine model provider from model ID 'foo'.\n",
"\n",
"To see a list of models you can use, run '%ai list'.\n",
"To see a list of models you can use, run '%ai list'\n",
"\n",
"If you were trying to run a command, run '%ai help' to see a list of commands."
]
},
"execution_count": 3,
"execution_count": 4,
"metadata": {},
"output_type": "execute_result"
}
Expand All @@ -98,7 +126,7 @@
},
{
"cell_type": "code",
"execution_count": 4,
"execution_count": 5,
"id": "bad2d8a8-6141-4247-9af7-7583426c59a6",
"metadata": {},
"outputs": [
Expand All @@ -113,6 +141,7 @@
"| `huggingface_hub` | `HUGGINGFACEHUB_API_TOKEN` | <abbr title=\"You have set this environment variable, so you can use this provider's models.\">✅</abbr> | This provider does not define a list of models. |\n",
"| `openai` | `OPENAI_API_KEY` | <abbr title=\"You have set this environment variable, so you can use this provider's models.\">✅</abbr> | `openai:text-davinci-003`, `openai:text-davinci-002`, `openai:text-curie-001`, `openai:text-babbage-001`, `openai:text-ada-001`, `openai:davinci`, `openai:curie`, `openai:babbage`, `openai:ada` |\n",
"| `openai-chat` | `OPENAI_API_KEY` | <abbr title=\"You have set this environment variable, so you can use this provider's models.\">✅</abbr> | `openai-chat:gpt-4`, `openai-chat:gpt-4-0314`, `openai-chat:gpt-4-32k`, `openai-chat:gpt-4-32k-0314`, `openai-chat:gpt-3.5-turbo`, `openai-chat:gpt-3.5-turbo-0301` |\n",
"| `openai-chat-new` | `OPENAI_API_KEY` | <abbr title=\"You have set this environment variable, so you can use this provider's models.\">✅</abbr> | `openai-chat-new:gpt-4`, `openai-chat-new:gpt-4-0314`, `openai-chat-new:gpt-4-32k`, `openai-chat-new:gpt-4-32k-0314`, `openai-chat-new:gpt-3.5-turbo`, `openai-chat-new:gpt-3.5-turbo-0301` |\n",
"| `sagemaker-endpoint` | Not applicable. | <abbr title=\"Not applicable\">N/A</abbr> | This provider does not define a list of models. |\n"
],
"text/plain": [
Expand Down Expand Up @@ -166,12 +195,21 @@
"* openai-chat:gpt-3.5-turbo\n",
"* openai-chat:gpt-3.5-turbo-0301\n",
"\n",
"openai-chat-new\n",
"Requires environment variable OPENAI_API_KEY (set)\n",
"* openai-chat-new:gpt-4\n",
"* openai-chat-new:gpt-4-0314\n",
"* openai-chat-new:gpt-4-32k\n",
"* openai-chat-new:gpt-4-32k-0314\n",
"* openai-chat-new:gpt-3.5-turbo\n",
"* openai-chat-new:gpt-3.5-turbo-0301\n",
"\n",
"sagemaker-endpoint\n",
"* This provider does not define a list of models.\n",
"\n"
]
},
"execution_count": 4,
"execution_count": 5,
"metadata": {},
"output_type": "execute_result"
}
Expand All @@ -182,7 +220,7 @@
},
{
"cell_type": "code",
"execution_count": 5,
"execution_count": 6,
"id": "4d84fcac-7348-4c02-9ec3-34b300ec8459",
"metadata": {},
"outputs": [
Expand All @@ -208,7 +246,7 @@
"\n"
]
},
"execution_count": 5,
"execution_count": 6,
"metadata": {},
"output_type": "execute_result"
}
Expand All @@ -219,9 +257,118 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 7,
"id": "b56ff0e3-42c2-4927-affd-be6a089dfa43",
"metadata": {},
"outputs": [
{
"ename": "SyntaxError",
"evalue": "Missing parentheses in call to 'print'. Did you mean print(...)? (1142230402.py, line 1)",
"output_type": "error",
"traceback": [
"\u001b[0;36m Cell \u001b[0;32mIn[7], line 1\u001b[0;36m\u001b[0m\n\u001b[0;31m print 'foo'\u001b[0m\n\u001b[0m ^\u001b[0m\n\u001b[0;31mSyntaxError\u001b[0m\u001b[0;31m:\u001b[0m Missing parentheses in call to 'print'. Did you mean print(...)?\n"
]
}
],
"source": [
"print 'foo'"
]
},
{
"cell_type": "code",
"execution_count": 8,
"id": "63581a74-9237-4f11-aff2-48612c97cb27",
"metadata": {
"tags": []
},
"outputs": [
{
"data": {
"text/markdown": [
"The error message \"SyntaxError: Missing parentheses in call to 'print'. Did you mean print(...)?\", occurs when attempting to use the print function without parentheses in Python 3, which is not syntactically valid. In Python 2, it was possible to use the print statement without parentheses, but in Python 3 it has become a function and its use requires parentheses. The error message suggests that the print statement should be replaced with print() to rectify the error."
],
"text/plain": [
"<IPython.core.display.Markdown object>"
]
},
"execution_count": 8,
"metadata": {
"text/markdown": {
"jupyter_ai": {
"model_id": "gpt-3.5-turbo",
"provider_id": "openai-chat"
}
}
},
"output_type": "execute_result"
}
],
"source": [
"%ai error chatgpt"
]
},
{
"cell_type": "code",
"execution_count": 9,
"id": "1afe4536-f908-4bd7-aec4-f8d1cc3bf01f",
"metadata": {
"tags": []
},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"/opt/miniconda3/envs/jupyter-ai/lib/python3.10/site-packages/langchain/llms/anthropic.py:134: UserWarning: This Anthropic LLM is deprecated. Please use `from langchain.chat_models import ChatAnthropic` instead\n",
" warnings.warn(\n"
]
},
{
"data": {
"text/markdown": [
"\n",
"The error `Cell In[7], line 1 print 'foo' ^ SyntaxError: Missing parentheses in call to 'print' . Did you mean print(...)?` \n",
"is occurring because in Python 3, the `print` statement has changed. \n",
"\n",
"In Python 2, you could simply do:\n",
"`print 'foo'`\n",
"\n",
"to print the string `foo`. \n",
"\n",
"However, in Python 3, the `print` function requires parentheses:\n",
"`print('foo')`\n",
"\n",
"So the error is telling you that you're trying to use the Python 2 `print` statement in Python 3 code. \n",
"It's suggesting that you likely meant to call the `print()` function instead, with parentheses: \n",
"`print('foo')`.\n",
"\n",
"Adding the parentheses will fix the error."
],
"text/plain": [
"<IPython.core.display.Markdown object>"
]
},
"execution_count": 9,
"metadata": {
"text/markdown": {
"jupyter_ai": {
"model_id": "claude-v1.2",
"provider_id": "anthropic"
}
}
},
"output_type": "execute_result"
}
],
"source": [
"%ai error anthropic:claude-v1.2"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "d47aa5ec-31e0-4670-b1d1-6b4cd7f75832",
"metadata": {},
"outputs": [],
"source": []
}
Expand All @@ -242,7 +389,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.9.16"
"version": "3.10.8"
}
},
"nbformat": 4,
Expand Down
63 changes: 58 additions & 5 deletions packages/jupyter-ai-magics/jupyter_ai_magics/magics.py
Original file line number Diff line number Diff line change
Expand Up @@ -70,6 +70,15 @@ def _repr_mimebundle_(self, include=None, exclude=None):

PROVIDER_NO_MODELS = 'This provider does not define a list of models.'

CANNOT_DETERMINE_MODEL_TEXT = """Cannot determine model provider from model ID '{0}'.
To see a list of models you can use, run '%ai list'"""

CANNOT_DETERMINE_MODEL_MARKDOWN = """Cannot determine model provider from model ID `{0}`.
To see a list of models you can use, run `%ai list`"""


PROMPT_TEMPLATES_BY_FORMAT = {
"code": '{prompt}\n\nProduce output as source code only, with no text or explanation before or after it.',
"html": '{prompt}\n\nProduce output in HTML format only, with no markup before or afterward.',
Expand All @@ -83,7 +92,8 @@ def _repr_mimebundle_(self, include=None, exclude=None):

AI_COMMANDS = {
"help": "Display a list of supported commands",
"list": "Display a list of models that you can use (optionally, for a single provider)"
"list": "Display a list of models that you can use (optionally, for a single provider)",
"error": "Explain the last error received. Takes a model, as %%ai does."
}

class FormatDict(dict):
Expand Down Expand Up @@ -222,6 +232,39 @@ def _ai_list_command_text(self, single_provider=None):

return output

def _ai_error_command(self, model_id):
    """Explain the most recent error in this kernel using a language model.

    Parameters
    ----------
    model_id : str
        A model identifier in the same form ``%%ai`` accepts
        (e.g. ``chatgpt`` or ``anthropic:claude-v1.2``).

    Returns
    -------
    TextOrMarkdown
        Either an error/usage message, a "no errors" notice, or the
        model's explanation of the last error (produced by delegating
        to the ``%ai`` magic itself).
    """
    no_errors = "There have been no errors since the kernel started."

    # Validate the model ID up front, mirroring the message %%ai emits
    # when it cannot resolve a provider.
    provider_id, _ = self._decompose_model_id(model_id)
    if self._get_provider(provider_id) is None:
        return TextOrMarkdown(
            CANNOT_DETERMINE_MODEL_TEXT.format(model_id),
            CANNOT_DETERMINE_MODEL_MARKDOWN.format(model_id)
        )

    # IPython records errors in the user namespace under 'Err', keyed
    # by execution count. If the key is absent, nothing has failed yet.
    ip = get_ipython()
    if 'Err' not in ip.user_ns:
        return TextOrMarkdown(no_errors, no_errors)

    err = ip.user_ns['Err']
    # The most recent error is the one with the highest execution count
    # at or before the previous cell (the current cell is this command).
    prior_counts = [count for count in err if count <= ip.execution_count - 1]
    if not prior_counts:
        return TextOrMarkdown(no_errors, no_errors)

    last_error = err[max(prior_counts)]

    # Delegate to the %ai line magic: first line selects the model, the
    # remainder becomes the prompt asking it to explain the error.
    return self.ai(line=(f"""{model_id}
Explain the Python error:
{last_error}"""))

# Run an AI command using the arguments provided as a space-delimited value
def _ai_command(self, command, args_string):
args = args_string.split() # Split by whitespace
Expand All @@ -239,6 +282,18 @@ def _ai_command(self, command, args_string):
self._ai_list_command_text(provider_id),
self._ai_list_command_markdown(provider_id)
)
elif (command == 'error'):
# Required parameter: model ID
model_id = None
if (len(args) >= 1):
model_id = args[0]
else:
return TextOrMarkdown(
f'Usage: %ai {command} MODEL',
f'Usage: `%ai {command} MODEL`'
)

return self._ai_error_command(model_id)
else:
# This should be unreachable, since unhandled commands are treated like model names
return TextOrMarkdown(
Expand Down Expand Up @@ -310,11 +365,9 @@ def ai(self, line, cell=None):
Provider = self._get_provider(provider_id)
if Provider is None:
return TextOrMarkdown(
f"Cannot determine model provider from model ID '{args.model_id}'.\n\n"
+ "To see a list of models you can use, run '%ai list'.\n\n"
CANNOT_DETERMINE_MODEL_TEXT.format(args.model_id) + "\n\n"
+ "If you were trying to run a command, run '%ai help' to see a list of commands.",
f"Cannot determine model provider from model ID `{args.model_id}`.\n\n"
+ "To see a list of models you can use, run `%ai list`.\n\n"
CANNOT_DETERMINE_MODEL_MARKDOWN.format(args.model_id) + "\n\n"
+ "If you were trying to run a command, run `%ai help` to see a list of commands."
)

Expand Down

0 comments on commit e6f291e

Please sign in to comment.