From d93e8155e42667e559e83efc01a0795e69fd6c96 Mon Sep 17 00:00:00 2001 From: Sarthak5598 Date: Sat, 29 Jun 2024 18:20:54 +0530 Subject: [PATCH 1/6] added the chatbot --- app.py | 61 ++++++++++++++++++++++---- poetry.lock | 117 ++++++++++++++++++++++++++++++++++++++----------- pyproject.toml | 2 + 3 files changed, 145 insertions(+), 35 deletions(-) diff --git a/app.py b/app.py index 78c530f..96e581b 100644 --- a/app.py +++ b/app.py @@ -1,10 +1,13 @@ import logging import os +from datetime import datetime, timezone from pathlib import Path import git +from cachetools import TTLCache from dotenv import load_dotenv from flask import Flask, jsonify, request +from openai import OpenAI from slack import WebClient from slack_sdk.errors import SlackApiError from slackeventsapi import SlackEventAdapter @@ -27,6 +30,17 @@ client = WebClient(token=os.environ["SLACK_TOKEN"]) client.chat_postMessage(channel=DEPLOYS_CHANNEL_NAME, text="bot started v1.9 240611-1 top") +template = """ + You're a Software Engineer (Mentor) at OWASP, + Your job is to provide help to contributors with a short message. 
+ Contributor' Question :{Doubt} +""" + + +OpenAI_client = OpenAI(api_key=os.getenv("OPENAI_API_KEY")) + +cache = TTLCache(maxsize=100, ttl=86400) + @app.route("/slack/events", methods=["POST"]) def slack_events(): @@ -140,13 +154,42 @@ def handle_message(payload): text=f"Error sending message: {response['error']}", ) logging.error(f"Error sending message: {response['error']}") + # if message.get("channel_type") == "im": + # user = message["user"] # The user ID of the person who sent the message + # text = message.get("text", "") # The text of the message + # try: + # if message.get("user") != bot_user_id: + # client.chat_postMessage(channel=JOINS_CHANNEL_ID, text=f"<@{user}> said {text}") + # # Respond to the direct message + # client.chat_postMessage(channel=user, text=f"Hello <@{user}>, you said: {text}") + # except SlackApiError as e: + # print(f"Error sending response: {e.response['error']}") + + +@slack_events_adapter.on("message") +def gpt_bot(payload): + token_limit = 1000 + token_per_prompt = 80 + user = "D078YQ93TSL" + message = payload.get("event", {}) if message.get("channel_type") == "im": - user = message["user"] # The user ID of the person who sent the message - text = message.get("text", "") # The text of the message - try: - if message.get("user") != bot_user_id: - client.chat_postMessage(channel=JOINS_CHANNEL_ID, text=f"<@{user}> said {text}") - # Respond to the direct message - client.chat_postMessage(channel=user, text=f"Hello <@{user}>, you said: {text}") - except SlackApiError as e: - print(f"Error sending response: {e.response['error']}") + doubt = message.get("text", "") + prompt = template.format(doubt=doubt) + + today = datetime.now(timezone.utc).date() + rate_limit_key = f"global_daily_request_{today}" + total_token_used = cache.get(rate_limit_key, 0) + + if len(prompt) > 20: + client.chat_postMessage(channel=user, text="Please enter less than 20 characters") + if total_token_used + token_per_prompt > token_limit: + 
client.chat_postMessage(channel=user, text="Exceeds Token Limit") + else: + response = OpenAI_client.Completion.create( + messages=[{"role": "user", "content": prompt}], + model="gpt-3.5-turbo-0125", + max_tokens=20, + ) + answer = response.choices[0].message.content + client.chat_postMessage(channel=user, text=f"{answer}") + cache[rate_limit_key] = total_token_used + token_per_prompt diff --git a/poetry.lock b/poetry.lock index aea0d53..7083677 100644 --- a/poetry.lock +++ b/poetry.lock @@ -212,6 +212,17 @@ files = [ {file = "blinker-1.8.2.tar.gz", hash = "sha256:8f77b09d3bf7c795e969e9486f39c2c5e9c39d4ee07424be2bc594ece9642d83"}, ] +[[package]] +name = "cachetools" +version = "5.3.3" +description = "Extensible memoizing collections and decorators" +optional = false +python-versions = ">=3.7" +files = [ + {file = "cachetools-5.3.3-py3-none-any.whl", hash = "sha256:0abad1021d3f8325b2fc1d2e9c8b9c9d57b04c3932657a72465447332c24d945"}, + {file = "cachetools-5.3.3.tar.gz", hash = "sha256:ba29e2dfa0b8b556606f097407ed1aa62080ee108ab0dc5ec9d6a723a007d105"}, +] + [[package]] name = "certifi" version = "2024.6.2" @@ -362,6 +373,17 @@ files = [ graph = ["objgraph (>=1.7.2)"] profile = ["gprof2dot (>=2022.7.29)"] +[[package]] +name = "distro" +version = "1.9.0" +description = "Distro - an OS platform information API" +optional = false +python-versions = ">=3.6" +files = [ + {file = "distro-1.9.0-py3-none-any.whl", hash = "sha256:7bffd925d65168f85027d8da9af6bddab658135b840670a223589bc0c8ef02b2"}, + {file = "distro-1.9.0.tar.gz", hash = "sha256:2fa77c6fd8940f116ee1d6b94a2f90b13b5ea8d019b98bc8bafdcabcdd9bdbed"}, +] + [[package]] name = "exceptiongroup" version = "1.2.1" @@ -790,6 +812,29 @@ files = [ {file = "multidict-6.0.5.tar.gz", hash = "sha256:f7e301075edaf50500f0b341543c41194d8df3ae5caf4702f2095f3ca73dd8da"}, ] +[[package]] +name = "openai" +version = "1.35.7" +description = "The official Python library for the openai API" +optional = false +python-versions = ">=3.7.1" 
+files = [ + {file = "openai-1.35.7-py3-none-any.whl", hash = "sha256:3d1e0b0aac9b0db69a972d36dc7efa7563f8e8d65550b27a48f2a0c2ec207e80"}, + {file = "openai-1.35.7.tar.gz", hash = "sha256:009bfa1504c9c7ef64d87be55936d142325656bbc6d98c68b669d6472e4beb09"}, +] + +[package.dependencies] +anyio = ">=3.5.0,<5" +distro = ">=1.7.0,<2" +httpx = ">=0.23.0,<1" +pydantic = ">=1.9.0,<3" +sniffio = "*" +tqdm = ">4" +typing-extensions = ">=4.7,<5" + +[package.extras] +datalib = ["numpy (>=1)", "pandas (>=1.2.3)", "pandas-stubs (>=1.1.0.11)"] + [[package]] name = "packaging" version = "24.1" @@ -1048,28 +1093,28 @@ use-chardet-on-py3 = ["chardet (>=3.0.2,<6)"] [[package]] name = "ruff" -version = "0.4.8" +version = "0.4.10" description = "An extremely fast Python linter and code formatter, written in Rust." optional = false python-versions = ">=3.7" files = [ - {file = "ruff-0.4.8-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:7663a6d78f6adb0eab270fa9cf1ff2d28618ca3a652b60f2a234d92b9ec89066"}, - {file = "ruff-0.4.8-py3-none-macosx_11_0_arm64.whl", hash = "sha256:eeceb78da8afb6de0ddada93112869852d04f1cd0f6b80fe464fd4e35c330913"}, - {file = "ruff-0.4.8-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:aad360893e92486662ef3be0a339c5ca3c1b109e0134fcd37d534d4be9fb8de3"}, - {file = "ruff-0.4.8-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:284c2e3f3396fb05f5f803c9fffb53ebbe09a3ebe7dda2929ed8d73ded736deb"}, - {file = "ruff-0.4.8-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a7354f921e3fbe04d2a62d46707e569f9315e1a613307f7311a935743c51a764"}, - {file = "ruff-0.4.8-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:72584676164e15a68a15778fd1b17c28a519e7a0622161eb2debdcdabdc71883"}, - {file = "ruff-0.4.8-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9678d5c9b43315f323af2233a04d747409d1e3aa6789620083a82d1066a35199"}, - {file = 
"ruff-0.4.8-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:704977a658131651a22b5ebeb28b717ef42ac6ee3b11e91dc87b633b5d83142b"}, - {file = "ruff-0.4.8-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d05f8d6f0c3cce5026cecd83b7a143dcad503045857bc49662f736437380ad45"}, - {file = "ruff-0.4.8-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:6ea874950daca5697309d976c9afba830d3bf0ed66887481d6bca1673fc5b66a"}, - {file = "ruff-0.4.8-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:fc95aac2943ddf360376be9aa3107c8cf9640083940a8c5bd824be692d2216dc"}, - {file = "ruff-0.4.8-py3-none-musllinux_1_2_i686.whl", hash = "sha256:384154a1c3f4bf537bac69f33720957ee49ac8d484bfc91720cc94172026ceed"}, - {file = "ruff-0.4.8-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:e9d5ce97cacc99878aa0d084c626a15cd21e6b3d53fd6f9112b7fc485918e1fa"}, - {file = "ruff-0.4.8-py3-none-win32.whl", hash = "sha256:6d795d7639212c2dfd01991259460101c22aabf420d9b943f153ab9d9706e6a9"}, - {file = "ruff-0.4.8-py3-none-win_amd64.whl", hash = "sha256:e14a3a095d07560a9d6769a72f781d73259655919d9b396c650fc98a8157555d"}, - {file = "ruff-0.4.8-py3-none-win_arm64.whl", hash = "sha256:14019a06dbe29b608f6b7cbcec300e3170a8d86efaddb7b23405cb7f7dcaf780"}, - {file = "ruff-0.4.8.tar.gz", hash = "sha256:16d717b1d57b2e2fd68bd0bf80fb43931b79d05a7131aa477d66fc40fbd86268"}, + {file = "ruff-0.4.10-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:5c2c4d0859305ac5a16310eec40e4e9a9dec5dcdfbe92697acd99624e8638dac"}, + {file = "ruff-0.4.10-py3-none-macosx_11_0_arm64.whl", hash = "sha256:a79489607d1495685cdd911a323a35871abfb7a95d4f98fc6f85e799227ac46e"}, + {file = "ruff-0.4.10-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b1dd1681dfa90a41b8376a61af05cc4dc5ff32c8f14f5fe20dba9ff5deb80cd6"}, + {file = "ruff-0.4.10-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:c75c53bb79d71310dc79fb69eb4902fba804a81f374bc86a9b117a8d077a1784"}, + {file = 
"ruff-0.4.10-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:18238c80ee3d9100d3535d8eb15a59c4a0753b45cc55f8bf38f38d6a597b9739"}, + {file = "ruff-0.4.10-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:d8f71885bce242da344989cae08e263de29752f094233f932d4f5cfb4ef36a81"}, + {file = "ruff-0.4.10-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:330421543bd3222cdfec481e8ff3460e8702ed1e58b494cf9d9e4bf90db52b9d"}, + {file = "ruff-0.4.10-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9e9b6fb3a37b772628415b00c4fc892f97954275394ed611056a4b8a2631365e"}, + {file = "ruff-0.4.10-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0f54c481b39a762d48f64d97351048e842861c6662d63ec599f67d515cb417f6"}, + {file = "ruff-0.4.10-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:67fe086b433b965c22de0b4259ddfe6fa541c95bf418499bedb9ad5fb8d1c631"}, + {file = "ruff-0.4.10-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:acfaaab59543382085f9eb51f8e87bac26bf96b164839955f244d07125a982ef"}, + {file = "ruff-0.4.10-py3-none-musllinux_1_2_i686.whl", hash = "sha256:3cea07079962b2941244191569cf3a05541477286f5cafea638cd3aa94b56815"}, + {file = "ruff-0.4.10-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:338a64ef0748f8c3a80d7f05785930f7965d71ca260904a9321d13be24b79695"}, + {file = "ruff-0.4.10-py3-none-win32.whl", hash = "sha256:ffe3cd2f89cb54561c62e5fa20e8f182c0a444934bf430515a4b422f1ab7b7ca"}, + {file = "ruff-0.4.10-py3-none-win_amd64.whl", hash = "sha256:67f67cef43c55ffc8cc59e8e0b97e9e60b4837c8f21e8ab5ffd5d66e196e25f7"}, + {file = "ruff-0.4.10-py3-none-win_arm64.whl", hash = "sha256:dd1fcee327c20addac7916ca4e2653fbbf2e8388d8a6477ce5b4e986b68ae6c0"}, + {file = "ruff-0.4.10.tar.gz", hash = "sha256:3aa4f2bc388a30d346c56524f7cacca85945ba124945fe489952aadb6b5cd804"}, ] [[package]] @@ -1114,17 +1159,17 @@ sqlite = ["aiosqlite (>=0.18,<0.21)"] type = "git" url = 
"https://github.com/Owasp-blt/slack-machine" reference = "HEAD" -resolved_reference = "fdc12404c9adb867b5b1d1141e904851e3b80b86" +resolved_reference = "d7c5959826d3d3ffc701922df144538c0880501f" [[package]] name = "slack-sdk" -version = "3.28.0" +version = "3.30.0" description = "The Slack API Platform SDK for Python" optional = false python-versions = ">=3.6" files = [ - {file = "slack_sdk-3.28.0-py2.py3-none-any.whl", hash = "sha256:1a47700ae20566575ce494d1d1b6f594b011d06aad28e3b8e28c052cad1d6c4c"}, - {file = "slack_sdk-3.28.0.tar.gz", hash = "sha256:e6ece5cb70850492637e002e3b0d26d307939f4a33203b88cb274f7475c9a144"}, + {file = "slack_sdk-3.30.0-py2.py3-none-any.whl", hash = "sha256:42d1c95f7159887ddb4841d461fbe7ab0c48e4968f3cd44eaaa792cf109f4425"}, + {file = "slack_sdk-3.30.0.tar.gz", hash = "sha256:001a4013698d3f244645add49c80adf8addc3a6bf633193848f7cbae3d387e0b"}, ] [package.extras] @@ -1195,6 +1240,26 @@ files = [ {file = "tomli-2.0.1.tar.gz", hash = "sha256:de526c12914f0c550d15924c62d72abc48d6fe7364aa87328337a31007fe8a4f"}, ] +[[package]] +name = "tqdm" +version = "4.66.4" +description = "Fast, Extensible Progress Meter" +optional = false +python-versions = ">=3.7" +files = [ + {file = "tqdm-4.66.4-py3-none-any.whl", hash = "sha256:b75ca56b413b030bc3f00af51fd2c1a1a5eac6a0c1cca83cbb37a5c52abce644"}, + {file = "tqdm-4.66.4.tar.gz", hash = "sha256:e4d936c9de8727928f3be6079590e97d9abfe8d39a590be678eb5919ffc186bb"}, +] + +[package.dependencies] +colorama = {version = "*", markers = "platform_system == \"Windows\""} + +[package.extras] +dev = ["pytest (>=6)", "pytest-cov", "pytest-timeout", "pytest-xdist"] +notebook = ["ipywidgets (>=6)"] +slack = ["slack-sdk"] +telegram = ["requests"] + [[package]] name = "typing-extensions" version = "4.12.2" @@ -1236,13 +1301,13 @@ devenv = ["check-manifest", "pytest (>=4.3)", "pytest-cov", "pytest-mock (>=3.3) [[package]] name = "urllib3" -version = "2.2.1" +version = "2.2.2" description = "HTTP library with thread-safe 
connection pooling, file post, and more." optional = false python-versions = ">=3.8" files = [ - {file = "urllib3-2.2.1-py3-none-any.whl", hash = "sha256:450b20ec296a467077128bff42b73080516e71b56ff59a60a02bef2232c4fa9d"}, - {file = "urllib3-2.2.1.tar.gz", hash = "sha256:d0570876c61ab9e520d776c38acbbb5b05a776d3f9ff98a5c8fd5162a444cf19"}, + {file = "urllib3-2.2.2-py3-none-any.whl", hash = "sha256:a448b2f64d686155468037e1ace9f2d2199776e17f0a46610480d311f73e3472"}, + {file = "urllib3-2.2.2.tar.gz", hash = "sha256:dd505485549a7a552833da5e6063639d0d177c04f23bc3864e41e5dc5f612168"}, ] [package.extras] @@ -1374,4 +1439,4 @@ multidict = ">=4.0" [metadata] lock-version = "2.0" python-versions = "^3.10" -content-hash = "fce39534ce61c82b13655d0770b81c5b1f569a3022e646912e4b0bda527050c8" +content-hash = "4766615f4a60dc40b814dc81480c86cd7a6ba1db8d1b406b9036a618c7ef9de7" diff --git a/pyproject.toml b/pyproject.toml index 3a5fcf3..9f13671 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -14,6 +14,8 @@ gitpython = "^3.1.43" slack-machine = { git = "https://github.com/Owasp-blt/slack-machine" } slackeventsapi = "^3.0.1" slack-sdk = "^3.27.2" +openai = "^1.35.7" +cachetools = "^5.3.3" [tool.poetry.group.dev.dependencies] pytest = "^8.2.2" From e60c2d488a8d6163381377f5c7c7cce3cbfefccf Mon Sep 17 00:00:00 2001 From: Sarthak5598 Date: Sun, 30 Jun 2024 01:36:25 +0530 Subject: [PATCH 2/6] added logs and improved naming convention --- app.py | 28 +++++++++++++++++++++++----- 1 file changed, 23 insertions(+), 5 deletions(-) diff --git a/app.py b/app.py index 96e581b..71af680 100644 --- a/app.py +++ b/app.py @@ -37,7 +37,7 @@ """ -OpenAI_client = OpenAI(api_key=os.getenv("OPENAI_API_KEY")) +openai_client = OpenAI(api_key=os.getenv("OPENAI_API_KEY")) cache = TTLCache(maxsize=100, ttl=86400) @@ -140,7 +140,6 @@ def handle_message(payload): ): user = message.get("user") channel = message.get("channel") - logging.info(f"detected contribute sending to channel: {channel}") response = 
client.chat_postMessage( channel=channel, text=( @@ -172,6 +171,7 @@ def gpt_bot(payload): token_per_prompt = 80 user = "D078YQ93TSL" message = payload.get("event", {}) + if message.get("channel_type") == "im": doubt = message.get("text", "") prompt = template.format(doubt=doubt) @@ -180,16 +180,34 @@ def gpt_bot(payload): rate_limit_key = f"global_daily_request_{today}" total_token_used = cache.get(rate_limit_key, 0) - if len(prompt) > 20: + if len(doubt) > 20: client.chat_postMessage(channel=user, text="Please enter less than 20 characters") + return + if total_token_used + token_per_prompt > token_limit: client.chat_postMessage(channel=user, text="Exceeds Token Limit") - else: - response = OpenAI_client.Completion.create( + return + + try: + response = openai_client.Completion.create( messages=[{"role": "user", "content": prompt}], model="gpt-3.5-turbo-0125", max_tokens=20, ) answer = response.choices[0].message.content + except Exception as e: + logging.error(f"OpenAI API request failed: {e}") + client.chat_postMessage( + channel=user, text="An error occurred while processing your request." 
+ ) + return + + try: client.chat_postMessage(channel=user, text=f"{answer}") cache[rate_limit_key] = total_token_used + token_per_prompt + + # Log the user's question and GPT's answer + logging.info(f"User's Question: {doubt}") + logging.info(f"GPT's Answer: {answer}") + except SlackApiError as e: + logging.error(f"Error sending message to Slack: {e.response['error']}") From 679a7eb3d04b328ecfa67523dd7f7c20ca8040d5 Mon Sep 17 00:00:00 2001 From: Sarthak5598 Date: Sun, 30 Jun 2024 01:37:51 +0530 Subject: [PATCH 3/6] pre commit --- app.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/app.py b/app.py index 71af680..85b76cb 100644 --- a/app.py +++ b/app.py @@ -183,11 +183,11 @@ def gpt_bot(payload): if len(doubt) > 20: client.chat_postMessage(channel=user, text="Please enter less than 20 characters") return - + if total_token_used + token_per_prompt > token_limit: client.chat_postMessage(channel=user, text="Exceeds Token Limit") return - + try: response = openai_client.Completion.create( messages=[{"role": "user", "content": prompt}], From 5215385594a2bbed505ee033d3b9073cfdd3169c Mon Sep 17 00:00:00 2001 From: Sarthak5598 Date: Mon, 1 Jul 2024 21:13:45 +0530 Subject: [PATCH 4/6] increased token per question --- app.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/app.py b/app.py index 85b76cb..94ea91c 100644 --- a/app.py +++ b/app.py @@ -168,7 +168,7 @@ def handle_message(payload): @slack_events_adapter.on("message") def gpt_bot(payload): token_limit = 1000 - token_per_prompt = 80 + token_per_prompt = 100 user = "D078YQ93TSL" message = payload.get("event", {}) @@ -180,8 +180,8 @@ def gpt_bot(payload): rate_limit_key = f"global_daily_request_{today}" total_token_used = cache.get(rate_limit_key, 0) - if len(doubt) > 20: - client.chat_postMessage(channel=user, text="Please enter less than 20 characters") + if len(doubt) > 50: + client.chat_postMessage(channel=user, text="Please enter less than 50 characters") return if 
total_token_used + token_per_prompt > token_limit: From 34deb0063e11532feea7e1efa6d3fff61325d550 Mon Sep 17 00:00:00 2001 From: Sarthak5598 Date: Thu, 4 Jul 2024 13:07:10 +0530 Subject: [PATCH 5/6] resolved conflicts --- app.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/app.py b/app.py index 94ea91c..74fb968 100644 --- a/app.py +++ b/app.py @@ -179,8 +179,8 @@ def gpt_bot(payload): today = datetime.now(timezone.utc).date() rate_limit_key = f"global_daily_request_{today}" total_token_used = cache.get(rate_limit_key, 0) - - if len(doubt) > 50: + doubt_limit =50 + if len(doubt) >= doubt_limit: client.chat_postMessage(channel=user, text="Please enter less than 50 characters") return From b90b2867db286f03003ff45ec022a6c26bd54554 Mon Sep 17 00:00:00 2001 From: Sarthak5598 Date: Thu, 4 Jul 2024 23:06:44 +0530 Subject: [PATCH 6/6] pre-commit --- app.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/app.py b/app.py index 74fb968..eb7cc18 100644 --- a/app.py +++ b/app.py @@ -179,7 +179,7 @@ def gpt_bot(payload): today = datetime.now(timezone.utc).date() rate_limit_key = f"global_daily_request_{today}" total_token_used = cache.get(rate_limit_key, 0) - doubt_limit =50 + doubt_limit = 50 if len(doubt) >= doubt_limit: client.chat_postMessage(channel=user, text="Please enter less than 50 characters") return