From 89fc0f6e24e14e11a1b2cdb9165dceabdc9b7a5e Mon Sep 17 00:00:00 2001
From: Marius-Constantin Dinu
Date: Mon, 24 Apr 2023 00:47:27 +0200
Subject: [PATCH] Fixed preview growth and GPT-4 context for templates

---
 symai/__init__.py                 |  2 +-
 symai/backend/base.py             |  2 +-
 symai/backend/engine_gptX_chat.py | 43 ++++++++++++++++++-------------
 symai/symbol.py                   |  4 +--
 4 files changed, 29 insertions(+), 22 deletions(-)

diff --git a/symai/__init__.py b/symai/__init__.py
index 72da84f4..3dd44c53 100644
--- a/symai/__init__.py
+++ b/symai/__init__.py
@@ -3,7 +3,7 @@
 import logging
 
-SYMAI_VERSION = "0.2.17"
+SYMAI_VERSION = "0.2.18"
 __version__ = SYMAI_VERSION
 __root_dir__ = os.getcwd()
diff --git a/symai/backend/base.py b/symai/backend/base.py
index 4b002f0d..c1287177 100644
--- a/symai/backend/base.py
+++ b/symai/backend/base.py
@@ -47,7 +47,7 @@ def __call__(self, *args: Any, **kwds: Any) -> List[str]:
         return res
 
     def preview(self, wrp_params):
-        return wrp_params['prompts'][0]
+        return str(wrp_params['prompts'])
 
     def forward(self, *args: Any, **kwds: Any) -> List[str]:
         raise NotADirectoryError()
diff --git a/symai/backend/engine_gptX_chat.py b/symai/backend/engine_gptX_chat.py
index 042d01ff..fb9755e5 100644
--- a/symai/backend/engine_gptX_chat.py
+++ b/symai/backend/engine_gptX_chat.py
@@ -46,17 +46,20 @@ def forward(self, prompts: List[str], *args, **kwargs) -> List[str]:
         presence_penalty = kwargs['presence_penalty'] if 'presence_penalty' in kwargs else 0
         top_p = kwargs['top_p'] if 'top_p' in kwargs else 1
 
+        if suffix:
+            prompts_[1]['content'] += f"[ASSISTANT_PLACEHOLDER]{suffix}"
+            prompts_[1]['content'] += f"\n----------------\n Only generate content for the placeholder [ASSISTANT_PLACEHOLDER] following the instructions and context:\n"
+
         try:
             res = openai.ChatCompletion.create(model=model,
-                                               messages=prompts_,
-                                               #suffix=suffix, #TODO fix suffix and template
-                                               max_tokens=max_tokens,
-                                               temperature=temperature,
-                                               frequency_penalty=frequency_penalty,
-                                               presence_penalty=presence_penalty,
-                                               top_p=top_p,
-                                               stop=stop,
-                                               n=1)
+                                               messages=prompts_,
+                                               max_tokens=max_tokens,
+                                               temperature=temperature,
+                                               frequency_penalty=frequency_penalty,
+                                               presence_penalty=presence_penalty,
+                                               top_p=top_p,
+                                               stop=stop,
+                                               n=1)
             output_handler = kwargs['output_handler'] if 'output_handler' in kwargs else None
             if output_handler:
                 output_handler(res)
@@ -94,13 +97,17 @@ def forward(self, prompts: List[str], *args, **kwargs) -> List[str]:
         return rsp if isinstance(prompts, list) else rsp[0]
 
     def prepare(self, args, kwargs, wrp_params):
-        _non_verbose_output = """You do not output anything else, like verbose preambles or post explanation, such as "Sure, let me...", "Hope that was helpful...", "Yes, I can help you with that...", etc.
+        _non_verbose_output = """[META_INSTRUCTIONS_START] You do not output anything else, like verbose preambles or post explanation, such as "Sure, let me...", "Hope that was helpful...", "Yes, I can help you with that...", etc.
         Consider well formatted output, e.g. for sentences use punctuation, spaces etc. or for code use indentation, etc.
+        Never add meta instructions information to your output!
+        [META_INSTRUCTIONS_END]
         --------------
         """
         user: str = ""
-        system: str = _non_verbose_output
+        system: str = ""
+
+        system += _non_verbose_output # system relevant instruction
 
         # add static context
@@ -110,20 +117,20 @@ def prepare(self, args, kwargs, wrp_params):
             system += f"General Context:\n{static_ctxt}\n\n----------------\n\n"
         if wrp_params['prompt'] is not None:
             system += str(wrp_params['prompt'])
-        # build operation
-        operation = f'{system}\n' if system and len(system) > 0 else ''
+        # build system
+        system = f'{system}\n' if system and len(system) > 0 else ''
         # add examples
         examples: List[str] = wrp_params['examples']
         if examples:
-            operation += f"Examples:\n"
-            operation += f"{str(examples)}\n"
+            system += f"Examples:\n"
+            system += f"{str(examples)}\n"
         # add dynamic context
         if len(dyn_ctxt) > 0:
-            operation += f"\n\n----------------\n\nDynamic Context:\n{dyn_ctxt}"
+            system += f"\n\n----------------\n\nDynamic Context:\n{dyn_ctxt}"
         # add method payload
         payload = wrp_params['payload'] if 'payload' in wrp_params else None
         if payload is not None:
-            operation += f"\n\n----------------\n\nAdditional Context: {payload}"
+            system += f"\n\n----------------\n\nAdditional Context: {payload}"
 
         # add user request
         suffix: str = wrp_params['processed_input']
@@ -133,6 +140,6 @@
             user += f"{suffix}"
 
         wrp_params['prompts'] = [
-            { "role": "system", "content": operation },
+            { "role": "system", "content": system },
             { "role": "user", "content": user },
         ]
diff --git a/symai/symbol.py b/symai/symbol.py
index 1e2337dd..792040d6 100644
--- a/symai/symbol.py
+++ b/symai/symbol.py
@@ -595,9 +595,9 @@ def stream(self, expr: "Expression",
             size = max_tokens - r.size()
 
             # simulate the expression
-            r = expr(r, max_tokens=size, preview=True, **kwargs)
+            prev = expr(r, max_tokens=size, preview=True, **kwargs)
             # if the expression is too big, split it
-            if r.size() > max_tokens:
+            if prev.size() > max_tokens:
                 # split
                 r1_split = r.value[:len(r)//2]
                 r = expr(r1_split, max_tokens=size, **kwargs)
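
----------------------------------------------------------------------
Notes (illustrative sketches, not part of the diff)

On the base.py hunk: an engine preview now stringifies the complete
`prompts` payload instead of returning only the first entry. For the
chat engine that payload is the two-message list assembled in
`prepare`, so a preview reflects the full GPT-4 context (system and
user messages). Roughly, with abbreviated illustrative contents:

    wrp_params = {'prompts': [
        {"role": "system", "content": "General Context: ..."},
        {"role": "user",   "content": "User request ..."},
    ]}
    # before: wrp_params['prompts'][0]   -> only the system message dict
    # after:  str(wrp_params['prompts']) -> both messages as one string
    preview_text = str(wrp_params['prompts'])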
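
On the engine_gptX_chat.py hunks: `openai.ChatCompletion.create` has no
`suffix` parameter (unlike the completions endpoint), hence the removed
`#TODO fix suffix and template` line that this patch resolves. The
template tail is instead folded into the user message behind an
`[ASSISTANT_PLACEHOLDER]` marker, and the model is instructed to
generate only the placeholder content. A minimal standalone sketch of
the same workaround, assuming the pre-1.0 `openai` package; the
`fill_template` helper and model name are illustrative:

    import openai  # pre-1.0 interface, as used by the engine

    def fill_template(prefix: str, suffix: str, model: str = "gpt-4") -> str:
        # Emulate completion-style `suffix` support on the chat endpoint:
        # append the template tail behind a placeholder marker and tell
        # the model to produce only the placeholder content.
        user = prefix
        if suffix:
            user += f"[ASSISTANT_PLACEHOLDER]{suffix}"
            user += ("\n----------------\n Only generate content for the placeholder "
                     "[ASSISTANT_PLACEHOLDER] following the instructions and context:\n")
        res = openai.ChatCompletion.create(model=model,
                                           messages=[{"role": "user", "content": user}],
                                           max_tokens=256,
                                           n=1)
        return res["choices"][0]["message"]["content"]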
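
On the symbol.py hunk, which is the "preview growth" fix from the
subject line: `Symbol.stream` used to assign the simulated (preview)
result back to `r`, so the size check measured the chunk plus all
prompt overhead, and a subsequent split halved the grown preview rather
than the original data. Binding the preview to `prev` keeps `r` intact.
A toy reproduction of the bug pattern, with illustrative stand-in names
(symai operates on Symbols, not plain strings):

    def simulate(chunk: str) -> str:
        """Stand-in for expr(chunk, preview=True): a preview carries the
        assembled prompt, so it is longer than the raw chunk itself."""
        return "SYSTEM PROMPT ... USER: " + chunk

    chunk, max_tokens = "moderately long input chunk", 40

    # before the patch: the preview overwrote the chunk
    r = simulate(chunk)               # r grew by the prompt overhead
    split_buggy = r[:len(r)//2]       # halves the preview, prompt text included

    # after the patch: the preview is bound to its own name
    prev = simulate(chunk)
    if len(prev) > max_tokens:        # size check still sees the full context
        split_fixed = chunk[:len(chunk)//2]  # but the original chunk is split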