diff --git a/engines/python/setup/djl_python/output_formatter.py b/engines/python/setup/djl_python/output_formatter.py
index 01f6fbdb6..a53419c00 100644
--- a/engines/python/setup/djl_python/output_formatter.py
+++ b/engines/python/setup/djl_python/output_formatter.py
@@ -51,39 +51,38 @@ def get_sequence_details(request_output: RequestOutput,
 def _json_output_formatter_best_of(request_output: RequestOutput):
     """When multiple sequences are generated, then we hold off sending the result until the generation is finished.
-    The is because, in case of best_of or beam_search, we would know the best sequence only at the end of the
-    generation of a request.
+    This is because, in case of best_of or beam_search, we would know the best sequence only at the end of generation.
     """
-    json_encoded_str = ""
-    if request_output.finished:
-        parameters = request_output.input.parameters
-        best_sequence = request_output.sequences[
-            request_output.best_sequence_index]
-        result = {
-            "generated_text": get_generated_text(best_sequence, request_output)
-        }
-        details = {"inputs": request_output.input.input_text}
-        details.update(
-            get_sequence_details(request_output,
-                                 request_output.best_sequence_index))
-
-        # other sequences indicate, all other sequences except the best/chosen sequence.
-        other_sequences = []
-        for index in request_output.other_sequences_indices:
-            sequence = request_output.sequences[index]
-            generated_text = get_generated_text(sequence, request_output)
-            sequence_details = get_sequence_details(request_output, index)
-            sequence_details["generated_text"] = generated_text
-            other_sequences.append(sequence_details)
-
-        if other_sequences:
-            if wait_till_generation_finished(parameters):
-                details["best_of_sequences"] = other_sequences
-        result["details"] = details
-        json_encoded_str = json.dumps(result, ensure_ascii=False)
-        if request_output.input.tgi_compat:
-            json_encoded_str = f"[{json_encoded_str}]"
-    return json_encoded_str
+    if not request_output.finished:
+        return ""
+
+    parameters = request_output.input.parameters
+    best_sequence = request_output.sequences[
+        request_output.best_sequence_index]
+    result = {
+        "generated_text": get_generated_text(best_sequence, request_output)
+    }
+    details = {"inputs": request_output.input.input_text}
+    details.update(
+        get_sequence_details(request_output,
+                             request_output.best_sequence_index))
+
+    # "Other sequences" means all sequences other than the best/chosen one.
+    other_sequences = []
+    for index in request_output.other_sequences_indices:
+        sequence = request_output.sequences[index]
+        generated_text = get_generated_text(sequence, request_output)
+        sequence_details = get_sequence_details(request_output, index)
+        sequence_details["generated_text"] = generated_text
+        other_sequences.append(sequence_details)
+
+    if other_sequences:
+        if wait_till_generation_finished(parameters):
+            details["best_of_sequences"] = other_sequences
+    result["details"] = details
+    if request_output.input.tgi_compat:
+        result = [result]
+    return json.dumps(result, ensure_ascii=False)
 
 
 def _json_output_formatter(request_output: RequestOutput):
@@ -93,74 +92,88 @@ def _json_output_formatter(request_output: RequestOutput):
     :return: formatted output
     """
-    parameters = request_output.input.parameters
-    if wait_till_generation_finished(parameters):
+    if wait_till_generation_finished(request_output.input.parameters):
         return _json_output_formatter_best_of(request_output)
     best_sequence = request_output.sequences[
         request_output.best_sequence_index]
-    generated_text = ""
-    if parameters.get("return_full_text"):
-        generated_text = request_output.input.input_text
-    next_token, first_token, last_token = best_sequence.get_next_token()
-    json_encoded_str = f"{{\"generated_text\": \"{generated_text}" if first_token else ""
-    tgi_compat = request_output.input.tgi_compat
-    if first_token and tgi_compat:
-        json_encoded_str = f"[{json_encoded_str}"
-    json_encoded_str = f"{json_encoded_str}{json.dumps(next_token.text, ensure_ascii=False)[1:-1]}"
-    if last_token:
-        details_dict = get_details_dict(request_output, include_tokens=True)
-        if details_dict:
-            details_str = f"\"details\": {json.dumps(details_dict, ensure_ascii=False)}"
-            json_encoded_str = f"{json_encoded_str}\", {details_str}}}"
-        else:
-            json_encoded_str = f"{json_encoded_str}\"}}"
-        if tgi_compat:
-            json_encoded_str = f"{json_encoded_str}]"
-    return json_encoded_str
+    # TODO: Fix this so it is not required. Right now, this call is needed to
+    # advance the token iterator, which is needed for rolling batch to work properly
+    next_token, _, _ = best_sequence.get_next_token()
+    if not request_output.finished:
+        return ""
+    details = get_details_dict(request_output, include_tokens=True)
+    if details.get("finish_reason") == "error":
+        final_token = best_sequence.get_last_token()
+        # In non-streaming, the request either succeeds or fails, so do not
+        # return the partial generation response that may exist
+        result = {
+            "generated_text": None,
+            "error": final_token.error_msg,
+            "code": 400,
+            "details": details,
+        }
+        return json.dumps(result, ensure_ascii=False)
+    generated_text = get_generated_text(best_sequence, request_output)
+    result = {
+        "generated_text": generated_text,
+    }
+    if details:
+        result["details"] = details
+    if request_output.input.tgi_compat:
+        result = [result]
+    return json.dumps(result, ensure_ascii=False)
 
 
 def _json_3p_output_formatter(request_output: RequestOutput):
     best_sequence = request_output.sequences[
         request_output.best_sequence_index]
+    # TODO: Fix this so it is not required. Right now, this call is needed to
+    # advance the token iterator, which is needed for rolling batch to work properly
     next_token, first_token, last_token = best_sequence.get_next_token()
-    json_encoded_str = f"{{\"body\": {{\"generation\": \"{request_output.input.input_text}" if first_token else ""
-    json_encoded_str = f"{json_encoded_str}{json.dumps(next_token.text, ensure_ascii=False)[1:-1]}"
-    if last_token:
-        details_dict = get_details_dict(request_output, include_tokens=True)
-        num_prompt_tokens = len(request_output.input.input_ids)
-        num_output_tokens = details_dict["generated_tokens"]
-        finish_reason = details_dict["finish_reason"]
-        details = {
-            "prompt_token_count": num_prompt_tokens,
-            "generation_token_count": num_output_tokens,
-            "stop_reason": finish_reason,
-        }
-        metering = {
-            "metering": {
-                "inputTokenCount": num_prompt_tokens,
-                "outputTokenCount": num_output_tokens,
+    if not request_output.finished:
+        return ""
+
+    details_dict = get_details_dict(request_output, include_tokens=True)
+    generated_text = get_generated_text(best_sequence, request_output)
+    num_prompt_tokens = len(request_output.input.input_ids)
+    num_output_tokens = details_dict["generated_tokens"]
+    finish_reason = details_dict["finish_reason"]
+    body = {
+        "generation": generated_text,
+        "prompt_token_count": num_prompt_tokens,
+        "generation_token_count": num_output_tokens,
+        "stop_reason": finish_reason,
+    }
+    error = None
+    if finish_reason == "error":
+        body["generation"] = None
+        body["prompt_token_count"] = 0
+        body["generation_token_count"] = 0
+        body["stop_reason"] = "error"
+        error = {
+            "error": {
+                "error_code": 400,
+                "error_msg": next_token.error_msg
             }
         }
-        details_str = f"{json.dumps(details, ensure_ascii=False)[1:-1]}"
-        metering_str = f"{json.dumps(metering, ensure_ascii=False)[1:-1]}"
-        json_encoded_str = f"{json_encoded_str}\", {details_str}}}"
-        json_encoded_str = f"{json_encoded_str}, {metering_str}"
-        if finish_reason == "error":
-            error = {
-                "error": {
-                    "error_code": 400,
-                    "error_msg": next_token.error_msg
-                }
-            }
-            error_str = f"{json.dumps(error, ensure_ascii=False)[1:-1]}"
-            json_encoded_str = f"{json_encoded_str}, {error_str}"
-        json_encoded_str = f"{json_encoded_str}, \"content_type\": \"application/json\"}}"
-    return json_encoded_str
+
+    metering = {
+        "inputTokenCount": num_prompt_tokens,
+        "outputTokenCount": num_output_tokens,
+    }
+    result = {
+        "body": body,
+        "metering": metering,
+        "content_type": "application/json",  # TODO: sort out multimodal here
+    }
+    if error:
+        result["error"] = error
+    return json.dumps(result, ensure_ascii=False)
 
 
 def get_details_dict(request_output: RequestOutput,
-                     include_tokens: bool = True) -> Optional[Dict]:
+                     include_tokens: bool = True) -> Dict:
     parameters = request_output.input.parameters
     best_sequence = request_output.sequences[
@@ -184,7 +197,7 @@ def get_details_dict(request_output: RequestOutput,
     elif best_sequence.finish_reason == "error":
         return {"finish_reason": best_sequence.finish_reason}
     else:
-        return None
+        return {}
 
 
 def _jsonlines_output_formatter(request_output: RequestOutput):
@@ -239,7 +252,6 @@ def _jsonlines_3p_output_formatter(request_output: RequestOutput):
                 "error_code": 400,
                 "error_msg": token_details["error_msg"]
             }
-        json_encoded_str = json.dumps(final_dict, ensure_ascii=False) + "\n"
     return json_encoded_str
 
 
@@ -253,60 +265,58 @@ def _json_chat_output_formatter(request_output: RequestOutput):
     parameters = request_output.input.parameters
     best_sequence = request_output.sequences[
         request_output.best_sequence_index]
-    generated_text = request_output.input.input_text if parameters.get(
-        "return_full_text") else ""
-    next_token, first_token, last_token = best_sequence.get_next_token()
+    generated_text = get_generated_text(best_sequence, request_output)
+    best_sequence.get_next_token()
+    if not request_output.finished:
+        return ""
     created = int(time.time())
-    choice1 = {
+    choice = {
         "index": 0,
         "message": {
             "role": "assistant",
-            "content": generated_text
-        }
+            "content": generated_text,
+        },
+        "finish_reason": best_sequence.finish_reason,
     }
     response1 = {
         "id": f"chatcmpl-{id}",
         "object": "chat.completion",
         "created": created,
-        "choices": [choice1]  # Currently only support 1 choice
     }
-    json_encoded_str = f"{json.dumps(response1, ensure_ascii=False)[:-5]}" if first_token else ""
-    json_encoded_str = f"{json_encoded_str}{json.dumps(next_token.text, ensure_ascii=False)[1:-1]}"
-    if last_token:
-        logprobs = None
-        if parameters.get("logprobs"):
-            logprobs = {
-                "content": [{
+    if parameters.get("logprobs"):
+        logprobs = {
+            "content": [
+                {
                     "token": t.text,
                     "logprob": t.log_prob,
                     "bytes": (b := [ord(c)
-                                for c in t.text] if t.text else None),
+                                    for c in t.text] if t.text else None),
                     "top_logprobs":  # Currently only support 1 top_logprobs
-                [{
-                    "token": t.text,
-                    "logprob": t.log_prob,
-                    "bytes": b
-                }]
+                    [{
+                        "token": t.text,
+                        "logprob": t.log_prob,
+                        "bytes": b
+                    }]
                 } for t in best_sequence.tokens
-            ]
-        }
-        choice2 = {
-            "logprobs": logprobs,
-            "finish_reason": best_sequence.finish_reason
-        }
-        prompt_tokens = len(request_output.input.input_ids)
-        completion_tokens = len(best_sequence.tokens)
-        response2 = {
-            "choices": [choice2],
-            "usage": {
-                "prompt_tokens": prompt_tokens,
-                "completion_tokens": completion_tokens,
-                "total_tokens": (prompt_tokens + completion_tokens)
-            }
+            ]
         }
-    json_encoded_str = f"{json_encoded_str}\"}}, {json.dumps(response2, ensure_ascii=False)[14:]}"
-    return json_encoded_str
+    choice["logprobs"] = logprobs
+    prompt_tokens = len(request_output.input.input_ids)
+    completion_tokens = len(best_sequence.tokens)
+    usage = {
+        "prompt_tokens": prompt_tokens,
+        "completion_tokens": completion_tokens,
+        "total_tokens": (prompt_tokens + completion_tokens)
+    }
+    result = {
+        "id": f"chatcmpl-{id}",
+        "object": "chat.completion",
+        "created": created,
+        "choices": [choice],
+        "usage": usage,
+    }
+    return json.dumps(result, ensure_ascii=False)
 
 
 def _jsonlines_chat_output_formatter(request_output: RequestOutput):
diff --git a/engines/python/setup/djl_python/request.py b/engines/python/setup/djl_python/request.py
index c519cca62..af44d2114 100644
--- a/engines/python/setup/djl_python/request.py
+++ b/engines/python/setup/djl_python/request.py
@@ -11,7 +11,7 @@
 # BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, express or implied. See the License for
 # the specific language governing permissions and limitations under the License.
 import inspect
-from typing import Union, Callable, Any, List, Dict
+from typing import Union, Callable, Any, List, Dict, Optional
 
 from djl_python.output_formatter import get_output_formatter, adapt_legacy_output_formatter
 from djl_python.request_io import Token, TextGenerationOutput, TextInput, RequestOutput, RequestInput
@@ -64,6 +64,8 @@ def __init__(self, request_input: RequestInput = None):
         self.request_output = TextGenerationOutput(request_id=self.id,
                                                    input=self.request_input)
         self.next_token_str = ""
+        self.error_message = None
+        self.error_code = None
 
     def _is_output_formatter_legacy(self):
         signature_parameters = list(
@@ -111,23 +113,20 @@ def get_next_token(self) -> str:
         """
         if self.next_token_str:
             return self.next_token_str
+        if self.legacy_formatter:
+            self.next_token_str = adapt_legacy_output_formatter(
+                self.request_output)
+        elif wait_till_generation_finished(
+                self.request_output.input.parameters):
+            # there is no need for iterators for best_of and num_beams.
+            self.next_token_str = self.output_formatter(self.request_output)
         else:
-            # TODO: Remove this support when all of our customers onboard.
-            if self.legacy_formatter:
-                self.next_token_str = adapt_legacy_output_formatter(
+            best_sequence = self.request_output.sequences[
+                self.request_output.best_sequence_index]
+            while best_sequence.has_next_token():
+                self.next_token_str += self.output_formatter(
                     self.request_output)
-            elif wait_till_generation_finished(
-                    self.request_output.input.parameters):
-                # there is no need for iterators for best_of and num_beams.
-                self.next_token_str = self.output_formatter(
-                    self.request_output)
-            else:
-                best_sequence = self.request_output.sequences[
-                    self.request_output.best_sequence_index]
-                while best_sequence.has_next_token():
-                    self.next_token_str += self.output_formatter(
-                        self.request_output)
-            return self.next_token_str
+        return self.next_token_str
 
     def reset_next_token(self):
         """
@@ -150,3 +149,31 @@ def get_content_type(self) -> str:
         :return: content type
         """
         return self.content_type
+
+    def get_error_message(self) -> Optional[str]:
+        """
+        Error message for the request if inference failed
+
+        :return: the error message
+        """
+        return self.error_message
+
+    def get_error_code(self) -> Optional[int]:
+        """
+        HTTP status code to return when inference fails
+
+        :return: the status code
+        """
+        return self.error_code
+
+    def set_error_message(self, error_message: str):
+        """
+        Sets the error message for the request if inference failed
+        """
+        self.error_message = error_message
+
+    def set_error_code(self, code: int):
+        """
+        Sets the HTTP status code to return when inference fails
+        """
+        self.error_code = code
diff --git a/engines/python/setup/djl_python/request_io.py b/engines/python/setup/djl_python/request_io.py
index b7d49a2cf..c1f708c39 100644
--- a/engines/python/setup/djl_python/request_io.py
+++ b/engines/python/setup/djl_python/request_io.py
@@ -115,6 +115,11 @@ def get_next_token(self) -> (Token, bool, bool):
             return self.tokens[index], first_token, last_token
         return None, False, False
 
+    def get_last_token(self) -> Optional[Token]:
+        if self._last_token_index:
+            return self.tokens[self._last_token_index]
+        return None
+
    def get_next_top_tokens(self):
        """Returns the next list of top tokens from the top_tokens list, or None if all have been iterated."""
        if self.has_next_top_tokens():
diff --git a/engines/python/setup/djl_python/rolling_batch/rolling_batch.py b/engines/python/setup/djl_python/rolling_batch/rolling_batch.py
index 4dfb93712..88bcd3142 100644
--- a/engines/python/setup/djl_python/rolling_batch/rolling_batch.py
+++ b/engines/python/setup/djl_python/rolling_batch/rolling_batch.py
@@ -11,6 +11,7 @@
 # BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, express or implied. See the License for
 # the specific language governing permissions and limitations under the License.
 import logging
+import json
 from abc import ABC, abstractmethod
 from typing import List
 
@@ -56,6 +57,9 @@ def try_catch_handling(self, *args, **kwargs):
                 request.set_next_token(token,
                                        last_token=True,
                                        finish_reason="error")
+                request.set_error_message(str(e))
+                # TODO: make configurable
+                request.set_error_code(424)
                 response = self.postprocess_results()
                 self.reset()
                 return response
@@ -134,6 +138,10 @@ def postprocess_results(self) -> List[dict]:
                 "last": req.is_last_token(),
                 "content_type": req.get_content_type()
             }
+            if req.get_error_message():
+                res["error"] = req.get_error_message()
+            if req.get_error_code():
+                res["code"] = req.get_error_code()
             req.reset_next_token()
             results.append(res)
 
diff --git a/engines/python/setup/djl_python/tests/test_rolling_batch.py b/engines/python/setup/djl_python/tests/test_rolling_batch.py
index 4b70d21d0..074e2c375 100644
--- a/engines/python/setup/djl_python/tests/test_rolling_batch.py
+++ b/engines/python/setup/djl_python/tests/test_rolling_batch.py
@@ -28,18 +28,14 @@ def test_json_fmt(self):
         req2 = Request(req_input2)
         for req in [req1, req2]:
             req.set_next_token(Token(244, "He", -0.334532))
-            print(req.get_next_token(), end='')
-            assert req.get_next_token() == '{"generated_text": "He'
-            req.reset_next_token()
+            req.get_next_token()
             req.set_next_token(Token(576, "llo", -0.123123))
-            print(req.get_next_token(), end='')
-            assert req.get_next_token() == 'llo'
-            req.reset_next_token()
+            req.get_next_token()
             req.set_next_token(Token(4558, " world", -0.567854), True,
                                'length')
             print(req.get_next_token(), end='')
-            assert req.get_next_token() == ' world"}'
-            req.reset_next_token()
+            assert req.get_next_token() == json.dumps(
+                {"generated_text": "Hello world"})
 
     def test_json_fmt_with_appending(self):
         req_input1 = TextInput(request_id=0,
@@ -58,14 +54,14 @@ def test_json_fmt_with_appending(self):
         req2 = Request(req_input2)
         for req in [req1, req2]:
             req.set_next_token(Token(244, "He", -0.334532))
+            req.get_next_token()
             req.set_next_token(Token(576, "llo", -0.123123))
-            print(req.get_next_token(), end='')
-            assert req.get_next_token() == '{"generated_text": "Hello'
-            req.reset_next_token()
+            req.get_next_token()
             req.set_next_token(Token(4558, " world", -0.567854), True,
                                'length')
             print(req.get_next_token(), end='')
-            assert req.get_next_token() == ' world"}'
+            assert req.get_next_token() == json.dumps(
+                {"generated_text": "Hello world"})
 
     def test_fmt_hf_compat(self):
         req = Request(
@@ -79,16 +75,12 @@ def test_fmt_hf_compat(self):
                       output_formatter=_json_output_formatter,
                       tgi_compat=True))
 
-        final_str = []
         req.set_next_token(Token(244, "He", -0.334532))
-        final_str.append(req.get_next_token())
-        req.reset_next_token()
+        req.get_next_token()
         req.set_next_token(Token(576, "llo", -0.123123))
-        final_str.append(req.get_next_token())
-        req.reset_next_token()
+        req.get_next_token()
         req.set_next_token(Token(4558, " world", -0.567854), True, 'length')
-        final_str.append(req.get_next_token())
-        final_json = json.loads(''.join(final_str))
+        final_json = json.loads(req.get_next_token())
         print(final_json, end='')
         assert final_json == [{
             'generated_text': 'This is a wonderful dayHello world',
@@ -261,33 +253,31 @@ def test_3p_fmt(self):
                       input_text="This is a wonderful day",
a wonderful day", parameters={ "max_new_tokens": 1024, - "details": True + "details": True, + "return_full_text": True, }, output_formatter="3p")) - final_str = [] req.set_next_token(Token(244, "He", -0.334532)) + req.get_next_token() req.set_next_token(Token(244, "llo", -0.123123)) + req.get_next_token() req.set_next_token(Token(4558, " world", -0.567854)) - req.set_next_token(Token(245, "", -1, True, "some error"), True, - "error") - final_str.append(req.get_next_token()) - output = json.loads(''.join(final_str)) + req.get_next_token() + req.set_next_token(Token(245, "", -1, True), True, "length") + output = json.loads(req.get_next_token()) + print(req.get_next_token()) assert output == { "body": { "generation": "This is a wonderful dayHello world", "prompt_token_count": 0, "generation_token_count": 4, - "stop_reason": "error" + "stop_reason": "length" }, "content_type": "application/json", "metering": { "inputTokenCount": 0, "outputTokenCount": 4, }, - "error": { - "error_code": 400, - "error_msg": "some error", - } } def test_3p_stream_fmt(self): @@ -375,20 +365,14 @@ def test_return_full_text(self): }, output_formatter=_json_output_formatter)) - final_str = [] req.set_next_token(Token(244, "He", -0.334532)) - final_str.append(req.get_next_token()) - req.reset_next_token() + req.get_next_token() req.set_next_token(Token(576, "llo", -0.123123)) - final_str.append(req.get_next_token()) - req.reset_next_token() + req.get_next_token() req.set_next_token(Token(4558, " world", -0.567854), True, 'length') - final_str.append(req.get_next_token()) - final_json = json.loads(''.join(final_str)) - print(final_json, end='') - assert final_json == { - "generated_text": "This is a wonderful dayHello world", - } + + assert req.get_next_token() == json.dumps( + {"generated_text": "This is a wonderful dayHello world"}) req = Request( TextInput(request_id=0, @@ -420,17 +404,14 @@ def test_details(self): "details": True }, output_formatter=_json_output_formatter)) - final_str = [] req.set_next_token(Token(244, "He", -0.334532)) - final_str.append(req.get_next_token()) - req.reset_next_token() + req.get_next_token() req.set_next_token(Token(576, "llo", -0.123123)) - final_str.append(req.get_next_token()) - req.reset_next_token() + req.get_next_token() req.set_next_token(Token(4558, " world", -0.567854), True, 'length') - final_str.append(req.get_next_token()) - final_json = json.loads(''.join(final_str)) - print(final_json) + + final_json = json.loads(req.get_next_token()) + assert final_json == { "generated_text": "Hello world", "details": { @@ -569,22 +550,20 @@ def test_chat_json(self): parameters={ "max_new_tokens": 256, "details": True, - "logprobs": True + "logprobs": True, }, output_formatter=_json_chat_output_formatter, )) - final_str = [] req.set_next_token(Token(244, "He", -0.334532)) - final_str.append(req.get_next_token()) + req.get_next_token() req.reset_next_token() req.set_next_token(Token(576, "llo", -0.123123)) - final_str.append(req.get_next_token()) + req.get_next_token() req.reset_next_token() req.set_next_token(Token(4558, " world", -0.567854), True, 'length') - final_str.append(req.get_next_token()) - final_json = json.loads(''.join(final_str)) - print(final_json) - assert final_json['choices'] == [{ + output = json.loads(req.get_next_token()) + print(output) + assert output['choices'] == [{ 'index': 0, 'message': { 'role': 'assistant', @@ -628,7 +607,7 @@ def test_chat_json(self): }, 'finish_reason': 'length' }] - assert final_json['usage'] == { + assert output['usage'] == { 
             'prompt_tokens': 0,
             'completion_tokens': 3,
             'total_tokens': 3
diff --git a/engines/python/setup/djl_python/three_p/three_p_utils.py b/engines/python/setup/djl_python/three_p/three_p_utils.py
index 674e35948..09dbb0826 100644
--- a/engines/python/setup/djl_python/three_p/three_p_utils.py
+++ b/engines/python/setup/djl_python/three_p/three_p_utils.py
@@ -26,6 +26,7 @@ def parse_3p_request(input_map: dict, is_rolling_batch: bool, tokenizer,
         _param["temperature"] = input_map.pop("temperature", 0.5)
         _param["top_p"] = input_map.pop("top_p", 0.9)
         _param["max_new_tokens"] = input_map.pop("max_gen_len", 512)
+        _param["return_full_text"] = True
         if _param["temperature"] > 0:
             _param["do_sample"] = True
         if invoke_type == "InvokeEndpointWithResponseStream":
diff --git a/engines/python/setup/djl_python/utils.py b/engines/python/setup/djl_python/utils.py
index 0617522ca..e4f35ed3a 100644
--- a/engines/python/setup/djl_python/utils.py
+++ b/engines/python/setup/djl_python/utils.py
@@ -98,7 +98,7 @@ def rolling_batch_inference(parsed_input, inputs: Input, outputs: Output,
                 outputs.add(Output.binary_encode(err), key="data", batch_index=i)
                 outputs.add_property(f"batch_{i}_Content-Type", "application/json")
             else:
-                content_type = result[idx].pop("content_type")
+                content_type = result[idx].get("content_type")
                 outputs.add(Output.binary_encode(result[idx]),
                             key="data",
                             batch_index=i)
diff --git a/engines/python/src/main/java/ai/djl/python/engine/RollingBatch.java b/engines/python/src/main/java/ai/djl/python/engine/RollingBatch.java
index e1bdd6230..f26aa2746 100644
--- a/engines/python/src/main/java/ai/djl/python/engine/RollingBatch.java
+++ b/engines/python/src/main/java/ai/djl/python/engine/RollingBatch.java
@@ -307,6 +307,11 @@ void addResponse(byte[] json, Map<String, String> properties) {
             }
             ++count;
             if (json[0] == '{') {
+                logger.warn(
+                        "Customizing the parse_input method of the huggingface inference handler is"
+                            + " no longer supported. This functionality will be removed in an"
+                            + " upcoming version. For custom input parsing, please migrate to using"
+                            + " the custom input formatter support.");
                 // TODO: backward compatible for 0.23.0 release in case user
                 // customize huggingface.parse_input()
                 String s = new String(json, StandardCharsets.UTF_8);
@@ -348,11 +353,18 @@ void addResponse(byte[] json, Map<String, String> properties) {
                     break;
                 }
             }
+            if ((nextToken == null || nextToken.isEmpty()) && code == null) {
+                // in non-streaming cases, we do not return content until generation is finished
+                return;
+            }
             if (code != null) {
                 Map<String, Object> map = new ConcurrentHashMap<>(2);
-                map.put("code", Integer.parseInt(code));
+                int httpStatusCode = Integer.parseInt(code);
+                map.put("code", httpStatusCode);
+                output.setCode(httpStatusCode);
                 if (error != null) {
                     map.put("error", error);
+                    output.setMessage(error);
                 }
                 byte[] buffer = JsonUtils.GSON.toJson(map).getBytes(StandardCharsets.UTF_8);
                 data.appendContent(buffer, true);
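
Note for reviewers: taken together, the formatter changes above replace incremental JSON string concatenation with a single json.dumps call once request_output.finished is True; every earlier call now returns an empty string. The sketch below is illustrative only and not part of the patch (the helper name and arguments are hypothetical), but the payload shapes mirror the new _json_output_formatter: on failure the whole request resolves to one error document rather than a partial generation (the formatter embeds "code": 400 in the body, while rolling_batch separately sets the 424 response code).

    import json

    def final_payload(generated_text, finish_reason, error_msg=None, tgi_compat=False):
        """Illustrative sketch of the final non-streaming response shape."""
        if finish_reason == "error":
            # Failed requests resolve to a single error document, never a
            # partial generation.
            return json.dumps({
                "generated_text": None,
                "error": error_msg,
                "code": 400,
                "details": {"finish_reason": "error"},
            }, ensure_ascii=False)
        result = {"generated_text": generated_text}
        if tgi_compat:
            # HuggingFace/TGI-compatible clients expect a one-element array.
            result = [result]
        return json.dumps(result, ensure_ascii=False)

    print(final_payload("Hello world", "length"))
    # {"generated_text": "Hello world"}
    print(final_payload(None, "error", "some error"))
    # {"generated_text": null, "error": "some error", "code": 400, "details": {"finish_reason": "error"}}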