diff --git a/docs/_posts/ahmedlone127/2023-11-14-bert_qa_base_multilingual_cased_finetuned_viquad_en.md b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_base_multilingual_cased_finetuned_viquad_en.md
new file mode 100644
index 00000000000000..b56f668a2dd137
--- /dev/null
+++ b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_base_multilingual_cased_finetuned_viquad_en.md
@@ -0,0 +1,100 @@
+---
+layout: model
+title: English BertForQuestionAnswering Base Cased model (from Khanh)
+author: John Snow Labs
+name: bert_qa_base_multilingual_cased_finetuned_viquad
+date: 2023-11-14
+tags: [en, open_source, bert, question_answering, onnx]
+task: Question Answering
+language: en
+edition: Spark NLP 5.2.0
+spark_version: 3.0
+supported: true
+engine: onnx
+annotator: BertForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained Question Answering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `bert-base-multilingual-cased-finetuned-viquad` is an English model originally trained by `Khanh`.
+
+## Predicted Entities
+
+
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/bert_qa_base_multilingual_cased_finetuned_viquad_en_5.2.0_3.0_1699993581067.zip){:.button.button-orange.button-orange-trans.arr.button-icon}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/bert_qa_base_multilingual_cased_finetuned_viquad_en_5.2.0_3.0_1699993581067.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+documentAssembler = MultiDocumentAssembler() \
+ .setInputCols(["question", "context"]) \
+ .setOutputCols(["document_question", "document_context"])
+
+spanClassifier = BertForQuestionAnswering.pretrained("bert_qa_base_multilingual_cased_finetuned_viquad","en") \
+ .setInputCols(["document_question", "document_context"]) \
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[documentAssembler, spanClassifier])
+
+data = spark.createDataFrame([["What is my name?", "My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val documentAssembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val spanClassifier = BertForQuestionAnswering.pretrained("bert_qa_base_multilingual_cased_finetuned_viquad","en")
+  .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(documentAssembler, spanClassifier))
+
+val data = Seq(("What is my name?", "My name is Clara and I live in Berkeley.")).toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+{:.nlu-block}
+```python
+import nlu
+nlu.load("en.answer_question.bert.cased_multilingual_base_finetuned").predict("""What is my name?|||My name is Clara and I live in Berkeley.""")
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|bert_qa_base_multilingual_cased_finetuned_viquad|
+|Compatibility:|Spark NLP 5.2.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|665.0 MB|
+|Case sensitive:|true|
+|Max sentence length:|512|
+
+## References
+
+References
+
+- https://huggingface.co/Khanh/bert-base-multilingual-cased-finetuned-viquad
\ No newline at end of file
diff --git a/docs/_posts/ahmedlone127/2023-11-14-bert_qa_base_multilingual_cased_finetuned_xx.md b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_base_multilingual_cased_finetuned_xx.md
new file mode 100644
index 00000000000000..c68bccc8198d41
--- /dev/null
+++ b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_base_multilingual_cased_finetuned_xx.md
@@ -0,0 +1,94 @@
+---
+layout: model
+title: Multilingual BertForQuestionAnswering Base Cased model (from obokkkk)
+author: John Snow Labs
+name: bert_qa_base_multilingual_cased_finetuned
+date: 2023-11-14
+tags: [xx, open_source, bert, question_answering, onnx]
+task: Question Answering
+language: xx
+edition: Spark NLP 5.2.0
+spark_version: 3.0
+supported: true
+engine: onnx
+annotator: BertForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained BertForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `bert-base-multilingual-cased-finetuned` is a Multilingual model originally trained by `obokkkk`.
+
+## Predicted Entities
+
+
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/bert_qa_base_multilingual_cased_finetuned_xx_5.2.0_3.0_1699993678387.zip){:.button.button-orange.button-orange-trans.arr.button-icon}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/bert_qa_base_multilingual_cased_finetuned_xx_5.2.0_3.0_1699993678387.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = BertForQuestionAnswering.pretrained("bert_qa_base_multilingual_cased_finetuned","xx")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = BertForQuestionAnswering.pretrained("bert_qa_base_multilingual_cased_finetuned","xx")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq("What's my name?","My name is Clara and I live in Berkeley.").toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|bert_qa_base_multilingual_cased_finetuned|
+|Compatibility:|Spark NLP 5.2.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|xx|
+|Size:|665.0 MB|
+|Case sensitive:|true|
+|Max sentence length:|512|
+
+## References
+
+References
+
+- https://huggingface.co/obokkkk/bert-base-multilingual-cased-finetuned
\ No newline at end of file
diff --git a/docs/_posts/ahmedlone127/2023-11-14-bert_qa_base_multilingual_uncased_finetuned_squadv2_xx.md b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_base_multilingual_uncased_finetuned_squadv2_xx.md
new file mode 100644
index 00000000000000..f9b91f4ba9bb88
--- /dev/null
+++ b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_base_multilingual_uncased_finetuned_squadv2_xx.md
@@ -0,0 +1,94 @@
+---
+layout: model
+title: Multilingual BertForQuestionAnswering Base Uncased model (from khoanvm)
+author: John Snow Labs
+name: bert_qa_base_multilingual_uncased_finetuned_squadv2
+date: 2023-11-14
+tags: [xx, open_source, bert, question_answering, onnx]
+task: Question Answering
+language: xx
+edition: Spark NLP 5.2.0
+spark_version: 3.0
+supported: true
+engine: onnx
+annotator: BertForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained BertForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `bert-base-multilingual-uncased-finetuned-squadv2` is a Multilingual model originally trained by `khoanvm`.
+
+## Predicted Entities
+
+
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/bert_qa_base_multilingual_uncased_finetuned_squadv2_xx_5.2.0_3.0_1699993920867.zip){:.button.button-orange.button-orange-trans.arr.button-icon}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/bert_qa_base_multilingual_uncased_finetuned_squadv2_xx_5.2.0_3.0_1699993920867.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = BertForQuestionAnswering.pretrained("bert_qa_base_multilingual_uncased_finetuned_squadv2","xx")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = BertForQuestionAnswering.pretrained("bert_qa_base_multilingual_uncased_finetuned_squadv2","xx")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq("What's my name?","My name is Clara and I live in Berkeley.").toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|bert_qa_base_multilingual_uncased_finetuned_squadv2|
+|Compatibility:|Spark NLP 5.2.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|xx|
+|Size:|625.5 MB|
+|Case sensitive:|false|
+|Max sentence length:|512|
+
+## References
+
+References
+
+- https://huggingface.co/khoanvm/bert-base-multilingual-uncased-finetuned-squadv2
\ No newline at end of file
diff --git a/docs/_posts/ahmedlone127/2023-11-14-bert_qa_base_nnish_cased_squad1_fi.md b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_base_nnish_cased_squad1_fi.md
new file mode 100644
index 00000000000000..eaf190971bc5ef
--- /dev/null
+++ b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_base_nnish_cased_squad1_fi.md
@@ -0,0 +1,96 @@
+---
+layout: model
+title: Finnish BertForQuestionAnswering Base Cased model (from ilmariky)
+author: John Snow Labs
+name: bert_qa_base_nnish_cased_squad1
+date: 2023-11-14
+tags: [fi, open_source, bert, question_answering, onnx]
+task: Question Answering
+language: fi
+edition: Spark NLP 5.2.0
+spark_version: 3.0
+supported: true
+engine: onnx
+annotator: BertForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained BertForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `bert-base-finnish-cased-squad1-fi` is a Finnish model originally trained by `ilmariky`.
+
+## Predicted Entities
+
+
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/bert_qa_base_nnish_cased_squad1_fi_5.2.0_3.0_1699993974738.zip){:.button.button-orange.button-orange-trans.arr.button-icon}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/bert_qa_base_nnish_cased_squad1_fi_5.2.0_3.0_1699993974738.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = BertForQuestionAnswering.pretrained("bert_qa_base_nnish_cased_squad1","fi")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = BertForQuestionAnswering.pretrained("bert_qa_base_nnish_cased_squad1","fi")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq("What's my name?","My name is Clara and I live in Berkeley.").toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|bert_qa_base_nnish_cased_squad1|
+|Compatibility:|Spark NLP 5.2.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|fi|
+|Size:|464.7 MB|
+|Case sensitive:|true|
+|Max sentence length:|512|
+
+## References
+
+References
+
+- https://huggingface.co/ilmariky/bert-base-finnish-cased-squad1-fi
+- https://github.com/google-research-datasets/tydiqa
+- https://worksheets.codalab.org/rest/bundles/0x6b567e1cf2e041ec80d7098f031c5c9e/contents/blob/
\ No newline at end of file
diff --git a/docs/_posts/ahmedlone127/2023-11-14-bert_qa_base_pars_uncased_parsquad_fa.md b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_base_pars_uncased_parsquad_fa.md
new file mode 100644
index 00000000000000..7e5cda9101c285
--- /dev/null
+++ b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_base_pars_uncased_parsquad_fa.md
@@ -0,0 +1,94 @@
+---
+layout: model
+title: Persian BertForQuestionAnswering Base Uncased model (from mohsenfayyaz)
+author: John Snow Labs
+name: bert_qa_base_pars_uncased_parsquad
+date: 2023-11-14
+tags: [fa, open_source, bert, question_answering, onnx]
+task: Question Answering
+language: fa
+edition: Spark NLP 5.2.0
+spark_version: 3.0
+supported: true
+engine: onnx
+annotator: BertForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained BertForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `bert-base-parsbert-uncased_parsquad` is a Persian model originally trained by `mohsenfayyaz`.
+
+## Predicted Entities
+
+
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/bert_qa_base_pars_uncased_parsquad_fa_5.2.0_3.0_1699994256254.zip){:.button.button-orange.button-orange-trans.arr.button-icon}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/bert_qa_base_pars_uncased_parsquad_fa_5.2.0_3.0_1699994256254.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = BertForQuestionAnswering.pretrained("bert_qa_base_pars_uncased_parsquad","fa")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = BertForQuestionAnswering.pretrained("bert_qa_base_pars_uncased_parsquad","fa")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq("What's my name?","My name is Clara and I live in Berkeley.").toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|bert_qa_base_pars_uncased_parsquad|
+|Compatibility:|Spark NLP 5.2.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|fa|
+|Size:|606.4 MB|
+|Case sensitive:|false|
+|Max sentence length:|512|
+
+## References
+
+References
+
+- https://huggingface.co/mohsenfayyaz/bert-base-parsbert-uncased_parsquad
\ No newline at end of file
diff --git a/docs/_posts/ahmedlone127/2023-11-14-bert_qa_base_pars_uncased_pquad_1epoch_fa.md b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_base_pars_uncased_pquad_1epoch_fa.md
new file mode 100644
index 00000000000000..29890b6ff0ac1b
--- /dev/null
+++ b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_base_pars_uncased_pquad_1epoch_fa.md
@@ -0,0 +1,94 @@
+---
+layout: model
+title: Persian BertForQuestionAnswering Base Uncased model (from mohsenfayyaz)
+author: John Snow Labs
+name: bert_qa_base_pars_uncased_pquad_1epoch
+date: 2023-11-14
+tags: [fa, open_source, bert, question_answering, onnx]
+task: Question Answering
+language: fa
+edition: Spark NLP 5.2.0
+spark_version: 3.0
+supported: true
+engine: onnx
+annotator: BertForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained BertForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `bert-base-parsbert-uncased_pquad_1epoch` is a Persian model originally trained by `mohsenfayyaz`.
+
+## Predicted Entities
+
+
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/bert_qa_base_pars_uncased_pquad_1epoch_fa_5.2.0_3.0_1699994627595.zip){:.button.button-orange.button-orange-trans.arr.button-icon}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/bert_qa_base_pars_uncased_pquad_1epoch_fa_5.2.0_3.0_1699994627595.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = BertForQuestionAnswering.pretrained("bert_qa_base_pars_uncased_pquad_1epoch","fa")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = BertForQuestionAnswering.pretrained("bert_qa_base_pars_uncased_pquad_1epoch","fa")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq("What's my name?","My name is Clara and I live in Berkeley.").toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|bert_qa_base_pars_uncased_pquad_1epoch|
+|Compatibility:|Spark NLP 5.2.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|fa|
+|Size:|606.4 MB|
+|Case sensitive:|false|
+|Max sentence length:|512|
+
+## References
+
+References
+
+- https://huggingface.co/mohsenfayyaz/bert-base-parsbert-uncased_pquad_1epoch
\ No newline at end of file
diff --git a/docs/_posts/ahmedlone127/2023-11-14-bert_qa_base_pars_uncased_pquad_fa.md b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_base_pars_uncased_pquad_fa.md
new file mode 100644
index 00000000000000..76e113873fca69
--- /dev/null
+++ b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_base_pars_uncased_pquad_fa.md
@@ -0,0 +1,94 @@
+---
+layout: model
+title: Persian BertForQuestionAnswering Base Uncased model (from mohsenfayyaz)
+author: John Snow Labs
+name: bert_qa_base_pars_uncased_pquad
+date: 2023-11-14
+tags: [fa, open_source, bert, question_answering, onnx]
+task: Question Answering
+language: fa
+edition: Spark NLP 5.2.0
+spark_version: 3.0
+supported: true
+engine: onnx
+annotator: BertForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained BertForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `bert-base-parsbert-uncased_pquad` is a Persian model originally trained by `mohsenfayyaz`.
+
+## Predicted Entities
+
+
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/bert_qa_base_pars_uncased_pquad_fa_5.2.0_3.0_1699993573583.zip){:.button.button-orange.button-orange-trans.arr.button-icon}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/bert_qa_base_pars_uncased_pquad_fa_5.2.0_3.0_1699993573583.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = BertForQuestionAnswering.pretrained("bert_qa_base_pars_uncased_pquad","fa")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = BertForQuestionAnswering.pretrained("bert_qa_base_pars_uncased_pquad","fa")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq("What's my name?","My name is Clara and I live in Berkeley.").toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|bert_qa_base_pars_uncased_pquad|
+|Compatibility:|Spark NLP 5.2.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|fa|
+|Size:|606.4 MB|
+|Case sensitive:|false|
+|Max sentence length:|512|
+
+## References
+
+References
+
+- https://huggingface.co/mohsenfayyaz/bert-base-parsbert-uncased_pquad
\ No newline at end of file
diff --git a/docs/_posts/ahmedlone127/2023-11-14-bert_qa_base_pars_uncased_pquad_lr1e_5_fa.md b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_base_pars_uncased_pquad_lr1e_5_fa.md
new file mode 100644
index 00000000000000..acfc528f765251
--- /dev/null
+++ b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_base_pars_uncased_pquad_lr1e_5_fa.md
@@ -0,0 +1,94 @@
+---
+layout: model
+title: Persian BertForQuestionAnswering Base Uncased model (from mohsenfayyaz)
+author: John Snow Labs
+name: bert_qa_base_pars_uncased_pquad_lr1e_5
+date: 2023-11-14
+tags: [fa, open_source, bert, question_answering, onnx]
+task: Question Answering
+language: fa
+edition: Spark NLP 5.2.0
+spark_version: 3.0
+supported: true
+engine: onnx
+annotator: BertForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained BertForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `bert-base-parsbert-uncased_pquad_lr1e-5` is a Persian model originally trained by `mohsenfayyaz`.
+
+## Predicted Entities
+
+
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/bert_qa_base_pars_uncased_pquad_lr1e_5_fa_5.2.0_3.0_1699994981140.zip){:.button.button-orange.button-orange-trans.arr.button-icon}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/bert_qa_base_pars_uncased_pquad_lr1e_5_fa_5.2.0_3.0_1699994981140.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = BertForQuestionAnswering.pretrained("bert_qa_base_pars_uncased_pquad_lr1e_5","fa")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = BertForQuestionAnswering.pretrained("bert_qa_base_pars_uncased_pquad_lr1e_5","fa")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq("What's my name?","My name is Clara and I live in Berkeley.").toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|bert_qa_base_pars_uncased_pquad_lr1e_5|
+|Compatibility:|Spark NLP 5.2.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|fa|
+|Size:|606.4 MB|
+|Case sensitive:|false|
+|Max sentence length:|512|
+
+## References
+
+References
+
+- https://huggingface.co/mohsenfayyaz/bert-base-parsbert-uncased_pquad_lr1e-5
\ No newline at end of file
diff --git a/docs/_posts/ahmedlone127/2023-11-14-bert_qa_base_parsbert_uncased_finetuned_squad_fa.md b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_base_parsbert_uncased_finetuned_squad_fa.md
new file mode 100644
index 00000000000000..fed4852c186845
--- /dev/null
+++ b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_base_parsbert_uncased_finetuned_squad_fa.md
@@ -0,0 +1,94 @@
+---
+layout: model
+title: Persian BertForQuestionAnswering Base Uncased model (from mhmsadegh)
+author: John Snow Labs
+name: bert_qa_base_parsbert_uncased_finetuned_squad
+date: 2023-11-14
+tags: [fa, open_source, bert, question_answering, onnx]
+task: Question Answering
+language: fa
+edition: Spark NLP 5.2.0
+spark_version: 3.0
+supported: true
+engine: onnx
+annotator: BertForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained Question Answering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `bert-base-parsbert-uncased-finetuned-squad` is a Persian model originally trained by `mhmsadegh`.
+
+## Predicted Entities
+
+
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/bert_qa_base_parsbert_uncased_finetuned_squad_fa_5.2.0_3.0_1699993442017.zip){:.button.button-orange.button-orange-trans.arr.button-icon}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/bert_qa_base_parsbert_uncased_finetuned_squad_fa_5.2.0_3.0_1699993442017.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+documentAssembler = MultiDocumentAssembler() \
+ .setInputCols(["question", "context"]) \
+ .setOutputCols(["document_question", "document_context"])
+
+spanClassifier = BertForQuestionAnswering.pretrained("bert_qa_base_parsbert_uncased_finetuned_squad","fa") \
+ .setInputCols(["document_question", "document_context"]) \
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[documentAssembler, spanClassifier])
+
+data = spark.createDataFrame([["اسم من چیست؟", "نام من کلارا است و من در برکلی زندگی می کنم."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val documentAssembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val spanClassifier = BertForQuestionAnswering.pretrained("bert_qa_base_parsbert_uncased_finetuned_squad","fa")
+  .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(documentAssembler, spanClassifier))
+
+val data = Seq(("اسم من چیست؟", "نام من کلارا است و من در برکلی زندگی می کنم.")).toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|bert_qa_base_parsbert_uncased_finetuned_squad|
+|Compatibility:|Spark NLP 5.2.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|fa|
+|Size:|606.4 MB|
+|Case sensitive:|false|
+|Max sentence length:|512|
+
+## References
+
+References
+
+- https://huggingface.co/mhmsadegh/bert-base-parsbert-uncased-finetuned-squad
\ No newline at end of file
diff --git a/docs/_posts/ahmedlone127/2023-11-14-bert_qa_base_turkish_128k_cased_tquad2_finetuned_lr_2e_05_epochs_1_tr.md b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_base_turkish_128k_cased_tquad2_finetuned_lr_2e_05_epochs_1_tr.md
new file mode 100644
index 00000000000000..0c55dcee49d476
--- /dev/null
+++ b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_base_turkish_128k_cased_tquad2_finetuned_lr_2e_05_epochs_1_tr.md
@@ -0,0 +1,94 @@
+---
+layout: model
+title: Turkish BertForQuestionAnswering Base Cased model (from husnu)
+author: John Snow Labs
+name: bert_qa_base_turkish_128k_cased_tquad2_finetuned_lr_2e_05_epochs_1
+date: 2023-11-14
+tags: [tr, open_source, bert, question_answering, onnx]
+task: Question Answering
+language: tr
+edition: Spark NLP 5.2.0
+spark_version: 3.0
+supported: true
+engine: onnx
+annotator: BertForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained Question Answering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `bert-base-turkish-128k-cased-finetuned_lr-2e-05_epochs-3TQUAD2-finetuned_lr-2e-05_epochs-1` is a Turkish model originally trained by `husnu`.
+
+## Predicted Entities
+
+
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/bert_qa_base_turkish_128k_cased_tquad2_finetuned_lr_2e_05_epochs_1_tr_5.2.0_3.0_1699995382989.zip){:.button.button-orange.button-orange-trans.arr.button-icon}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/bert_qa_base_turkish_128k_cased_tquad2_finetuned_lr_2e_05_epochs_1_tr_5.2.0_3.0_1699995382989.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+documentAssembler = MultiDocumentAssembler() \
+ .setInputCols(["question", "context"]) \
+ .setOutputCols(["document_question", "document_context"])
+
+spanClassifier = BertForQuestionAnswering.pretrained("bert_qa_base_turkish_128k_cased_tquad2_finetuned_lr_2e_05_epochs_1","tr") \
+ .setInputCols(["document_question", "document_context"]) \
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[documentAssembler, spanClassifier])
+
+data = spark.createDataFrame([["Benim adım ne?", "Benim adım Clara ve Berkeley'de yaşıyorum."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val documentAssembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val spanClassifier = BertForQuestionAnswering.pretrained("bert_qa_base_turkish_128k_cased_tquad2_finetuned_lr_2e_05_epochs_1","tr")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(documentAssembler, spanClassifier))
+
+val data = Seq(("Benim adım ne?", "Benim adım Clara ve Berkeley'de yaşıyorum.")).toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|bert_qa_base_turkish_128k_cased_tquad2_finetuned_lr_2e_05_epochs_1|
+|Compatibility:|Spark NLP 5.2.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|tr|
+|Size:|688.9 MB|
+|Case sensitive:|true|
+|Max sentence length:|512|
+
+## References
+
+References
+
+- https://huggingface.co/husnu/bert-base-turkish-128k-cased-finetuned_lr-2e-05_epochs-3TQUAD2-finetuned_lr-2e-05_epochs-1
\ No newline at end of file
diff --git a/docs/_posts/ahmedlone127/2023-11-14-bert_qa_base_turkish_128k_cased_tquad2_finetuned_lr_2e_05_epochs_3_tr.md b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_base_turkish_128k_cased_tquad2_finetuned_lr_2e_05_epochs_3_tr.md
new file mode 100644
index 00000000000000..876a42a77bb907
--- /dev/null
+++ b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_base_turkish_128k_cased_tquad2_finetuned_lr_2e_05_epochs_3_tr.md
@@ -0,0 +1,94 @@
+---
+layout: model
+title: Turkish BertForQuestionAnswering Base Cased model (from husnu)
+author: John Snow Labs
+name: bert_qa_base_turkish_128k_cased_tquad2_finetuned_lr_2e_05_epochs_3
+date: 2023-11-14
+tags: [tr, open_source, bert, question_answering, onnx]
+task: Question Answering
+language: tr
+edition: Spark NLP 5.2.0
+spark_version: 3.0
+supported: true
+engine: onnx
+annotator: BertForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained Question Answering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `bert-base-turkish-128k-cased-finetuned_lr-2e-05_epochs-3TQUAD2-finetuned_lr-2e-05_epochs-3` is a Turkish model originally trained by `husnu`.
+
+## Predicted Entities
+
+
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/bert_qa_base_turkish_128k_cased_tquad2_finetuned_lr_2e_05_epochs_3_tr_5.2.0_3.0_1699993798250.zip){:.button.button-orange.button-orange-trans.arr.button-icon}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/bert_qa_base_turkish_128k_cased_tquad2_finetuned_lr_2e_05_epochs_3_tr_5.2.0_3.0_1699993798250.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+documentAssembler = MultiDocumentAssembler() \
+ .setInputCols(["question", "context"]) \
+ .setOutputCols(["document_question", "document_context"])
+
+spanClassifier = BertForQuestionAnswering.pretrained("bert_qa_base_turkish_128k_cased_tquad2_finetuned_lr_2e_05_epochs_3","tr") \
+ .setInputCols(["document_question", "document_context"]) \
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[documentAssembler, spanClassifier])
+
+data = spark.createDataFrame([["Benim adım ne?", "Benim adım Clara ve Berkeley'de yaşıyorum."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val documentAssembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val spanClassifier = BertForQuestionAnswering.pretrained("bert_qa_base_turkish_128k_cased_tquad2_finetuned_lr_2e_05_epochs_3","tr")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(documentAssembler, spanClassifier))
+
+val data = Seq(("Benim adım ne?", "Benim adım Clara ve Berkeley'de yaşıyorum.")).toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|bert_qa_base_turkish_128k_cased_tquad2_finetuned_lr_2e_05_epochs_3|
+|Compatibility:|Spark NLP 5.2.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|tr|
+|Size:|688.9 MB|
+|Case sensitive:|true|
+|Max sentence length:|512|
+
+## References
+
+References
+
+- https://huggingface.co/husnu/bert-base-turkish-128k-cased-finetuned_lr-2e-05_epochs-3TQUAD2-finetuned_lr-2e-05_epochs-3
\ No newline at end of file
diff --git a/docs/_posts/ahmedlone127/2023-11-14-bert_qa_base_uncased_few_shot_k_128_finetuned_squad_seed_2_en.md b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_base_uncased_few_shot_k_128_finetuned_squad_seed_2_en.md
new file mode 100644
index 00000000000000..e2f65de0194d55
--- /dev/null
+++ b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_base_uncased_few_shot_k_128_finetuned_squad_seed_2_en.md
@@ -0,0 +1,100 @@
+---
+layout: model
+title: English BertForQuestionAnswering Base Uncased model (from anas-awadalla)
+author: John Snow Labs
+name: bert_qa_base_uncased_few_shot_k_128_finetuned_squad_seed_2
+date: 2023-11-14
+tags: [en, open_source, bert, question_answering, onnx]
+task: Question Answering
+language: en
+edition: Spark NLP 5.2.0
+spark_version: 3.0
+supported: true
+engine: onnx
+annotator: BertForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained Question Answering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `bert-base-uncased-few-shot-k-128-finetuned-squad-seed-2` is a English model originally trained by `anas-awadalla`.
+
+## Predicted Entities
+
+
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/bert_qa_base_uncased_few_shot_k_128_finetuned_squad_seed_2_en_5.2.0_3.0_1699995746222.zip){:.button.button-orange.button-orange-trans.arr.button-icon}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/bert_qa_base_uncased_few_shot_k_128_finetuned_squad_seed_2_en_5.2.0_3.0_1699995746222.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+documentAssembler = MultiDocumentAssembler() \
+ .setInputCols(["question", "context"]) \
+ .setOutputCols(["document_question", "document_context"])
+
+spanClassifier = BertForQuestionAnswering.pretrained("bert_qa_base_uncased_few_shot_k_128_finetuned_squad_seed_2","en") \
+ .setInputCols(["document_question", "document_context"]) \
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[documentAssembler, spanClassifier])
+
+data = spark.createDataFrame([["What is my name?", "My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val documentAssembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val spanClassifier = BertForQuestionAnswering.pretrained("bert_qa_base_uncased_few_shot_k_128_finetuned_squad_seed_2","en")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(documentAssembler, spanClassifier))
+
+val data = Seq(("What is my name?", "My name is Clara and I live in Berkeley.")).toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+{:.nlu-block}
+```python
+import nlu
+nlu.load("en.answer_question.bert.squad.uncased_seed_2_base_128d_finetuned_few_shot").predict("""What is my name?|||My name is Clara and I live in Berkeley.""")
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|bert_qa_base_uncased_few_shot_k_128_finetuned_squad_seed_2|
+|Compatibility:|Spark NLP 5.2.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|407.2 MB|
+|Case sensitive:|false|
+|Max sentence length:|512|
+
+## References
+
+References
+
+- https://huggingface.co/anas-awadalla/bert-base-uncased-few-shot-k-128-finetuned-squad-seed-2
\ No newline at end of file
diff --git a/docs/_posts/ahmedlone127/2023-11-14-bert_qa_base_uncased_few_shot_k_32_finetuned_squad_seed_8_en.md b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_base_uncased_few_shot_k_32_finetuned_squad_seed_8_en.md
new file mode 100644
index 00000000000000..22a3950d375bf6
--- /dev/null
+++ b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_base_uncased_few_shot_k_32_finetuned_squad_seed_8_en.md
@@ -0,0 +1,100 @@
+---
+layout: model
+title: English BertForQuestionAnswering Base Uncased model (from anas-awadalla)
+author: John Snow Labs
+name: bert_qa_base_uncased_few_shot_k_32_finetuned_squad_seed_8
+date: 2023-11-14
+tags: [en, open_source, bert, question_answering, onnx]
+task: Question Answering
+language: en
+edition: Spark NLP 5.2.0
+spark_version: 3.0
+supported: true
+engine: onnx
+annotator: BertForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained Question Answering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `bert-base-uncased-few-shot-k-32-finetuned-squad-seed-8` is a English model originally trained by `anas-awadalla`.
+
+## Predicted Entities
+
+
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/bert_qa_base_uncased_few_shot_k_32_finetuned_squad_seed_8_en_5.2.0_3.0_1699993504295.zip){:.button.button-orange.button-orange-trans.arr.button-icon}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/bert_qa_base_uncased_few_shot_k_32_finetuned_squad_seed_8_en_5.2.0_3.0_1699993504295.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+documentAssembler = MultiDocumentAssembler() \
+ .setInputCols(["question", "context"]) \
+ .setOutputCols(["document_question", "document_context"])
+
+spanClassifier = BertForQuestionAnswering.pretrained("bert_qa_base_uncased_few_shot_k_32_finetuned_squad_seed_8","en") \
+ .setInputCols(["document_question", "document_context"]) \
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[documentAssembler, spanClassifier])
+
+data = spark.createDataFrame([["What is my name?", "My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val documentAssembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val spanClassifier = BertForQuestionAnswering.pretrained("bert_qa_base_uncased_few_shot_k_32_finetuned_squad_seed_8","en")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(documentAssembler, spanClassifier))
+
+val data = Seq(("What is my name?", "My name is Clara and I live in Berkeley.")).toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+{:.nlu-block}
+```python
+import nlu
+nlu.load("en.answer_question.bert.squad.uncased_seed_8_base_32d_finetuned_few_shot").predict("""What is my name?|||My name is Clara and I live in Berkeley.""")
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|bert_qa_base_uncased_few_shot_k_32_finetuned_squad_seed_8|
+|Compatibility:|Spark NLP 5.2.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|407.2 MB|
+|Case sensitive:|false|
+|Max sentence length:|512|
+
+## References
+
+References
+
+- https://huggingface.co/anas-awadalla/bert-base-uncased-few-shot-k-32-finetuned-squad-seed-8
\ No newline at end of file
diff --git a/docs/_posts/ahmedlone127/2023-11-14-bert_qa_base_uncased_few_shot_k_64_finetuned_squad_seed_4_en.md b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_base_uncased_few_shot_k_64_finetuned_squad_seed_4_en.md
new file mode 100644
index 00000000000000..16961e4a509494
--- /dev/null
+++ b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_base_uncased_few_shot_k_64_finetuned_squad_seed_4_en.md
@@ -0,0 +1,100 @@
+---
+layout: model
+title: English BertForQuestionAnswering Base Uncased model (from anas-awadalla)
+author: John Snow Labs
+name: bert_qa_base_uncased_few_shot_k_64_finetuned_squad_seed_4
+date: 2023-11-14
+tags: [en, open_source, bert, question_answering, onnx]
+task: Question Answering
+language: en
+edition: Spark NLP 5.2.0
+spark_version: 3.0
+supported: true
+engine: onnx
+annotator: BertForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained Question Answering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `bert-base-uncased-few-shot-k-64-finetuned-squad-seed-4` is a English model originally trained by `anas-awadalla`.
+
+## Predicted Entities
+
+
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/bert_qa_base_uncased_few_shot_k_64_finetuned_squad_seed_4_en_5.2.0_3.0_1699994169986.zip){:.button.button-orange.button-orange-trans.arr.button-icon}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/bert_qa_base_uncased_few_shot_k_64_finetuned_squad_seed_4_en_5.2.0_3.0_1699994169986.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+documentAssembler = MultiDocumentAssembler() \
+ .setInputCols(["question", "context"]) \
+ .setOutputCols(["document_question", "document_context"])
+
+spanClassifier = BertForQuestionAnswering.pretrained("bert_qa_base_uncased_few_shot_k_64_finetuned_squad_seed_4","en") \
+ .setInputCols(["document_question", "document_context"]) \
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[documentAssembler, spanClassifier])
+
+data = spark.createDataFrame([["What is my name?", "My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val documentAssembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val spanClassifier = BertForQuestionAnswering.pretrained("bert_qa_base_uncased_few_shot_k_64_finetuned_squad_seed_4","en")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(documentAssembler, spanClassifier))
+
+val data = Seq(("What is my name?", "My name is Clara and I live in Berkeley.")).toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+{:.nlu-block}
+```python
+import nlu
+nlu.load("en.answer_question.bert.squad.uncased_seed_4_base_64d_finetuned_few_shot").predict("""What is my name?|||My name is Clara and I live in Berkeley.""")
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|bert_qa_base_uncased_few_shot_k_64_finetuned_squad_seed_4|
+|Compatibility:|Spark NLP 5.2.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|407.2 MB|
+|Case sensitive:|false|
+|Max sentence length:|512|
+
+## References
+
+References
+
+- https://huggingface.co/anas-awadalla/bert-base-uncased-few-shot-k-64-finetuned-squad-seed-4
\ No newline at end of file
diff --git a/docs/_posts/ahmedlone127/2023-11-14-bert_qa_base_uncased_pretrain_finetuned_coqa_falttened_en.md b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_base_uncased_pretrain_finetuned_coqa_falttened_en.md
new file mode 100644
index 00000000000000..6d437c3d4ddadc
--- /dev/null
+++ b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_base_uncased_pretrain_finetuned_coqa_falttened_en.md
@@ -0,0 +1,100 @@
+---
+layout: model
+title: English BertForQuestionAnswering Base Uncased model (from alistvt)
+author: John Snow Labs
+name: bert_qa_base_uncased_pretrain_finetuned_coqa_falttened
+date: 2023-11-14
+tags: [en, open_source, bert, question_answering, onnx]
+task: Question Answering
+language: en
+edition: Spark NLP 5.2.0
+spark_version: 3.0
+supported: true
+engine: onnx
+annotator: BertForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained Question Answering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `bert-base-uncased-pretrain-finetuned-coqa-falttened` is a English model originally trained by `alistvt`.
+
+## Predicted Entities
+
+
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/bert_qa_base_uncased_pretrain_finetuned_coqa_falttened_en_5.2.0_3.0_1699994432857.zip){:.button.button-orange.button-orange-trans.arr.button-icon}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/bert_qa_base_uncased_pretrain_finetuned_coqa_falttened_en_5.2.0_3.0_1699994432857.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+documentAssembler = MultiDocumentAssembler() \
+ .setInputCols(["question", "context"]) \
+ .setOutputCols(["document_question", "document_context"])
+
+spanClassifier = BertForQuestionAnswering.pretrained("bert_qa_base_uncased_pretrain_finetuned_coqa_falttened","en") \
+ .setInputCols(["document_question", "document_context"]) \
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[documentAssembler, spanClassifier])
+
+data = spark.createDataFrame([["What is my name?", "My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val documentAssembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val spanClassifier = BertForQuestionAnswering.pretrained("bert_qa_base_uncased_pretrain_finetuned_coqa_falttened","en")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(documentAssembler, spanClassifier))
+
+val data = Seq(("What is my name?", "My name is Clara and I live in Berkeley.")).toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+{:.nlu-block}
+```python
+import nlu
+nlu.load("en.answer_question.bert.uncased_base_finetuned.by_alistvt").predict("""What is my name?|||My name is Clara and I live in Berkeley.""")
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|bert_qa_base_uncased_pretrain_finetuned_coqa_falttened|
+|Compatibility:|Spark NLP 5.2.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|407.1 MB|
+|Case sensitive:|false|
+|Max sentence length:|512|
+
+## References
+
+References
+
+- https://huggingface.co/alistvt/bert-base-uncased-pretrain-finetuned-coqa-falttened
\ No newline at end of file
diff --git a/docs/_posts/ahmedlone127/2023-11-14-bert_qa_bdickson_bert_base_uncased_finetuned_squad_en.md b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_bdickson_bert_base_uncased_finetuned_squad_en.md
new file mode 100644
index 00000000000000..20958975177c72
--- /dev/null
+++ b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_bdickson_bert_base_uncased_finetuned_squad_en.md
@@ -0,0 +1,108 @@
+---
+layout: model
+title: English BertForQuestionAnswering model (from bdickson)
+author: John Snow Labs
+name: bert_qa_bdickson_bert_base_uncased_finetuned_squad
+date: 2023-11-14
+tags: [en, open_source, question_answering, bert, onnx]
+task: Question Answering
+language: en
+edition: Spark NLP 5.2.0
+spark_version: 3.0
+supported: true
+engine: onnx
+annotator: BertForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained Question Answering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `bert-base-uncased-finetuned-squad` is a English model orginally trained by `bdickson`.
+
+## Predicted Entities
+
+
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/bert_qa_bdickson_bert_base_uncased_finetuned_squad_en_5.2.0_3.0_1699996011166.zip){:.button.button-orange.button-orange-trans.arr.button-icon}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/bert_qa_bdickson_bert_base_uncased_finetuned_squad_en_5.2.0_3.0_1699996011166.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+document_assembler = MultiDocumentAssembler() \
+.setInputCols(["question", "context"]) \
+.setOutputCols(["document_question", "document_context"])
+
+spanClassifier = BertForQuestionAnswering.pretrained("bert_qa_bdickson_bert_base_uncased_finetuned_squad","en") \
+.setInputCols(["document_question", "document_context"]) \
+.setOutputCol("answer") \
+.setCaseSensitive(True)
+
+pipeline = Pipeline().setStages([
+document_assembler,
+spanClassifier
+])
+
+example = spark.createDataFrame([["What's my name?", "My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(example).transform(example)
+```
+```scala
+val document = new MultiDocumentAssembler()
+.setInputCols("question", "context")
+.setOutputCols("document_question", "document_context")
+
+val spanClassifier = BertForQuestionAnswering
+.pretrained("bert_qa_bdickson_bert_base_uncased_finetuned_squad","en")
+.setInputCols(Array("document_question", "document_context"))
+.setOutputCol("answer")
+.setCaseSensitive(true)
+.setMaxSentenceLength(512)
+
+val pipeline = new Pipeline().setStages(Array(document, spanClassifier))
+
+val example = Seq(
+("Where was John Lenon born?", "John Lenon was born in London and lived in Paris. My name is Sarah and I live in London."),
+("What's my name?", "My name is Clara and I live in Berkeley."))
+.toDF("question", "context")
+
+val result = pipeline.fit(example).transform(example)
+```
+
+{:.nlu-block}
+```python
+import nlu
+nlu.load("en.answer_question.squad.bert.base_uncased.by_bdickson").predict("""What's my name?|||My name is Clara and I live in Berkeley.""")
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|bert_qa_bdickson_bert_base_uncased_finetuned_squad|
+|Compatibility:|Spark NLP 5.2.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|407.2 MB|
+|Case sensitive:|false|
+|Max sentence length:|512|
+
+## References
+
+References
+
+- https://huggingface.co/bdickson/bert-base-uncased-finetuned-squad
\ No newline at end of file
diff --git a/docs/_posts/ahmedlone127/2023-11-14-bert_qa_bert_all_en.md b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_bert_all_en.md
new file mode 100644
index 00000000000000..449c217ecdfed8
--- /dev/null
+++ b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_bert_all_en.md
@@ -0,0 +1,108 @@
+---
+layout: model
+title: English BertForQuestionAnswering model (from krinal214)
+author: John Snow Labs
+name: bert_qa_bert_all
+date: 2023-11-14
+tags: [en, open_source, question_answering, bert, onnx]
+task: Question Answering
+language: en
+edition: Spark NLP 5.2.0
+spark_version: 3.0
+supported: true
+engine: onnx
+annotator: BertForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained Question Answering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `bert-all` is a English model orginally trained by `krinal214`.
+
+## Predicted Entities
+
+
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/bert_qa_bert_all_en_5.2.0_3.0_1699994404485.zip){:.button.button-orange.button-orange-trans.arr.button-icon}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/bert_qa_bert_all_en_5.2.0_3.0_1699994404485.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+document_assembler = MultiDocumentAssembler() \
+.setInputCols(["question", "context"]) \
+.setOutputCols(["document_question", "document_context"])
+
+spanClassifier = BertForQuestionAnswering.pretrained("bert_qa_bert_all","en") \
+.setInputCols(["document_question", "document_context"]) \
+.setOutputCol("answer") \
+.setCaseSensitive(True)
+
+pipeline = Pipeline().setStages([
+document_assembler,
+spanClassifier
+])
+
+example = spark.createDataFrame([["What's my name?", "My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(example).transform(example)
+```
+```scala
+val document = new MultiDocumentAssembler()
+.setInputCols("question", "context")
+.setOutputCols("document_question", "document_context")
+
+val spanClassifier = BertForQuestionAnswering
+.pretrained("bert_qa_bert_all","en")
+.setInputCols(Array("document_question", "document_context"))
+.setOutputCol("answer")
+.setCaseSensitive(true)
+.setMaxSentenceLength(512)
+
+val pipeline = new Pipeline().setStages(Array(document, spanClassifier))
+
+val example = Seq(
+("Where was John Lenon born?", "John Lenon was born in London and lived in Paris. My name is Sarah and I live in London."),
+("What's my name?", "My name is Clara and I live in Berkeley."))
+.toDF("question", "context")
+
+val result = pipeline.fit(example).transform(example)
+```
+
+{:.nlu-block}
+```python
+import nlu
+nlu.load("en.answer_question.tydiqa.bert").predict("""What's my name?|||My name is Clara and I live in Berkeley.""")
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|bert_qa_bert_all|
+|Compatibility:|Spark NLP 5.2.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|665.0 MB|
+|Case sensitive:|true|
+|Max sentence length:|512|
+
+## References
+
+References
+
+- https://huggingface.co/krinal214/bert-all
\ No newline at end of file
diff --git a/docs/_posts/ahmedlone127/2023-11-14-bert_qa_bert_all_squad_all_translated_en.md b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_bert_all_squad_all_translated_en.md
new file mode 100644
index 00000000000000..63adf7243a1992
--- /dev/null
+++ b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_bert_all_squad_all_translated_en.md
@@ -0,0 +1,108 @@
+---
+layout: model
+title: English BertForQuestionAnswering model (from krinal214)
+author: John Snow Labs
+name: bert_qa_bert_all_squad_all_translated
+date: 2023-11-14
+tags: [en, open_source, question_answering, bert, onnx]
+task: Question Answering
+language: en
+edition: Spark NLP 5.2.0
+spark_version: 3.0
+supported: true
+engine: onnx
+annotator: BertForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained Question Answering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `bert-all-squad_all_translated` is a English model orginally trained by `krinal214`.
+
+## Predicted Entities
+
+
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/bert_qa_bert_all_squad_all_translated_en_5.2.0_3.0_1699994792788.zip){:.button.button-orange.button-orange-trans.arr.button-icon}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/bert_qa_bert_all_squad_all_translated_en_5.2.0_3.0_1699994792788.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+document_assembler = MultiDocumentAssembler() \
+.setInputCols(["question", "context"]) \
+.setOutputCols(["document_question", "document_context"])
+
+spanClassifier = BertForQuestionAnswering.pretrained("bert_qa_bert_all_squad_all_translated","en") \
+.setInputCols(["document_question", "document_context"]) \
+.setOutputCol("answer") \
+.setCaseSensitive(True)
+
+pipeline = Pipeline().setStages([
+document_assembler,
+spanClassifier
+])
+
+example = spark.createDataFrame([["What's my name?", "My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(example).transform(example)
+```
+```scala
+val document = new MultiDocumentAssembler()
+.setInputCols("question", "context")
+.setOutputCols("document_question", "document_context")
+
+val spanClassifier = BertForQuestionAnswering
+.pretrained("bert_qa_bert_all_squad_all_translated","en")
+.setInputCols(Array("document_question", "document_context"))
+.setOutputCol("answer")
+.setCaseSensitive(true)
+.setMaxSentenceLength(512)
+
+val pipeline = new Pipeline().setStages(Array(document, spanClassifier))
+
+val example = Seq(
+("Where was John Lenon born?", "John Lenon was born in London and lived in Paris. My name is Sarah and I live in London."),
+("What's my name?", "My name is Clara and I live in Berkeley."))
+.toDF("question", "context")
+
+val result = pipeline.fit(example).transform(example)
+```
+
+{:.nlu-block}
+```python
+import nlu
+nlu.load("en.answer_question.squad_translated.bert.by_krinal214").predict("""What's my name?|||My name is Clara and I live in Berkeley.""")
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|bert_qa_bert_all_squad_all_translated|
+|Compatibility:|Spark NLP 5.2.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|665.1 MB|
+|Case sensitive:|true|
+|Max sentence length:|512|
+
+## References
+
+References
+
+- https://huggingface.co/krinal214/bert-all-squad_all_translated
\ No newline at end of file
diff --git a/docs/_posts/ahmedlone127/2023-11-14-bert_qa_bert_all_squad_ben_tel_context_en.md b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_bert_all_squad_ben_tel_context_en.md
new file mode 100644
index 00000000000000..a50f3b6f1bba2f
--- /dev/null
+++ b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_bert_all_squad_ben_tel_context_en.md
@@ -0,0 +1,108 @@
+---
+layout: model
+title: English BertForQuestionAnswering model (from krinal214)
+author: John Snow Labs
+name: bert_qa_bert_all_squad_ben_tel_context
+date: 2023-11-14
+tags: [en, open_source, question_answering, bert, onnx]
+task: Question Answering
+language: en
+edition: Spark NLP 5.2.0
+spark_version: 3.0
+supported: true
+engine: onnx
+annotator: BertForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained Question Answering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `bert-all-squad_ben_tel_context` is an English model originally trained by `krinal214`.
+
+## Predicted Entities
+
+
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/bert_qa_bert_all_squad_ben_tel_context_en_5.2.0_3.0_1699993841524.zip){:.button.button-orange.button-orange-trans.arr.button-icon}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/bert_qa_bert_all_squad_ben_tel_context_en_5.2.0_3.0_1699993841524.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+document_assembler = MultiDocumentAssembler() \
+.setInputCols(["question", "context"]) \
+.setOutputCols(["document_question", "document_context"])
+
+spanClassifier = BertForQuestionAnswering.pretrained("bert_qa_bert_all_squad_ben_tel_context","en") \
+.setInputCols(["document_question", "document_context"]) \
+.setOutputCol("answer") \
+.setCaseSensitive(True)
+
+pipeline = Pipeline().setStages([
+document_assembler,
+spanClassifier
+])
+
+example = spark.createDataFrame([["What's my name?", "My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(example).transform(example)
+```
+```scala
+val document = new MultiDocumentAssembler()
+.setInputCols("question", "context")
+.setOutputCols("document_question", "document_context")
+
+val spanClassifier = BertForQuestionAnswering
+.pretrained("bert_qa_bert_all_squad_ben_tel_context","en")
+.setInputCols(Array("document_question", "document_context"))
+.setOutputCol("answer")
+.setCaseSensitive(true)
+.setMaxSentenceLength(512)
+
+val pipeline = new Pipeline().setStages(Array(document, spanClassifier))
+
+val example = Seq(
+("Where was John Lenon born?", "John Lenon was born in London and lived in Paris. My name is Sarah and I live in London."),
+("What's my name?", "My name is Clara and I live in Berkeley."))
+.toDF("question", "context")
+
+val result = pipeline.fit(example).transform(example)
+```
+
+{:.nlu-block}
+```python
+import nlu
+nlu.load("en.answer_question.squad_ben_tel.bert.by_krinal214").predict("""What's my name?|||My name is Clara and I live in Berkeley.""")
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|bert_qa_bert_all_squad_ben_tel_context|
+|Compatibility:|Spark NLP 5.2.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[sentence, token]|
+|Output Labels:|[embeddings]|
+|Language:|en|
+|Size:|665.0 MB|
+|Case sensitive:|true|
+|Max sentence length:|512|
+
+## References
+
+References
+
+- https://huggingface.co/krinal214/bert-all-squad_ben_tel_context
\ No newline at end of file
diff --git a/docs/_posts/ahmedlone127/2023-11-14-bert_qa_bert_all_squad_que_translated_en.md b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_bert_all_squad_que_translated_en.md
new file mode 100644
index 00000000000000..e7d1a2b74fa576
--- /dev/null
+++ b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_bert_all_squad_que_translated_en.md
@@ -0,0 +1,108 @@
+---
+layout: model
+title: English BertForQuestionAnswering model (from krinal214)
+author: John Snow Labs
+name: bert_qa_bert_all_squad_que_translated
+date: 2023-11-14
+tags: [en, open_source, question_answering, bert, onnx]
+task: Question Answering
+language: en
+edition: Spark NLP 5.2.0
+spark_version: 3.0
+supported: true
+engine: onnx
+annotator: BertForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained Question Answering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `bert-all-squad_que_translated` is an English model originally trained by `krinal214`.
+
+## Predicted Entities
+
+
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/bert_qa_bert_all_squad_que_translated_en_5.2.0_3.0_1699996407002.zip){:.button.button-orange.button-orange-trans.arr.button-icon}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/bert_qa_bert_all_squad_que_translated_en_5.2.0_3.0_1699996407002.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+document_assembler = MultiDocumentAssembler() \
+.setInputCols(["question", "context"]) \
+.setOutputCols(["document_question", "document_context"])
+
+spanClassifier = BertForQuestionAnswering.pretrained("bert_qa_bert_all_squad_que_translated","en") \
+.setInputCols(["document_question", "document_context"]) \
+.setOutputCol("answer") \
+.setCaseSensitive(True)
+
+pipeline = Pipeline().setStages([
+document_assembler,
+spanClassifier
+])
+
+example = spark.createDataFrame([["What's my name?", "My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(example).transform(example)
+```
+```scala
+val document = new MultiDocumentAssembler()
+.setInputCols("question", "context")
+.setOutputCols("document_question", "document_context")
+
+val spanClassifier = BertForQuestionAnswering
+.pretrained("bert_qa_bert_all_squad_que_translated","en")
+.setInputCols(Array("document_question", "document_context"))
+.setOutputCol("answer")
+.setCaseSensitive(true)
+.setMaxSentenceLength(512)
+
+val pipeline = new Pipeline().setStages(Array(document, spanClassifier))
+
+val example = Seq(
+("Where was John Lenon born?", "John Lenon was born in London and lived in Paris. My name is Sarah and I live in London."),
+("What's my name?", "My name is Clara and I live in Berkeley."))
+.toDF("question", "context")
+
+val result = pipeline.fit(example).transform(example)
+```
+
+{:.nlu-block}
+```python
+import nlu
+nlu.load("en.answer_question.squad_translated.bert.que.by_krinal214").predict("""What's my name?|||My name is Clara and I live in Berkeley.""")
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|bert_qa_bert_all_squad_que_translated|
+|Compatibility:|Spark NLP 5.2.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[sentence, token]|
+|Output Labels:|[embeddings]|
+|Language:|en|
+|Size:|665.1 MB|
+|Case sensitive:|true|
+|Max sentence length:|512|
+
+## References
+
+References
+
+- https://huggingface.co/krinal214/bert-all-squad_que_translated
\ No newline at end of file
diff --git a/docs/_posts/ahmedlone127/2023-11-14-bert_qa_bert_all_translated_en.md b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_bert_all_translated_en.md
new file mode 100644
index 00000000000000..999ccb618f1d2e
--- /dev/null
+++ b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_bert_all_translated_en.md
@@ -0,0 +1,108 @@
+---
+layout: model
+title: English BertForQuestionAnswering model (from krinal214)
+author: John Snow Labs
+name: bert_qa_bert_all_translated
+date: 2023-11-14
+tags: [en, open_source, question_answering, bert, onnx]
+task: Question Answering
+language: en
+edition: Spark NLP 5.2.0
+spark_version: 3.0
+supported: true
+engine: onnx
+annotator: BertForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained Question Answering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `bert-all-translated` is an English model originally trained by `krinal214`.
+
+## Predicted Entities
+
+
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/bert_qa_bert_all_translated_en_5.2.0_3.0_1699994248431.zip){:.button.button-orange.button-orange-trans.arr.button-icon}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/bert_qa_bert_all_translated_en_5.2.0_3.0_1699994248431.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+document_assembler = MultiDocumentAssembler() \
+.setInputCols(["question", "context"]) \
+.setOutputCols(["document_question", "document_context"])
+
+spanClassifier = BertForQuestionAnswering.pretrained("bert_qa_bert_all_translated","en") \
+.setInputCols(["document_question", "document_context"]) \
+.setOutputCol("answer") \
+.setCaseSensitive(True)
+
+pipeline = Pipeline().setStages([
+document_assembler,
+spanClassifier
+])
+
+example = spark.createDataFrame([["What's my name?", "My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(example).transform(example)
+```
+```scala
+val document = new MultiDocumentAssembler()
+.setInputCols("question", "context")
+.setOutputCols("document_question", "document_context")
+
+val spanClassifier = BertForQuestionAnswering
+.pretrained("bert_qa_bert_all_translated","en")
+.setInputCols(Array("document_question", "document_context"))
+.setOutputCol("answer")
+.setCaseSensitive(true)
+.setMaxSentenceLength(512)
+
+val pipeline = new Pipeline().setStages(Array(document, spanClassifier))
+
+val example = Seq(
+("Where was John Lenon born?", "John Lenon was born in London and lived in Paris. My name is Sarah and I live in London."),
+("What's my name?", "My name is Clara and I live in Berkeley."))
+.toDF("question", "context")
+
+val result = pipeline.fit(example).transform(example)
+```
+
+{:.nlu-block}
+```python
+import nlu
+nlu.load("en.answer_question.bert.by_krinal214").predict("""What's my name?|||My name is Clara and I live in Berkeley.""")
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|bert_qa_bert_all_translated|
+|Compatibility:|Spark NLP 5.2.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[sentence, token]|
+|Output Labels:|[embeddings]|
+|Language:|en|
+|Size:|665.0 MB|
+|Case sensitive:|true|
+|Max sentence length:|512|
+
+## References
+
+References
+
+- https://huggingface.co/krinal214/bert-all-translated
\ No newline at end of file
diff --git a/docs/_posts/ahmedlone127/2023-11-14-bert_qa_bert_base_2048_full_trivia_copied_embeddings_en.md b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_bert_base_2048_full_trivia_copied_embeddings_en.md
new file mode 100644
index 00000000000000..d59b5bc852b3ec
--- /dev/null
+++ b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_bert_base_2048_full_trivia_copied_embeddings_en.md
@@ -0,0 +1,108 @@
+---
+layout: model
+title: English BertForQuestionAnswering model (from MrAnderson)
+author: John Snow Labs
+name: bert_qa_bert_base_2048_full_trivia_copied_embeddings
+date: 2023-11-14
+tags: [en, open_source, question_answering, bert, onnx]
+task: Question Answering
+language: en
+edition: Spark NLP 5.2.0
+spark_version: 3.0
+supported: true
+engine: onnx
+annotator: BertForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained Question Answering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `bert-base-2048-full-trivia-copied-embeddings` is an English model originally trained by `MrAnderson`.
+
+## Predicted Entities
+
+
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/bert_qa_bert_base_2048_full_trivia_copied_embeddings_en_5.2.0_3.0_1699996762251.zip){:.button.button-orange.button-orange-trans.arr.button-icon}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/bert_qa_bert_base_2048_full_trivia_copied_embeddings_en_5.2.0_3.0_1699996762251.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+document_assembler = MultiDocumentAssembler() \
+.setInputCols(["question", "context"]) \
+.setOutputCols(["document_question", "document_context"])
+
+spanClassifier = BertForQuestionAnswering.pretrained("bert_qa_bert_base_2048_full_trivia_copied_embeddings","en") \
+.setInputCols(["document_question", "document_context"]) \
+.setOutputCol("answer") \
+.setCaseSensitive(True)
+
+pipeline = Pipeline().setStages([
+document_assembler,
+spanClassifier
+])
+
+example = spark.createDataFrame([["What's my name?", "My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(example).transform(example)
+```
+```scala
+val document = new MultiDocumentAssembler()
+.setInputCols("question", "context")
+.setOutputCols("document_question", "document_context")
+
+val spanClassifier = BertForQuestionAnswering
+.pretrained("bert_qa_bert_base_2048_full_trivia_copied_embeddings","en")
+.setInputCols(Array("document_question", "document_context"))
+.setOutputCol("answer")
+.setCaseSensitive(true)
+.setMaxSentenceLength(512)
+
+val pipeline = new Pipeline().setStages(Array(document, spanClassifier))
+
+val example = Seq(
+("Where was John Lenon born?", "John Lenon was born in London and lived in Paris. My name is Sarah and I live in London."),
+("What's my name?", "My name is Clara and I live in Berkeley."))
+.toDF("question", "context")
+
+val result = pipeline.fit(example).transform(example)
+```
+
+{:.nlu-block}
+```python
+import nlu
+nlu.load("en.answer_question.trivia.bert.base_2048.by_MrAnderson").predict("""What's my name?|||My name is Clara and I live in Berkeley.""")
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|bert_qa_bert_base_2048_full_trivia_copied_embeddings|
+|Compatibility:|Spark NLP 5.2.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[sentence, token]|
+|Output Labels:|[embeddings]|
+|Language:|en|
+|Size:|411.6 MB|
+|Case sensitive:|true|
+|Max sentence length:|512|
+
+## References
+
+References
+
+- https://huggingface.co/MrAnderson/bert-base-2048-full-trivia-copied-embeddings
\ No newline at end of file
diff --git a/docs/_posts/ahmedlone127/2023-11-14-bert_qa_bert_base_cased_chaii_en.md b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_bert_base_cased_chaii_en.md
new file mode 100644
index 00000000000000..f69cecb207c2a5
--- /dev/null
+++ b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_bert_base_cased_chaii_en.md
@@ -0,0 +1,108 @@
+---
+layout: model
+title: English BertForQuestionAnswering model (from SauravMaheshkar)
+author: John Snow Labs
+name: bert_qa_bert_base_cased_chaii
+date: 2023-11-14
+tags: [en, open_source, question_answering, bert, onnx]
+task: Question Answering
+language: en
+edition: Spark NLP 5.2.0
+spark_version: 3.0
+supported: true
+engine: onnx
+annotator: BertForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained Question Answering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `bert-base-cased-chaii` is an English model originally trained by `SauravMaheshkar`.
+
+## Predicted Entities
+
+
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/bert_qa_bert_base_cased_chaii_en_5.2.0_3.0_1699994506880.zip){:.button.button-orange.button-orange-trans.arr.button-icon}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/bert_qa_bert_base_cased_chaii_en_5.2.0_3.0_1699994506880.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+document_assembler = MultiDocumentAssembler() \
+.setInputCols(["question", "context"]) \
+.setOutputCols(["document_question", "document_context"])
+
+spanClassifier = BertForQuestionAnswering.pretrained("bert_qa_bert_base_cased_chaii","en") \
+.setInputCols(["document_question", "document_context"]) \
+.setOutputCol("answer") \
+.setCaseSensitive(True)
+
+pipeline = Pipeline().setStages([
+document_assembler,
+spanClassifier
+])
+
+example = spark.createDataFrame([["What's my name?", "My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(example).transform(example)
+```
+```scala
+val document = new MultiDocumentAssembler()
+.setInputCols("question", "context")
+.setOutputCols("document_question", "document_context")
+
+val spanClassifier = BertForQuestionAnswering
+.pretrained("bert_qa_bert_base_cased_chaii","en")
+.setInputCols(Array("document_question", "document_context"))
+.setOutputCol("answer")
+.setCaseSensitive(true)
+.setMaxSentenceLength(512)
+
+val pipeline = new Pipeline().setStages(Array(document, spanClassifier))
+
+val example = Seq(
+("Where was John Lenon born?", "John Lenon was born in London and lived in Paris. My name is Sarah and I live in London."),
+("What's my name?", "My name is Clara and I live in Berkeley."))
+.toDF("question", "context")
+
+val result = pipeline.fit(example).transform(example)
+```
+
+{:.nlu-block}
+```python
+import nlu
+nlu.load("en.answer_question.chaii.bert.base_cased").predict("""What's my name?|||My name is Clara and I live in Berkeley.""")
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|bert_qa_bert_base_cased_chaii|
+|Compatibility:|Spark NLP 5.2.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[sentence, token]|
+|Output Labels:|[embeddings]|
+|Language:|en|
+|Size:|403.6 MB|
+|Case sensitive:|true|
+|Max sentence length:|512|
+
+## References
+
+References
+
+- https://huggingface.co/SauravMaheshkar/bert-base-cased-chaii
\ No newline at end of file
diff --git a/docs/_posts/ahmedlone127/2023-11-14-bert_qa_bert_base_faquad_en.md b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_bert_base_faquad_en.md
new file mode 100644
index 00000000000000..9074135cff3d9a
--- /dev/null
+++ b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_bert_base_faquad_en.md
@@ -0,0 +1,108 @@
+---
+layout: model
+title: English BertForQuestionAnswering model (from ricardo-filho)
+author: John Snow Labs
+name: bert_qa_bert_base_faquad
+date: 2023-11-14
+tags: [en, open_source, question_answering, bert, onnx]
+task: Question Answering
+language: en
+edition: Spark NLP 5.2.0
+spark_version: 3.0
+supported: true
+engine: onnx
+annotator: BertForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained Question Answering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `bert_base_faquad` is an English model originally trained by `ricardo-filho`.
+
+## Predicted Entities
+
+
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/bert_qa_bert_base_faquad_en_5.2.0_3.0_1699995091550.zip){:.button.button-orange.button-orange-trans.arr.button-icon}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/bert_qa_bert_base_faquad_en_5.2.0_3.0_1699995091550.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+document_assembler = MultiDocumentAssembler() \
+.setInputCols(["question", "context"]) \
+.setOutputCols(["document_question", "document_context"])
+
+spanClassifier = BertForQuestionAnswering.pretrained("bert_qa_bert_base_faquad","en") \
+.setInputCols(["document_question", "document_context"]) \
+.setOutputCol("answer") \
+.setCaseSensitive(True)
+
+pipeline = Pipeline().setStages([
+document_assembler,
+spanClassifier
+])
+
+example = spark.createDataFrame([["What's my name?", "My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(example).transform(example)
+```
+```scala
+val document = new MultiDocumentAssembler()
+.setInputCols("question", "context")
+.setOutputCols("document_question", "document_context")
+
+val spanClassifier = BertForQuestionAnswering
+.pretrained("bert_qa_bert_base_faquad","en")
+.setInputCols(Array("document_question", "document_context"))
+.setOutputCol("answer")
+.setCaseSensitive(true)
+.setMaxSentenceLength(512)
+
+val pipeline = new Pipeline().setStages(Array(document, spanClassifier))
+
+val example = Seq(
+("Where was John Lenon born?", "John Lenon was born in London and lived in Paris. My name is Sarah and I live in London."),
+("What's my name?", "My name is Clara and I live in Berkeley."))
+.toDF("question", "context")
+
+val result = pipeline.fit(example).transform(example)
+```
+
+{:.nlu-block}
+```python
+import nlu
+nlu.load("en.answer_question.bert.base").predict("""What's my name?|||My name is Clara and I live in Berkeley.""")
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|bert_qa_bert_base_faquad|
+|Compatibility:|Spark NLP 5.2.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[sentence, token]|
+|Output Labels:|[embeddings]|
+|Language:|en|
+|Size:|405.9 MB|
+|Case sensitive:|true|
+|Max sentence length:|512|
+
+## References
+
+References
+
+- https://huggingface.co/ricardo-filho/bert_base_faquad
\ No newline at end of file
diff --git a/docs/_posts/ahmedlone127/2023-11-14-bert_qa_bert_base_multilingual_cased_finetune_qa_th.md b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_bert_base_multilingual_cased_finetune_qa_th.md
new file mode 100644
index 00000000000000..cabb6bd23312a7
--- /dev/null
+++ b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_bert_base_multilingual_cased_finetune_qa_th.md
@@ -0,0 +1,110 @@
+---
+layout: model
+title: Thai BertForQuestionAnswering model (from airesearch)
+author: John Snow Labs
+name: bert_qa_bert_base_multilingual_cased_finetune_qa
+date: 2023-11-14
+tags: [th, open_source, question_answering, bert, onnx]
+task: Question Answering
+language: th
+edition: Spark NLP 5.2.0
+spark_version: 3.0
+supported: true
+engine: onnx
+annotator: BertForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained Question Answering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `bert-base-multilingual-cased-finetune-qa` is a Thai model originally trained by `airesearch`.
+
+## Predicted Entities
+
+
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/bert_qa_bert_base_multilingual_cased_finetune_qa_th_5.2.0_3.0_1699994217748.zip){:.button.button-orange.button-orange-trans.arr.button-icon}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/bert_qa_bert_base_multilingual_cased_finetune_qa_th_5.2.0_3.0_1699994217748.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+document_assembler = MultiDocumentAssembler() \
+.setInputCols(["question", "context"]) \
+.setOutputCols(["document_question", "document_context"])
+
+spanClassifier = BertForQuestionAnswering.pretrained("bert_qa_bert_base_multilingual_cased_finetune_qa","th") \
+.setInputCols(["document_question", "document_context"]) \
+.setOutputCol("answer") \
+.setCaseSensitive(True)
+
+pipeline = Pipeline().setStages([
+document_assembler,
+spanClassifier
+])
+
+example = spark.createDataFrame([["What's my name?", "My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(example).transform(example)
+```
+```scala
+val document = new MultiDocumentAssembler()
+.setInputCols("question", "context")
+.setOutputCols("document_question", "document_context")
+
+val spanClassifier = BertForQuestionAnswering
+.pretrained("bert_qa_bert_base_multilingual_cased_finetune_qa","th")
+.setInputCols(Array("document_question", "document_context"))
+.setOutputCol("answer")
+.setCaseSensitive(true)
+.setMaxSentenceLength(512)
+
+val pipeline = new Pipeline().setStages(Array(document, spanClassifier))
+
+val example = Seq(
+("Where was John Lenon born?", "John Lenon was born in London and lived in Paris. My name is Sarah and I live in London."),
+("What's my name?", "My name is Clara and I live in Berkeley."))
+.toDF("question", "context")
+
+val result = pipeline.fit(example).transform(example)
+```
+
+{:.nlu-block}
+```python
+import nlu
+nlu.load("th.answer_question.bert.multilingual_base_cased").predict("""What's my name?|||My name is Clara and I live in Berkeley.""")
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|bert_qa_bert_base_multilingual_cased_finetune_qa|
+|Compatibility:|Spark NLP 5.2.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[sentence, token]|
+|Output Labels:|[embeddings]|
+|Language:|th|
+|Size:|665.0 MB|
+|Case sensitive:|true|
+|Max sentence length:|512|
+
+## References
+
+References
+
+- https://huggingface.co/airesearch/bert-base-multilingual-cased-finetune-qa
+- https://github.com/vistec-AI/thai2transformers/blob/dev/scripts/downstream/train_question_answering_lm_finetuning.py
+- https://wandb.ai/cstorm125/wangchanberta-qa
\ No newline at end of file
diff --git a/docs/_posts/ahmedlone127/2023-11-14-bert_qa_bert_base_multilingual_cased_finetuned_chaii_ta.md b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_bert_base_multilingual_cased_finetuned_chaii_ta.md
new file mode 100644
index 00000000000000..1c6019679741f9
--- /dev/null
+++ b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_bert_base_multilingual_cased_finetuned_chaii_ta.md
@@ -0,0 +1,108 @@
+---
+layout: model
+title: Tamil BertForQuestionAnswering model (from SauravMaheshkar)
+author: John Snow Labs
+name: bert_qa_bert_base_multilingual_cased_finetuned_chaii
+date: 2023-11-14
+tags: [open_source, question_answering, bert, ta, onnx]
+task: Question Answering
+language: ta
+edition: Spark NLP 5.2.0
+spark_version: 3.0
+supported: true
+engine: onnx
+annotator: BertForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained Question Answering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `bert-base-multilingual-cased-finetuned-chaii` is a Tamil model originally trained by `SauravMaheshkar`.
+
+## Predicted Entities
+
+
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/bert_qa_bert_base_multilingual_cased_finetuned_chaii_ta_5.2.0_3.0_1699994790461.zip){:.button.button-orange.button-orange-trans.arr.button-icon}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/bert_qa_bert_base_multilingual_cased_finetuned_chaii_ta_5.2.0_3.0_1699994790461.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+document_assembler = MultiDocumentAssembler() \
+.setInputCols(["question", "context"]) \
+.setOutputCols(["document_question", "document_context"])
+
+spanClassifier = BertForQuestionAnswering.pretrained("bert_qa_bert_base_multilingual_cased_finetuned_chaii","ta") \
+.setInputCols(["document_question", "document_context"]) \
+.setOutputCol("answer") \
+.setCaseSensitive(True)
+
+pipeline = Pipeline().setStages([
+document_assembler,
+spanClassifier
+])
+
+example = spark.createDataFrame([["What's my name?", "My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(example).transform(example)
+```
+```scala
+val document = new MultiDocumentAssembler()
+.setInputCols("question", "context")
+.setOutputCols("document_question", "document_context")
+
+val spanClassifier = BertForQuestionAnswering
+.pretrained("bert_qa_bert_base_multilingual_cased_finetuned_chaii","ta")
+.setInputCols(Array("document_question", "document_context"))
+.setOutputCol("answer")
+.setCaseSensitive(true)
+.setMaxSentenceLength(512)
+
+val pipeline = new Pipeline().setStages(Array(document, spanClassifier))
+
+val example = Seq(
+("Where was John Lenon born?", "John Lenon was born in London and lived in Paris. My name is Sarah and I live in London."),
+("What's my name?", "My name is Clara and I live in Berkeley."))
+.toDF("question", "context")
+
+val result = pipeline.fit(example).transform(example)
+```
+
+{:.nlu-block}
+```python
+import nlu
+nlu.load("ta.answer_question.chaii.bert.multilingual_base_cased").predict("""What's my name?|||My name is Clara and I live in Berkeley.""")
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|bert_qa_bert_base_multilingual_cased_finetuned_chaii|
+|Compatibility:|Spark NLP 5.2.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[sentence, token]|
+|Output Labels:|[embeddings]|
+|Language:|ta|
+|Size:|665.0 MB|
+|Case sensitive:|true|
+|Max sentence length:|512|
+
+## References
+
+References
+
+- https://huggingface.co/SauravMaheshkar/bert-base-multilingual-cased-finetuned-chaii
\ No newline at end of file
diff --git a/docs/_posts/ahmedlone127/2023-11-14-bert_qa_bert_base_multilingual_cased_finetuned_klue_ko.md b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_bert_base_multilingual_cased_finetuned_klue_ko.md
new file mode 100644
index 00000000000000..b1ff72947c3117
--- /dev/null
+++ b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_bert_base_multilingual_cased_finetuned_klue_ko.md
@@ -0,0 +1,108 @@
+---
+layout: model
+title: Korean BertForQuestionAnswering model (from obokkkk)
+author: John Snow Labs
+name: bert_qa_bert_base_multilingual_cased_finetuned_klue
+date: 2023-11-14
+tags: [open_source, question_answering, bert, ko, onnx]
+task: Question Answering
+language: ko
+edition: Spark NLP 5.2.0
+spark_version: 3.0
+supported: true
+engine: onnx
+annotator: BertForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained Question Answering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `bert-base-multilingual-cased-finetuned-klue` is a Korean model originally trained by `obokkkk`.
+
+## Predicted Entities
+
+
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/bert_qa_bert_base_multilingual_cased_finetuned_klue_ko_5.2.0_3.0_1699994852278.zip){:.button.button-orange.button-orange-trans.arr.button-icon}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/bert_qa_bert_base_multilingual_cased_finetuned_klue_ko_5.2.0_3.0_1699994852278.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+document_assembler = MultiDocumentAssembler() \
+.setInputCols(["question", "context"]) \
+.setOutputCols(["document_question", "document_context"])
+
+spanClassifier = BertForQuestionAnswering.pretrained("bert_qa_bert_base_multilingual_cased_finetuned_klue","ko") \
+.setInputCols(["document_question", "document_context"]) \
+.setOutputCol("answer") \
+.setCaseSensitive(True)
+
+pipeline = Pipeline().setStages([
+document_assembler,
+spanClassifier
+])
+
+example = spark.createDataFrame([["What's my name?", "My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(example).transform(example)
+```
+```scala
+val document = new MultiDocumentAssembler()
+.setInputCols("question", "context")
+.setOutputCols("document_question", "document_context")
+
+val spanClassifier = BertForQuestionAnswering
+.pretrained("bert_qa_bert_base_multilingual_cased_finetuned_klue","ko")
+.setInputCols(Array("document_question", "document_context"))
+.setOutputCol("answer")
+.setCaseSensitive(true)
+.setMaxSentenceLength(512)
+
+val pipeline = new Pipeline().setStages(Array(document, spanClassifier))
+
+val example = Seq(
+("Where was John Lenon born?", "John Lenon was born in London and lived in Paris. My name is Sarah and I live in London."),
+("What's my name?", "My name is Clara and I live in Berkeley."))
+.toDF("question", "context")
+
+val result = pipeline.fit(example).transform(example)
+```
+
+{:.nlu-block}
+```python
+import nlu
+nlu.load("ko.answer_question.klue.bert.multilingual_base_cased").predict("""What's my name?|||My name is Clara and I live in Berkeley.""")
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|bert_qa_bert_base_multilingual_cased_finetuned_klue|
+|Compatibility:|Spark NLP 5.2.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[sentence, token]|
+|Output Labels:|[embeddings]|
+|Language:|ko|
+|Size:|665.0 MB|
+|Case sensitive:|true|
+|Max sentence length:|512|
+
+## References
+
+References
+
+- https://huggingface.co/obokkkk/bert-base-multilingual-cased-finetuned-klue
\ No newline at end of file
diff --git a/docs/_posts/ahmedlone127/2023-11-14-bert_qa_bert_base_multilingual_cased_korquad_ko.md b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_bert_base_multilingual_cased_korquad_ko.md
new file mode 100644
index 00000000000000..bb13de932e504e
--- /dev/null
+++ b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_bert_base_multilingual_cased_korquad_ko.md
@@ -0,0 +1,108 @@
+---
+layout: model
+title: Korean BertForQuestionAnswering model (from sangrimlee)
+author: John Snow Labs
+name: bert_qa_bert_base_multilingual_cased_korquad
+date: 2023-11-14
+tags: [open_source, question_answering, bert, ko, onnx]
+task: Question Answering
+language: ko
+edition: Spark NLP 5.2.0
+spark_version: 3.0
+supported: true
+engine: onnx
+annotator: BertForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained Question Answering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `bert-base-multilingual-cased-korquad` is a Korean model originally trained by `sangrimlee`.
+
+## Predicted Entities
+
+
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/bert_qa_bert_base_multilingual_cased_korquad_ko_5.2.0_3.0_1699995481370.zip){:.button.button-orange.button-orange-trans.arr.button-icon}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/bert_qa_bert_base_multilingual_cased_korquad_ko_5.2.0_3.0_1699995481370.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+document_assembler = MultiDocumentAssembler() \
+.setInputCols(["question", "context"]) \
+.setOutputCols(["document_question", "document_context"])
+
+spanClassifier = BertForQuestionAnswering.pretrained("bert_qa_bert_base_multilingual_cased_korquad","ko") \
+.setInputCols(["document_question", "document_context"]) \
+.setOutputCol("answer") \
+.setCaseSensitive(True)
+
+pipeline = Pipeline().setStages([
+document_assembler,
+spanClassifier
+])
+
+example = spark.createDataFrame([["What's my name?", "My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(example).transform(example)
+```
+```scala
+val document = new MultiDocumentAssembler()
+.setInputCols("question", "context")
+.setOutputCols("document_question", "document_context")
+
+val spanClassifier = BertForQuestionAnswering
+.pretrained("bert_qa_bert_base_multilingual_cased_korquad","ko")
+.setInputCols(Array("document_question", "document_context"))
+.setOutputCol("answer")
+.setCaseSensitive(true)
+.setMaxSentenceLength(512)
+
+val pipeline = new Pipeline().setStages(Array(document, spanClassifier))
+
+val example = Seq(
+("Where was John Lenon born?", "John Lenon was born in London and lived in Paris. My name is Sarah and I live in London."),
+("What's my name?", "My name is Clara and I live in Berkeley."))
+.toDF("question", "context")
+
+val result = pipeline.fit(example).transform(example)
+```
+
+{:.nlu-block}
+```python
+import nlu
+nlu.load("ko.answer_question.korquad.bert.multilingual_base_cased.by_sangrimlee").predict("""What's my name?|||"My name is Clara and I live in Berkeley.""")
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|bert_qa_bert_base_multilingual_cased_korquad|
+|Compatibility:|Spark NLP 5.2.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[sentence, token]|
+|Output Labels:|[embeddings]|
+|Language:|ko|
+|Size:|665.0 MB|
+|Case sensitive:|true|
+|Max sentence length:|512|
+
+## References
+
+References
+
+- https://huggingface.co/sangrimlee/bert-base-multilingual-cased-korquad
\ No newline at end of file
diff --git a/docs/_posts/ahmedlone127/2023-11-14-bert_qa_bert_base_multilingual_cased_korquad_v1_ko.md b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_bert_base_multilingual_cased_korquad_v1_ko.md
new file mode 100644
index 00000000000000..9218d44efb486a
--- /dev/null
+++ b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_bert_base_multilingual_cased_korquad_v1_ko.md
@@ -0,0 +1,108 @@
+---
+layout: model
+title: Korean BertForQuestionAnswering model (from eliza-dukim)
+author: John Snow Labs
+name: bert_qa_bert_base_multilingual_cased_korquad_v1
+date: 2023-11-14
+tags: [open_source, question_answering, bert, ko, onnx]
+task: Question Answering
+language: ko
+edition: Spark NLP 5.2.0
+spark_version: 3.0
+supported: true
+engine: onnx
+annotator: BertForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained Question Answering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `bert-base-multilingual-cased_korquad-v1` is a Korean model originally trained by `eliza-dukim`.
+
+## Predicted Entities
+
+
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/bert_qa_bert_base_multilingual_cased_korquad_v1_ko_5.2.0_3.0_1699995262571.zip){:.button.button-orange.button-orange-trans.arr.button-icon}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/bert_qa_bert_base_multilingual_cased_korquad_v1_ko_5.2.0_3.0_1699995262571.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+document_assembler = MultiDocumentAssembler() \
+.setInputCols(["question", "context"]) \
+.setOutputCols(["document_question", "document_context"])
+
+spanClassifier = BertForQuestionAnswering.pretrained("bert_qa_bert_base_multilingual_cased_korquad_v1","ko") \
+.setInputCols(["document_question", "document_context"]) \
+.setOutputCol("answer") \
+.setCaseSensitive(True)
+
+pipeline = Pipeline().setStages([
+document_assembler,
+spanClassifier
+])
+
+example = spark.createDataFrame([["What's my name?", "My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(example).transform(example)
+```
+```scala
+val document = new MultiDocumentAssembler()
+.setInputCols("question", "context")
+.setOutputCols("document_question", "document_context")
+
+val spanClassifier = BertForQuestionAnswering
+.pretrained("bert_qa_bert_base_multilingual_cased_korquad_v1","ko")
+.setInputCols(Array("document_question", "document_context"))
+.setOutputCol("answer")
+.setCaseSensitive(true)
+.setMaxSentenceLength(512)
+
+val pipeline = new Pipeline().setStages(Array(document, spanClassifier))
+
+val example = Seq(
+("Where was John Lenon born?", "John Lenon was born in London and lived in Paris. My name is Sarah and I live in London."),
+("What's my name?", "My name is Clara and I live in Berkeley."))
+.toDF("question", "context")
+
+val result = pipeline.fit(example).transform(example)
+```
+
+{:.nlu-block}
+```python
+import nlu
+nlu.load("ko.answer_question.korquad.bert.multilingual_base_cased").predict("""What's my name?|||"My name is Clara and I live in Berkeley.""")
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|bert_qa_bert_base_multilingual_cased_korquad_v1|
+|Compatibility:|Spark NLP 5.2.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[sentence, token]|
+|Output Labels:|[embeddings]|
+|Language:|ko|
+|Size:|665.0 MB|
+|Case sensitive:|true|
+|Max sentence length:|512|
+
+## References
+
+References
+
+- https://huggingface.co/eliza-dukim/bert-base-multilingual-cased_korquad-v1
\ No newline at end of file
diff --git a/docs/_posts/ahmedlone127/2023-11-14-bert_qa_bert_base_multilingual_xquad_xx.md b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_bert_base_multilingual_xquad_xx.md
new file mode 100644
index 00000000000000..f5280bb4b4f3b6
--- /dev/null
+++ b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_bert_base_multilingual_xquad_xx.md
@@ -0,0 +1,109 @@
+---
+layout: model
+title: Multilingual BertForQuestionAnswering model (from alon-albalak)
+author: John Snow Labs
+name: bert_qa_bert_base_multilingual_xquad
+date: 2023-11-14
+tags: [open_source, question_answering, bert, xx, onnx]
+task: Question Answering
+language: xx
+edition: Spark NLP 5.2.0
+spark_version: 3.0
+supported: true
+engine: onnx
+annotator: BertForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained Question Answering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `bert-base-multilingual-xquad` is a Multilingual model originally trained by `alon-albalak`.
+
+## Predicted Entities
+
+
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/bert_qa_bert_base_multilingual_xquad_xx_5.2.0_3.0_1699995793907.zip){:.button.button-orange.button-orange-trans.arr.button-icon}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/bert_qa_bert_base_multilingual_xquad_xx_5.2.0_3.0_1699995793907.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+document_assembler = MultiDocumentAssembler() \
+.setInputCols(["question", "context"]) \
+.setOutputCols(["document_question", "document_context"])
+
+spanClassifier = BertForQuestionAnswering.pretrained("bert_qa_bert_base_multilingual_xquad","xx") \
+.setInputCols(["document_question", "document_context"]) \
+.setOutputCol("answer") \
+.setCaseSensitive(True)
+
+pipeline = Pipeline().setStages([
+document_assembler,
+spanClassifier
+])
+
+example = spark.createDataFrame([["What's my name?", "My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(example).transform(example)
+```
+```scala
+val document = new MultiDocumentAssembler()
+.setInputCols("question", "context")
+.setOutputCols("document_question", "document_context")
+
+val spanClassifier = BertForQuestionAnswering
+.pretrained("bert_qa_bert_base_multilingual_xquad","xx")
+.setInputCols(Array("document_question", "document_context"))
+.setOutputCol("answer")
+.setCaseSensitive(true)
+.setMaxSentenceLength(512)
+
+val pipeline = new Pipeline().setStages(Array(document, spanClassifier))
+
+val example = Seq(
+("Where was John Lenon born?", "John Lenon was born in London and lived in Paris. My name is Sarah and I live in London."),
+("What's my name?", "My name is Clara and I live in Berkeley."))
+.toDF("question", "context")
+
+val result = pipeline.fit(example).transform(example)
+```
+
+{:.nlu-block}
+```python
+import nlu
+nlu.load("xx.answer_question.xquad.bert.multilingual_base").predict("""What's my name?|||"My name is Clara and I live in Berkeley.""")
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|bert_qa_bert_base_multilingual_xquad|
+|Compatibility:|Spark NLP 5.2.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[sentence, token]|
+|Output Labels:|[embeddings]|
+|Language:|xx|
+|Size:|625.5 MB|
+|Case sensitive:|true|
+|Max sentence length:|512|
+
+## References
+
+References
+
+- https://huggingface.co/alon-albalak/bert-base-multilingual-xquad
+- https://github.com/deepmind/xquad
\ No newline at end of file
diff --git a/docs/_posts/ahmedlone127/2023-11-14-bert_qa_bert_base_spanish_wwm_cased_finetuned_qa_mlqa_es.md b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_bert_base_spanish_wwm_cased_finetuned_qa_mlqa_es.md
new file mode 100644
index 00000000000000..41c5f565dfc30d
--- /dev/null
+++ b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_bert_base_spanish_wwm_cased_finetuned_qa_mlqa_es.md
@@ -0,0 +1,108 @@
+---
+layout: model
+title: Castilian, Spanish BertForQuestionAnswering model (from CenIA)
+author: John Snow Labs
+name: bert_qa_bert_base_spanish_wwm_cased_finetuned_qa_mlqa
+date: 2023-11-14
+tags: [open_source, question_answering, bert, es, onnx]
+task: Question Answering
+language: es
+edition: Spark NLP 5.2.0
+spark_version: 3.0
+supported: true
+engine: onnx
+annotator: BertForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained Question Answering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `bert-base-spanish-wwm-cased-finetuned-qa-mlqa` is a Castilian, Spanish model originally trained by `CenIA`.
+
+## Predicted Entities
+
+
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/bert_qa_bert_base_spanish_wwm_cased_finetuned_qa_mlqa_es_5.2.0_3.0_1699996044180.zip){:.button.button-orange.button-orange-trans.arr.button-icon}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/bert_qa_bert_base_spanish_wwm_cased_finetuned_qa_mlqa_es_5.2.0_3.0_1699996044180.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+document_assembler = MultiDocumentAssembler() \
+.setInputCols(["question", "context"]) \
+.setOutputCols(["document_question", "document_context"])
+
+spanClassifier = BertForQuestionAnswering.pretrained("bert_qa_bert_base_spanish_wwm_cased_finetuned_qa_mlqa","es") \
+.setInputCols(["document_question", "document_context"]) \
+.setOutputCol("answer") \
+.setCaseSensitive(True)
+
+pipeline = Pipeline().setStages([
+document_assembler,
+spanClassifier
+])
+
+example = spark.createDataFrame([["What's my name?", "My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(example).transform(example)
+```
+```scala
+val document = new MultiDocumentAssembler()
+.setInputCols("question", "context")
+.setOutputCols("document_question", "document_context")
+
+val spanClassifier = BertForQuestionAnswering
+.pretrained("bert_qa_bert_base_spanish_wwm_cased_finetuned_qa_mlqa","es")
+.setInputCols(Array("document_question", "document_context"))
+.setOutputCol("answer")
+.setCaseSensitive(true)
+.setMaxSentenceLength(512)
+
+val pipeline = new Pipeline().setStages(Array(document, spanClassifier))
+
+val example = Seq(
+("Where was John Lenon born?", "John Lenon was born in London and lived in Paris. My name is Sarah and I live in London."),
+("What's my name?", "My name is Clara and I live in Berkeley."))
+.toDF("question", "context")
+
+val result = pipeline.fit(example).transform(example)
+```
+
+{:.nlu-block}
+```python
+import nlu
+nlu.load("es.answer_question.mlqa.bert.base_cased").predict("""What's my name?|||"My name is Clara and I live in Berkeley.""")
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|bert_qa_bert_base_spanish_wwm_cased_finetuned_qa_mlqa|
+|Compatibility:|Spark NLP 5.2.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[sentence, token]|
+|Output Labels:|[embeddings]|
+|Language:|es|
+|Size:|409.5 MB|
+|Case sensitive:|true|
+|Max sentence length:|512|
+
+## References
+
+References
+
+- https://huggingface.co/CenIA/bert-base-spanish-wwm-cased-finetuned-qa-mlqa
\ No newline at end of file
diff --git a/docs/_posts/ahmedlone127/2023-11-14-bert_qa_bert_base_spanish_wwm_cased_finetuned_qa_sqac_es.md b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_bert_base_spanish_wwm_cased_finetuned_qa_sqac_es.md
new file mode 100644
index 00000000000000..48e56746ef97ab
--- /dev/null
+++ b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_bert_base_spanish_wwm_cased_finetuned_qa_sqac_es.md
@@ -0,0 +1,108 @@
+---
+layout: model
+title: Castilian, Spanish BertForQuestionAnswering model (from CenIA)
+author: John Snow Labs
+name: bert_qa_bert_base_spanish_wwm_cased_finetuned_qa_sqac
+date: 2023-11-14
+tags: [open_source, question_answering, bert, es, onnx]
+task: Question Answering
+language: es
+edition: Spark NLP 5.2.0
+spark_version: 3.0
+supported: true
+engine: onnx
+annotator: BertForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained Question Answering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `bert-base-spanish-wwm-cased-finetuned-qa-sqac` is a Castilian, Spanish model originally trained by `CenIA`.
+
+## Predicted Entities
+
+
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/bert_qa_bert_base_spanish_wwm_cased_finetuned_qa_sqac_es_5.2.0_3.0_1699995110057.zip){:.button.button-orange.button-orange-trans.arr.button-icon}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/bert_qa_bert_base_spanish_wwm_cased_finetuned_qa_sqac_es_5.2.0_3.0_1699995110057.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+document_assembler = MultiDocumentAssembler() \
+.setInputCols(["question", "context"]) \
+.setOutputCols(["document_question", "document_context"])
+
+spanClassifier = BertForQuestionAnswering.pretrained("bert_qa_bert_base_spanish_wwm_cased_finetuned_qa_sqac","es") \
+.setInputCols(["document_question", "document_context"]) \
+.setOutputCol("answer") \
+.setCaseSensitive(True)
+
+pipeline = Pipeline().setStages([
+document_assembler,
+spanClassifier
+])
+
+example = spark.createDataFrame([["What's my name?", "My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(example).transform(example)
+```
+```scala
+val document = new MultiDocumentAssembler()
+.setInputCols("question", "context")
+.setOutputCols("document_question", "document_context")
+
+val spanClassifier = BertForQuestionAnswering
+.pretrained("bert_qa_bert_base_spanish_wwm_cased_finetuned_qa_sqac","es")
+.setInputCols(Array("document_question", "document_context"))
+.setOutputCol("answer")
+.setCaseSensitive(true)
+.setMaxSentenceLength(512)
+
+val pipeline = new Pipeline().setStages(Array(document, spanClassifier))
+
+val example = Seq(
+("Where was John Lenon born?", "John Lenon was born in London and lived in Paris. My name is Sarah and I live in London."),
+("What's my name?", "My name is Clara and I live in Berkeley."))
+.toDF("question", "context")
+
+val result = pipeline.fit(example).transform(example)
+```
+
+{:.nlu-block}
+```python
+import nlu
+nlu.load("es.answer_question.sqac.bert.base_cased").predict("""What's my name?|||"My name is Clara and I live in Berkeley.""")
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|bert_qa_bert_base_spanish_wwm_cased_finetuned_qa_sqac|
+|Compatibility:|Spark NLP 5.2.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[sentence, token]|
+|Output Labels:|[embeddings]|
+|Language:|es|
+|Size:|409.5 MB|
+|Case sensitive:|true|
+|Max sentence length:|512|
+
+## References
+
+References
+
+- https://huggingface.co/CenIA/bert-base-spanish-wwm-cased-finetuned-qa-sqac
\ No newline at end of file
diff --git a/docs/_posts/ahmedlone127/2023-11-14-bert_qa_bert_base_uncased_coqa_en.md b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_bert_base_uncased_coqa_en.md
new file mode 100644
index 00000000000000..dbeabc2db443d9
--- /dev/null
+++ b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_bert_base_uncased_coqa_en.md
@@ -0,0 +1,108 @@
+---
+layout: model
+title: English BertForQuestionAnswering model (from peggyhuang)
+author: John Snow Labs
+name: bert_qa_bert_base_uncased_coqa
+date: 2023-11-14
+tags: [en, open_source, question_answering, bert, onnx]
+task: Question Answering
+language: en
+edition: Spark NLP 5.2.0
+spark_version: 3.0
+supported: true
+engine: onnx
+annotator: BertForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained Question Answering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `bert-base-uncased-coqa` is an English model originally trained by `peggyhuang`.
+
+## Predicted Entities
+
+
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/bert_qa_bert_base_uncased_coqa_en_5.2.0_3.0_1699996350702.zip){:.button.button-orange.button-orange-trans.arr.button-icon}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/bert_qa_bert_base_uncased_coqa_en_5.2.0_3.0_1699996350702.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+document_assembler = MultiDocumentAssembler() \
+.setInputCols(["question", "context"]) \
+.setOutputCols(["document_question", "document_context"])
+
+spanClassifier = BertForQuestionAnswering.pretrained("bert_qa_bert_base_uncased_coqa","en") \
+.setInputCols(["document_question", "document_context"]) \
+.setOutputCol("answer") \
+.setCaseSensitive(True)
+
+pipeline = Pipeline().setStages([
+document_assembler,
+spanClassifier
+])
+
+example = spark.createDataFrame([["What's my name?", "My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(example).transform(example)
+```
+```scala
+val document = new MultiDocumentAssembler()
+.setInputCols("question", "context")
+.setOutputCols("document_question", "document_context")
+
+val spanClassifier = BertForQuestionAnswering
+.pretrained("bert_qa_bert_base_uncased_coqa","en")
+.setInputCols(Array("document_question", "document_context"))
+.setOutputCol("answer")
+.setCaseSensitive(true)
+.setMaxSentenceLength(512)
+
+val pipeline = new Pipeline().setStages(Array(document, spanClassifier))
+
+val example = Seq(
+("Where was John Lenon born?", "John Lenon was born in London and lived in Paris. My name is Sarah and I live in London."),
+("What's my name?", "My name is Clara and I live in Berkeley."))
+.toDF("question", "context")
+
+val result = pipeline.fit(example).transform(example)
+```
+
+{:.nlu-block}
+```python
+import nlu
+nlu.load("en.answer_question.bert.base_uncased").predict("""What's my name?|||"My name is Clara and I live in Berkeley.""")
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|bert_qa_bert_base_uncased_coqa|
+|Compatibility:|Spark NLP 5.2.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[sentence, token]|
+|Output Labels:|[embeddings]|
+|Language:|en|
+|Size:|407.1 MB|
+|Case sensitive:|false|
+|Max sentence length:|512|
+
+## References
+
+References
+
+- https://huggingface.co/peggyhuang/bert-base-uncased-coqa
\ No newline at end of file
diff --git a/docs/_posts/ahmedlone127/2023-11-14-bert_qa_bert_base_uncased_squad2_covid_qa_deepset_en.md b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_bert_base_uncased_squad2_covid_qa_deepset_en.md
new file mode 100644
index 00000000000000..d958ce2161343e
--- /dev/null
+++ b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_bert_base_uncased_squad2_covid_qa_deepset_en.md
@@ -0,0 +1,108 @@
+---
+layout: model
+title: English BertForQuestionAnswering model (from armageddon)
+author: John Snow Labs
+name: bert_qa_bert_base_uncased_squad2_covid_qa_deepset
+date: 2023-11-14
+tags: [en, open_source, question_answering, bert, onnx]
+task: Question Answering
+language: en
+edition: Spark NLP 5.2.0
+spark_version: 3.0
+supported: true
+engine: onnx
+annotator: BertForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained Question Answering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `bert-base-uncased-squad2-covid-qa-deepset` is an English model originally trained by `armageddon`.
+
+## Predicted Entities
+
+
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/bert_qa_bert_base_uncased_squad2_covid_qa_deepset_en_5.2.0_3.0_1699994554541.zip){:.button.button-orange.button-orange-trans.arr.button-icon}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/bert_qa_bert_base_uncased_squad2_covid_qa_deepset_en_5.2.0_3.0_1699994554541.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+document_assembler = MultiDocumentAssembler() \
+.setInputCols(["question", "context"]) \
+.setOutputCols(["document_question", "document_context"])
+
+spanClassifier = BertForQuestionAnswering.pretrained("bert_qa_bert_base_uncased_squad2_covid_qa_deepset","en") \
+.setInputCols(["document_question", "document_context"]) \
+.setOutputCol("answer") \
+.setCaseSensitive(True)
+
+pipeline = Pipeline().setStages([
+document_assembler,
+spanClassifier
+])
+
+example = spark.createDataFrame([["What's my name?", "My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(example).transform(example)
+```
+```scala
+val document = new MultiDocumentAssembler()
+.setInputCols("question", "context")
+.setOutputCols("document_question", "document_context")
+
+val spanClassifier = BertForQuestionAnswering
+.pretrained("bert_qa_bert_base_uncased_squad2_covid_qa_deepset","en")
+.setInputCols(Array("document_question", "document_context"))
+.setOutputCol("answer")
+.setCaseSensitive(true)
+.setMaxSentenceLength(512)
+
+val pipeline = new Pipeline().setStages(Array(document, spanClassifier))
+
+val example = Seq(
+("Where was John Lenon born?", "John Lenon was born in London and lived in Paris. My name is Sarah and I live in London."),
+("What's my name?", "My name is Clara and I live in Berkeley."))
+.toDF("question", "context")
+
+val result = pipeline.fit(example).transform(example)
+```
+
+{:.nlu-block}
+```python
+import nlu
+nlu.load("en.answer_question.squadv2_covid.bert.base_uncased").predict("""What's my name?|||"My name is Clara and I live in Berkeley.""")
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|bert_qa_bert_base_uncased_squad2_covid_qa_deepset|
+|Compatibility:|Spark NLP 5.2.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[sentence, token]|
+|Output Labels:|[embeddings]|
+|Language:|en|
+|Size:|407.2 MB|
+|Case sensitive:|false|
+|Max sentence length:|512|
+
+## References
+
+References
+
+- https://huggingface.co/armageddon/bert-base-uncased-squad2-covid-qa-deepset
\ No newline at end of file
diff --git a/docs/_posts/ahmedlone127/2023-11-14-bert_qa_bert_finetuned_jackh1995_en.md b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_bert_finetuned_jackh1995_en.md
new file mode 100644
index 00000000000000..043ab6e556da7e
--- /dev/null
+++ b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_bert_finetuned_jackh1995_en.md
@@ -0,0 +1,108 @@
+---
+layout: model
+title: English BertForQuestionAnswering model (from jackh1995)
+author: John Snow Labs
+name: bert_qa_bert_finetuned_jackh1995
+date: 2023-11-14
+tags: [en, open_source, question_answering, bert, onnx]
+task: Question Answering
+language: en
+edition: Spark NLP 5.2.0
+spark_version: 3.0
+supported: true
+engine: onnx
+annotator: BertForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained Question Answering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `bert-finetuned` is an English model originally trained by `jackh1995`.
+
+## Predicted Entities
+
+
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/bert_qa_bert_finetuned_jackh1995_en_5.2.0_3.0_1699995632485.zip){:.button.button-orange.button-orange-trans.arr.button-icon}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/bert_qa_bert_finetuned_jackh1995_en_5.2.0_3.0_1699995632485.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+document_assembler = MultiDocumentAssembler() \
+.setInputCols(["question", "context"]) \
+.setOutputCols(["document_question", "document_context"])
+
+spanClassifier = BertForQuestionAnswering.pretrained("bert_qa_bert_finetuned_jackh1995","en") \
+.setInputCols(["document_question", "document_context"]) \
+.setOutputCol("answer") \
+.setCaseSensitive(True)
+
+pipeline = Pipeline().setStages([
+document_assembler,
+spanClassifier
+])
+
+example = spark.createDataFrame([["What's my name?", "My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(example).transform(example)
+```
+```scala
+val document = new MultiDocumentAssembler()
+.setInputCols("question", "context")
+.setOutputCols("document_question", "document_context")
+
+val spanClassifier = BertForQuestionAnswering
+.pretrained("bert_qa_bert_finetuned_jackh1995","en")
+.setInputCols(Array("document_question", "document_context"))
+.setOutputCol("answer")
+.setCaseSensitive(true)
+.setMaxSentenceLength(512)
+
+val pipeline = new Pipeline().setStages(Array(document, spanClassifier))
+
+val example = Seq(
+("Where was John Lenon born?", "John Lenon was born in London and lived in Paris. My name is Sarah and I live in London."),
+("What's my name?", "My name is Clara and I live in Berkeley."))
+.toDF("question", "context")
+
+val result = pipeline.fit(example).transform(example)
+```
+
+{:.nlu-block}
+```python
+import nlu
+nlu.load("en.answer_question.bert.by_jackh1995").predict("""What's my name?|||"My name is Clara and I live in Berkeley.""")
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|bert_qa_bert_finetuned_jackh1995|
+|Compatibility:|Spark NLP 5.2.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[sentence, token]|
+|Output Labels:|[embeddings]|
+|Language:|en|
+|Size:|380.8 MB|
+|Case sensitive:|true|
+|Max sentence length:|512|
+
+## References
+
+References
+
+- https://huggingface.co/jackh1995/bert-finetuned
\ No newline at end of file
diff --git a/docs/_posts/ahmedlone127/2023-11-14-bert_qa_bert_finetuned_lr2_e5_b16_ep2_en.md b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_bert_finetuned_lr2_e5_b16_ep2_en.md
new file mode 100644
index 00000000000000..12aacdaac30b95
--- /dev/null
+++ b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_bert_finetuned_lr2_e5_b16_ep2_en.md
@@ -0,0 +1,108 @@
+---
+layout: model
+title: English BertForQuestionAnswering model (from motiondew)
+author: John Snow Labs
+name: bert_qa_bert_finetuned_lr2_e5_b16_ep2
+date: 2023-11-14
+tags: [en, open_source, question_answering, bert, onnx]
+task: Question Answering
+language: en
+edition: Spark NLP 5.2.0
+spark_version: 3.0
+supported: true
+engine: onnx
+annotator: BertForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained Question Answering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `bert-finetuned-lr2-e5-b16-ep2` is an English model originally trained by `motiondew`.
+
+## Predicted Entities
+
+
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/bert_qa_bert_finetuned_lr2_e5_b16_ep2_en_5.2.0_3.0_1699995956287.zip){:.button.button-orange.button-orange-trans.arr.button-icon}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/bert_qa_bert_finetuned_lr2_e5_b16_ep2_en_5.2.0_3.0_1699995956287.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+document_assembler = MultiDocumentAssembler() \
+.setInputCols(["question", "context"]) \
+.setOutputCols(["document_question", "document_context"])
+
+spanClassifier = BertForQuestionAnswering.pretrained("bert_qa_bert_finetuned_lr2_e5_b16_ep2","en") \
+.setInputCols(["document_question", "document_context"]) \
+.setOutputCol("answer") \
+.setCaseSensitive(True)
+
+pipeline = Pipeline().setStages([
+document_assembler,
+spanClassifier
+])
+
+example = spark.createDataFrame([["What's my name?", "My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(example).transform(example)
+```
+```scala
+val document = new MultiDocumentAssembler()
+.setInputCols("question", "context")
+.setOutputCols("document_question", "document_context")
+
+val spanClassifier = BertForQuestionAnswering
+.pretrained("bert_qa_bert_finetuned_lr2_e5_b16_ep2","en")
+.setInputCols(Array("document_question", "document_context"))
+.setOutputCol("answer")
+.setCaseSensitive(true)
+.setMaxSentenceLength(512)
+
+val pipeline = new Pipeline().setStages(Array(document, spanClassifier))
+
+val example = Seq(
+("Where was John Lenon born?", "John Lenon was born in London and lived in Paris. My name is Sarah and I live in London."),
+("What's my name?", "My name is Clara and I live in Berkeley."))
+.toDF("question", "context")
+
+val result = pipeline.fit(example).transform(example)
+```
+
+{:.nlu-block}
+```python
+import nlu
+nlu.load("en.answer_question.bert.by_motiondew").predict("""What's my name?|||"My name is Clara and I live in Berkeley.""")
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|bert_qa_bert_finetuned_lr2_e5_b16_ep2|
+|Compatibility:|Spark NLP 5.2.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[sentence, token]|
+|Output Labels:|[embeddings]|
+|Language:|en|
+|Size:|407.2 MB|
+|Case sensitive:|true|
+|Max sentence length:|512|
+
+## References
+
+References
+
+- https://huggingface.co/motiondew/bert-finetuned-lr2-e5-b16-ep2
\ No newline at end of file
diff --git a/docs/_posts/ahmedlone127/2023-11-14-bert_qa_bert_l_squadv1.1_sl256_en.md b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_bert_l_squadv1.1_sl256_en.md
new file mode 100644
index 00000000000000..eeb18e3d80a391
--- /dev/null
+++ b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_bert_l_squadv1.1_sl256_en.md
@@ -0,0 +1,108 @@
+---
+layout: model
+title: English BertForQuestionAnswering model (from vuiseng9)
+author: John Snow Labs
+name: bert_qa_bert_l_squadv1.1_sl256
+date: 2023-11-14
+tags: [en, open_source, question_answering, bert, onnx]
+task: Question Answering
+language: en
+edition: Spark NLP 5.2.0
+spark_version: 3.0
+supported: true
+engine: onnx
+annotator: BertForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained Question Answering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `bert-l-squadv1.1-sl256` is an English model originally trained by `vuiseng9`.
+
+## Predicted Entities
+
+
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/bert_qa_bert_l_squadv1.1_sl256_en_5.2.0_3.0_1699996982091.zip){:.button.button-orange.button-orange-trans.arr.button-icon}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/bert_qa_bert_l_squadv1.1_sl256_en_5.2.0_3.0_1699996982091.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+document_assembler = MultiDocumentAssembler() \
+.setInputCols(["question", "context"]) \
+.setOutputCols(["document_question", "document_context"])
+
+spanClassifier = BertForQuestionAnswering.pretrained("bert_qa_bert_l_squadv1.1_sl256","en") \
+.setInputCols(["document_question", "document_context"]) \
+.setOutputCol("answer") \
+.setCaseSensitive(True)
+
+pipeline = Pipeline().setStages([
+document_assembler,
+spanClassifier
+])
+
+example = spark.createDataFrame([["What's my name?", "My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(example).transform(example)
+```
+```scala
+val document = new MultiDocumentAssembler()
+.setInputCols("question", "context")
+.setOutputCols("document_question", "document_context")
+
+val spanClassifier = BertForQuestionAnswering
+.pretrained("bert_qa_bert_l_squadv1.1_sl256","en")
+.setInputCols(Array("document_question", "document_context"))
+.setOutputCol("answer")
+.setCaseSensitive(true)
+.setMaxSentenceLength(512)
+
+val pipeline = new Pipeline().setStages(Array(document, spanClassifier))
+
+val example = Seq(
+("Where was John Lenon born?", "John Lenon was born in London and lived in Paris. My name is Sarah and I live in London."),
+("What's my name?", "My name is Clara and I live in Berkeley."))
+.toDF("question", "context")
+
+val result = pipeline.fit(example).transform(example)
+```
+
+{:.nlu-block}
+```python
+import nlu
+nlu.load("en.answer_question.squad.bert.sl256.by_vuiseng9").predict("""What's my name?|||"My name is Clara and I live in Berkeley.""")
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|bert_qa_bert_l_squadv1.1_sl256|
+|Compatibility:|Spark NLP 5.2.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[sentence, token]|
+|Output Labels:|[embeddings]|
+|Language:|en|
+|Size:|1.3 GB|
+|Case sensitive:|true|
+|Max sentence length:|512|
+
+## References
+
+References
+
+- https://huggingface.co/vuiseng9/bert-l-squadv1.1-sl256
\ No newline at end of file
diff --git a/docs/_posts/ahmedlone127/2023-11-14-bert_qa_bert_l_squadv1.1_sl384_en.md b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_bert_l_squadv1.1_sl384_en.md
new file mode 100644
index 00000000000000..feacfdb5b1527c
--- /dev/null
+++ b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_bert_l_squadv1.1_sl384_en.md
@@ -0,0 +1,108 @@
+---
+layout: model
+title: English BertForQuestionAnswering model (from vuiseng9)
+author: John Snow Labs
+name: bert_qa_bert_l_squadv1.1_sl384
+date: 2023-11-14
+tags: [en, open_source, question_answering, bert, onnx]
+task: Question Answering
+language: en
+edition: Spark NLP 5.2.0
+spark_version: 3.0
+supported: true
+engine: onnx
+annotator: BertForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained Question Answering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `bert-l-squadv1.1-sl384` is an English model originally trained by `vuiseng9`.
+
+## Predicted Entities
+
+
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/bert_qa_bert_l_squadv1.1_sl384_en_5.2.0_3.0_1699997522825.zip){:.button.button-orange.button-orange-trans.arr.button-icon}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/bert_qa_bert_l_squadv1.1_sl384_en_5.2.0_3.0_1699997522825.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+document_assembler = MultiDocumentAssembler() \
+.setInputCols(["question", "context"]) \
+.setOutputCols(["document_question", "document_context"])
+
+spanClassifier = BertForQuestionAnswering.pretrained("bert_qa_bert_l_squadv1.1_sl384","en") \
+.setInputCols(["document_question", "document_context"]) \
+.setOutputCol("answer") \
+.setCaseSensitive(True)
+
+pipeline = Pipeline().setStages([
+document_assembler,
+spanClassifier
+])
+
+example = spark.createDataFrame([["What's my name?", "My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(example).transform(example)
+```
+```scala
+val document = new MultiDocumentAssembler()
+.setInputCols("question", "context")
+.setOutputCols("document_question", "document_context")
+
+val spanClassifier = BertForQuestionAnswering
+.pretrained("bert_qa_bert_l_squadv1.1_sl384","en")
+.setInputCols(Array("document_question", "document_context"))
+.setOutputCol("answer")
+.setCaseSensitive(true)
+.setMaxSentenceLength(512)
+
+val pipeline = new Pipeline().setStages(Array(document, spanClassifier))
+
+val example = Seq(
+("Where was John Lenon born?", "John Lenon was born in London and lived in Paris. My name is Sarah and I live in London."),
+("What's my name?", "My name is Clara and I live in Berkeley."))
+.toDF("question", "context")
+
+val result = pipeline.fit(example).transform(example)
+```
+
+{:.nlu-block}
+```python
+import nlu
+nlu.load("en.answer_question.squad.bert.sl384.by_vuiseng9").predict("""What's my name?|||"My name is Clara and I live in Berkeley.""")
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|bert_qa_bert_l_squadv1.1_sl384|
+|Compatibility:|Spark NLP 5.2.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[sentence, token]|
+|Output Labels:|[embeddings]|
+|Language:|en|
+|Size:|1.3 GB|
+|Case sensitive:|true|
+|Max sentence length:|512|
+
+## References
+
+References
+
+- https://huggingface.co/vuiseng9/bert-l-squadv1.1-sl384
\ No newline at end of file
diff --git a/docs/_posts/ahmedlone127/2023-11-14-bert_qa_bert_large_faquad_en.md b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_bert_large_faquad_en.md
new file mode 100644
index 00000000000000..7c5ce55a9f5905
--- /dev/null
+++ b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_bert_large_faquad_en.md
@@ -0,0 +1,108 @@
+---
+layout: model
+title: English BertForQuestionAnswering model (from ricardo-filho)
+author: John Snow Labs
+name: bert_qa_bert_large_faquad
+date: 2023-11-14
+tags: [en, open_source, question_answering, bert, onnx]
+task: Question Answering
+language: en
+edition: Spark NLP 5.2.0
+spark_version: 3.0
+supported: true
+engine: onnx
+annotator: BertForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained Question Answering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `bert_large_faquad` is an English model originally trained by `ricardo-filho`.
+
+## Predicted Entities
+
+
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/bert_qa_bert_large_faquad_en_5.2.0_3.0_1699998050521.zip){:.button.button-orange.button-orange-trans.arr.button-icon}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/bert_qa_bert_large_faquad_en_5.2.0_3.0_1699998050521.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+document_assembler = MultiDocumentAssembler() \
+.setInputCols(["question", "context"]) \
+.setOutputCols(["document_question", "document_context"])
+
+spanClassifier = BertForQuestionAnswering.pretrained("bert_qa_bert_large_faquad","en") \
+.setInputCols(["document_question", "document_context"]) \
+.setOutputCol("answer") \
+.setCaseSensitive(True)
+
+pipeline = Pipeline().setStages([
+document_assembler,
+spanClassifier
+])
+
+example = spark.createDataFrame([["What's my name?", "My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(example).transform(example)
+```
+```scala
+val document = new MultiDocumentAssembler()
+.setInputCols("question", "context")
+.setOutputCols("document_question", "document_context")
+
+val spanClassifier = BertForQuestionAnswering
+.pretrained("bert_qa_bert_large_faquad","en")
+.setInputCols(Array("document_question", "document_context"))
+.setOutputCol("answer")
+.setCaseSensitive(true)
+.setMaxSentenceLength(512)
+
+val pipeline = new Pipeline().setStages(Array(document, spanClassifier))
+
+val example = Seq(
+("Where was John Lenon born?", "John Lenon was born in London and lived in Paris. My name is Sarah and I live in London."),
+("What's my name?", "My name is Clara and I live in Berkeley."))
+.toDF("question", "context")
+
+val result = pipeline.fit(example).transform(example)
+```
+
+{:.nlu-block}
+```python
+import nlu
+nlu.load("en.answer_question.bert.large.by_ricardo-filho").predict("""What's my name?|||"My name is Clara and I live in Berkeley.""")
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|bert_qa_bert_large_faquad|
+|Compatibility:|Spark NLP 5.2.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[sentence, token]|
+|Output Labels:|[embeddings]|
+|Language:|en|
+|Size:|1.2 GB|
+|Case sensitive:|true|
+|Max sentence length:|512|
+
+## References
+
+References
+
+- https://huggingface.co/ricardo-filho/bert_large_faquad
\ No newline at end of file
diff --git a/docs/_posts/ahmedlone127/2023-11-14-bert_qa_bert_large_uncased_finetuned_docvqa_en.md b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_bert_large_uncased_finetuned_docvqa_en.md
new file mode 100644
index 00000000000000..6a367f41d3b232
--- /dev/null
+++ b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_bert_large_uncased_finetuned_docvqa_en.md
@@ -0,0 +1,108 @@
+---
+layout: model
+title: English BertForQuestionAnswering model (from tiennvcs)
+author: John Snow Labs
+name: bert_qa_bert_large_uncased_finetuned_docvqa
+date: 2023-11-14
+tags: [en, open_source, question_answering, bert, onnx]
+task: Question Answering
+language: en
+edition: Spark NLP 5.2.0
+spark_version: 3.0
+supported: true
+engine: onnx
+annotator: BertForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained Question Answering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `bert-large-uncased-finetuned-docvqa` is an English model originally trained by `tiennvcs`.
+
+## Predicted Entities
+
+
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/bert_qa_bert_large_uncased_finetuned_docvqa_en_5.2.0_3.0_1699997434549.zip){:.button.button-orange.button-orange-trans.arr.button-icon}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/bert_qa_bert_large_uncased_finetuned_docvqa_en_5.2.0_3.0_1699997434549.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+document_assembler = MultiDocumentAssembler() \
+.setInputCols(["question", "context"]) \
+.setOutputCols(["document_question", "document_context"])
+
+spanClassifier = BertForQuestionAnswering.pretrained("bert_qa_bert_large_uncased_finetuned_docvqa","en") \
+.setInputCols(["document_question", "document_context"]) \
+.setOutputCol("answer") \
+.setCaseSensitive(True)
+
+pipeline = Pipeline().setStages([
+document_assembler,
+spanClassifier
+])
+
+example = spark.createDataFrame([["What's my name?", "My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(example).transform(example)
+```
+```scala
+val document = new MultiDocumentAssembler()
+.setInputCols("question", "context")
+.setOutputCols("document_question", "document_context")
+
+val spanClassifier = BertForQuestionAnswering
+.pretrained("bert_qa_bert_large_uncased_finetuned_docvqa","en")
+.setInputCols(Array("document_question", "document_context"))
+.setOutputCol("answer")
+.setCaseSensitive(true)
+.setMaxSentenceLength(512)
+
+val pipeline = new Pipeline().setStages(Array(document, spanClassifier))
+
+val example = Seq(
+("Where was John Lenon born?", "John Lenon was born in London and lived in Paris. My name is Sarah and I live in London."),
+("What's my name?", "My name is Clara and I live in Berkeley."))
+.toDF("question", "context")
+
+val result = pipeline.fit(example).transform(example)
+```
+
+{:.nlu-block}
+```python
+import nlu
+nlu.load("en.answer_question.bert.large_uncased").predict("""What's my name?|||"My name is Clara and I live in Berkeley.""")
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|bert_qa_bert_large_uncased_finetuned_docvqa|
+|Compatibility:|Spark NLP 5.2.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[sentence, token]|
+|Output Labels:|[embeddings]|
+|Language:|en|
+|Size:|1.3 GB|
+|Case sensitive:|false|
+|Max sentence length:|512|
+
+## References
+
+References
+
+- https://huggingface.co/tiennvcs/bert-large-uncased-finetuned-docvqa
\ No newline at end of file
diff --git a/docs/_posts/ahmedlone127/2023-11-14-bert_qa_bert_large_uncased_squad2_covid_qa_deepset_en.md b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_bert_large_uncased_squad2_covid_qa_deepset_en.md
new file mode 100644
index 00000000000000..cd87c59feb6203
--- /dev/null
+++ b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_bert_large_uncased_squad2_covid_qa_deepset_en.md
@@ -0,0 +1,108 @@
+---
+layout: model
+title: English BertForQuestionAnswering model (from armageddon)
+author: John Snow Labs
+name: bert_qa_bert_large_uncased_squad2_covid_qa_deepset
+date: 2023-11-14
+tags: [en, open_source, question_answering, bert, onnx]
+task: Question Answering
+language: en
+edition: Spark NLP 5.2.0
+spark_version: 3.0
+supported: true
+engine: onnx
+annotator: BertForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained Question Answering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `bert-large-uncased-squad2-covid-qa-deepset` is an English model originally trained by `armageddon`.
+
+## Predicted Entities
+
+
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/bert_qa_bert_large_uncased_squad2_covid_qa_deepset_en_5.2.0_3.0_1699995085123.zip){:.button.button-orange.button-orange-trans.arr.button-icon}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/bert_qa_bert_large_uncased_squad2_covid_qa_deepset_en_5.2.0_3.0_1699995085123.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+document_assembler = MultiDocumentAssembler() \
+.setInputCols(["question", "context"]) \
+.setOutputCols(["document_question", "document_context"])
+
+spanClassifier = BertForQuestionAnswering.pretrained("bert_qa_bert_large_uncased_squad2_covid_qa_deepset","en") \
+.setInputCols(["document_question", "document_context"]) \
+.setOutputCol("answer") \
+.setCaseSensitive(True)
+
+pipeline = Pipeline().setStages([
+document_assembler,
+spanClassifier
+])
+
+example = spark.createDataFrame([["What's my name?", "My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(example).transform(example)
+```
+```scala
+val document = new MultiDocumentAssembler()
+.setInputCols("question", "context")
+.setOutputCols("document_question", "document_context")
+
+val spanClassifier = BertForQuestionAnswering
+.pretrained("bert_qa_bert_large_uncased_squad2_covid_qa_deepset","en")
+.setInputCols(Array("document_question", "document_context"))
+.setOutputCol("answer")
+.setCaseSensitive(true)
+.setMaxSentenceLength(512)
+
+val pipeline = new Pipeline().setStages(Array(document, spanClassifier))
+
+val example = Seq(
+("Where was John Lenon born?", "John Lenon was born in London and lived in Paris. My name is Sarah and I live in London."),
+("What's my name?", "My name is Clara and I live in Berkeley."))
+.toDF("question", "context")
+
+val result = pipeline.fit(example).transform(example)
+```
+
+{:.nlu-block}
+```python
+import nlu
+nlu.load("en.answer_question.squadv2_covid.bert.large_uncased").predict("""What's my name?|||"My name is Clara and I live in Berkeley.""")
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|bert_qa_bert_large_uncased_squad2_covid_qa_deepset|
+|Compatibility:|Spark NLP 5.2.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[sentence, token]|
+|Output Labels:|[embeddings]|
+|Language:|en|
+|Size:|1.3 GB|
+|Case sensitive:|false|
+|Max sentence length:|512|
+
+## References
+
+References
+
+- https://huggingface.co/armageddon/bert-large-uncased-squad2-covid-qa-deepset
\ No newline at end of file
diff --git a/docs/_posts/ahmedlone127/2023-11-14-bert_qa_bert_large_uncased_squadv1.1_sparse_80_1x4_block_pruneofa_en.md b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_bert_large_uncased_squadv1.1_sparse_80_1x4_block_pruneofa_en.md
new file mode 100644
index 00000000000000..8f8697f386bead
--- /dev/null
+++ b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_bert_large_uncased_squadv1.1_sparse_80_1x4_block_pruneofa_en.md
@@ -0,0 +1,110 @@
+---
+layout: model
+title: English BertForQuestionAnswering model (from Intel)
+author: John Snow Labs
+name: bert_qa_bert_large_uncased_squadv1.1_sparse_80_1x4_block_pruneofa
+date: 2023-11-14
+tags: [en, open_source, question_answering, bert, onnx]
+task: Question Answering
+language: en
+edition: Spark NLP 5.2.0
+spark_version: 3.0
+supported: true
+engine: onnx
+annotator: BertForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained Question Answering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `bert-large-uncased-squadv1.1-sparse-80-1x4-block-pruneofa` is an English model originally trained by `Intel`.
+
+## Predicted Entities
+
+
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/bert_qa_bert_large_uncased_squadv1.1_sparse_80_1x4_block_pruneofa_en_5.2.0_3.0_1699995636644.zip){:.button.button-orange.button-orange-trans.arr.button-icon}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/bert_qa_bert_large_uncased_squadv1.1_sparse_80_1x4_block_pruneofa_en_5.2.0_3.0_1699995636644.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+document_assembler = MultiDocumentAssembler() \
+.setInputCols(["question", "context"]) \
+.setOutputCols(["document_question", "document_context"])
+
+spanClassifier = BertForQuestionAnswering.pretrained("bert_qa_bert_large_uncased_squadv1.1_sparse_80_1x4_block_pruneofa","en") \
+.setInputCols(["document_question", "document_context"]) \
+.setOutputCol("answer") \
+.setCaseSensitive(True)
+
+pipeline = Pipeline().setStages([
+document_assembler,
+spanClassifier
+])
+
+example = spark.createDataFrame([["What's my name?", "My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(example).transform(example)
+```
+```scala
+val document = new MultiDocumentAssembler()
+.setInputCols("question", "context")
+.setOutputCols("document_question", "document_context")
+
+val spanClassifier = BertForQuestionAnswering
+.pretrained("bert_qa_bert_large_uncased_squadv1.1_sparse_80_1x4_block_pruneofa","en")
+.setInputCols(Array("document_question", "document_context"))
+.setOutputCol("answer")
+.setCaseSensitive(true)
+.setMaxSentenceLength(512)
+
+val pipeline = new Pipeline().setStages(Array(document, spanClassifier))
+
+val example = Seq(
+("Where was John Lenon born?", "John Lenon was born in London and lived in Paris. My name is Sarah and I live in London."),
+("What's my name?", "My name is Clara and I live in Berkeley."))
+.toDF("question", "context")
+
+val result = pipeline.fit(example).transform(example)
+```
+
+{:.nlu-block}
+```python
+import nlu
+nlu.load("en.answer_question.squad.bert.large_uncased_sparse_80_1x4_block_pruneofa.by_Intel").predict("""What's my name?|||"My name is Clara and I live in Berkeley.""")
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|bert_qa_bert_large_uncased_squadv1.1_sparse_80_1x4_block_pruneofa|
+|Compatibility:|Spark NLP 5.2.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[sentence, token]|
+|Output Labels:|[embeddings]|
+|Language:|en|
+|Size:|436.9 MB|
+|Case sensitive:|false|
+|Max sentence length:|512|
+
+## References
+
+References
+
+- https://huggingface.co/Intel/bert-large-uncased-squadv1.1-sparse-80-1x4-block-pruneofa
+- https://arxiv.org/abs/2111.05754
+- https://github.com/IntelLabs/Model-Compression-Research-Package/tree/main/research/prune-once-for-all
\ No newline at end of file
diff --git a/docs/_posts/ahmedlone127/2023-11-14-bert_qa_bert_large_uncased_squadv2_en.md b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_bert_large_uncased_squadv2_en.md
new file mode 100644
index 00000000000000..53f27792a25fdf
--- /dev/null
+++ b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_bert_large_uncased_squadv2_en.md
@@ -0,0 +1,109 @@
+---
+layout: model
+title: English BertForQuestionAnswering model (from madlag)
+author: John Snow Labs
+name: bert_qa_bert_large_uncased_squadv2
+date: 2023-11-14
+tags: [en, open_source, question_answering, bert, onnx]
+task: Question Answering
+language: en
+edition: Spark NLP 5.2.0
+spark_version: 3.0
+supported: true
+engine: onnx
+annotator: BertForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained Question Answering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `bert-large-uncased-squadv2` is an English model originally trained by `madlag`.
+
+## Predicted Entities
+
+
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/bert_qa_bert_large_uncased_squadv2_en_5.2.0_3.0_1699996683751.zip){:.button.button-orange.button-orange-trans.arr.button-icon}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/bert_qa_bert_large_uncased_squadv2_en_5.2.0_3.0_1699996683751.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+document_assembler = MultiDocumentAssembler() \
+.setInputCols(["question", "context"]) \
+.setOutputCols(["document_question", "document_context"])
+
+spanClassifier = BertForQuestionAnswering.pretrained("bert_qa_bert_large_uncased_squadv2","en") \
+.setInputCols(["document_question", "document_context"]) \
+.setOutputCol("answer") \
+.setCaseSensitive(True)
+
+pipeline = Pipeline().setStages([
+document_assembler,
+spanClassifier
+])
+
+example = spark.createDataFrame([["What's my name?", "My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(example).transform(example)
+```
+```scala
+val document = new MultiDocumentAssembler()
+.setInputCols("question", "context")
+.setOutputCols("document_question", "document_context")
+
+val spanClassifier = BertForQuestionAnswering
+.pretrained("bert_qa_bert_large_uncased_squadv2","en")
+.setInputCols(Array("document_question", "document_context"))
+.setOutputCol("answer")
+.setCaseSensitive(true)
+.setMaxSentenceLength(512)
+
+val pipeline = new Pipeline().setStages(Array(document, spanClassifier))
+
+val example = Seq(
+("Where was John Lenon born?", "John Lenon was born in London and lived in Paris. My name is Sarah and I live in London."),
+("What's my name?", "My name is Clara and I live in Berkeley."))
+.toDF("question", "context")
+
+val result = pipeline.fit(example).transform(example)
+```
+
+{:.nlu-block}
+```python
+import nlu
+nlu.load("en.answer_question.squadv2.bert.large_uncased_v2.by_madlag").predict("""What's my name?|||"My name is Clara and I live in Berkeley.""")
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|bert_qa_bert_large_uncased_squadv2|
+|Compatibility:|Spark NLP 5.2.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[sentence, token]|
+|Output Labels:|[embeddings]|
+|Language:|en|
+|Size:|1.3 GB|
+|Case sensitive:|false|
+|Max sentence length:|512|
+
+## References
+
+References
+
+- https://huggingface.co/madlag/bert-large-uncased-squadv2
+- https://arxiv.org/pdf/1810.04805v2.pdf%5D
\ No newline at end of file
diff --git a/docs/_posts/ahmedlone127/2023-11-14-bert_qa_bert_large_uncased_whole_word_masking_chaii_en.md b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_bert_large_uncased_whole_word_masking_chaii_en.md
new file mode 100644
index 00000000000000..385c8d449a596c
--- /dev/null
+++ b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_bert_large_uncased_whole_word_masking_chaii_en.md
@@ -0,0 +1,108 @@
+---
+layout: model
+title: English BertForQuestionAnswering model (from SauravMaheshkar)
+author: John Snow Labs
+name: bert_qa_bert_large_uncased_whole_word_masking_chaii
+date: 2023-11-14
+tags: [en, open_source, question_answering, bert, onnx]
+task: Question Answering
+language: en
+edition: Spark NLP 5.2.0
+spark_version: 3.0
+supported: true
+engine: onnx
+annotator: BertForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained Question Answering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `bert-large-uncased-whole-word-masking-chaii` is an English model originally trained by `SauravMaheshkar`.
+
+## Predicted Entities
+
+
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/bert_qa_bert_large_uncased_whole_word_masking_chaii_en_5.2.0_3.0_1699997266251.zip){:.button.button-orange.button-orange-trans.arr.button-icon}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/bert_qa_bert_large_uncased_whole_word_masking_chaii_en_5.2.0_3.0_1699997266251.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+document_assembler = MultiDocumentAssembler() \
+.setInputCols(["question", "context"]) \
+.setOutputCols(["document_question", "document_context"])
+
+spanClassifier = BertForQuestionAnswering.pretrained("bert_qa_bert_large_uncased_whole_word_masking_chaii","en") \
+.setInputCols(["document_question", "document_context"]) \
+.setOutputCol("answer") \
+.setCaseSensitive(True)
+
+pipeline = Pipeline().setStages([
+document_assembler,
+spanClassifier
+])
+
+example = spark.createDataFrame([["What's my name?", "My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(example).transform(example)
+```
+```scala
+val document = new MultiDocumentAssembler()
+.setInputCols("question", "context")
+.setOutputCols("document_question", "document_context")
+
+val spanClassifier = BertForQuestionAnswering
+.pretrained("bert_qa_bert_large_uncased_whole_word_masking_chaii","en")
+.setInputCols(Array("document_question", "document_context"))
+.setOutputCol("answer")
+.setCaseSensitive(true)
+.setMaxSentenceLength(512)
+
+val pipeline = new Pipeline().setStages(Array(document, spanClassifier))
+
+val example = Seq(
+("Where was John Lenon born?", "John Lenon was born in London and lived in Paris. My name is Sarah and I live in London."),
+("What's my name?", "My name is Clara and I live in Berkeley."))
+.toDF("question", "context")
+
+val result = pipeline.fit(example).transform(example)
+```
+
+{:.nlu-block}
+```python
+import nlu
+nlu.load("en.answer_question.chaii.bert.large_uncased_uncased_whole_word_masking.by_SauravMaheshkar").predict("""What's my name?|||"My name is Clara and I live in Berkeley.""")
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|bert_qa_bert_large_uncased_whole_word_masking_chaii|
+|Compatibility:|Spark NLP 5.2.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[sentence, token]|
+|Output Labels:|[embeddings]|
+|Language:|en|
+|Size:|1.3 GB|
+|Case sensitive:|false|
+|Max sentence length:|512|
+
+## References
+
+References
+
+- https://huggingface.co/SauravMaheshkar/bert-large-uncased-whole-word-masking-chaii
\ No newline at end of file
diff --git a/docs/_posts/ahmedlone127/2023-11-14-bert_qa_bert_large_uncased_whole_word_masking_finetuned_chaii_en.md b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_bert_large_uncased_whole_word_masking_finetuned_chaii_en.md
new file mode 100644
index 00000000000000..31284903b34d22
--- /dev/null
+++ b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_bert_large_uncased_whole_word_masking_finetuned_chaii_en.md
@@ -0,0 +1,108 @@
+---
+layout: model
+title: English BertForQuestionAnswering model (from SauravMaheshkar)
+author: John Snow Labs
+name: bert_qa_bert_large_uncased_whole_word_masking_finetuned_chaii
+date: 2023-11-14
+tags: [en, open_source, question_answering, bert, onnx]
+task: Question Answering
+language: en
+edition: Spark NLP 5.2.0
+spark_version: 3.0
+supported: true
+engine: onnx
+annotator: BertForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained Question Answering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `bert-large-uncased-whole-word-masking-finetuned-chaii` is an English model originally trained by `SauravMaheshkar`.
+
+## Predicted Entities
+
+
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/bert_qa_bert_large_uncased_whole_word_masking_finetuned_chaii_en_5.2.0_3.0_1699997828295.zip){:.button.button-orange.button-orange-trans.arr.button-icon}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/bert_qa_bert_large_uncased_whole_word_masking_finetuned_chaii_en_5.2.0_3.0_1699997828295.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+document_assembler = MultiDocumentAssembler() \
+.setInputCols(["question", "context"]) \
+.setOutputCols(["document_question", "document_context"])
+
+spanClassifier = BertForQuestionAnswering.pretrained("bert_qa_bert_large_uncased_whole_word_masking_finetuned_chaii","en") \
+.setInputCols(["document_question", "document_context"]) \
+.setOutputCol("answer") \
+.setCaseSensitive(True)
+
+pipeline = Pipeline().setStages([
+document_assembler,
+spanClassifier
+])
+
+example = spark.createDataFrame([["What's my name?", "My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(example).transform(example)
+```
+```scala
+val document = new MultiDocumentAssembler()
+.setInputCols("question", "context")
+.setOutputCols("document_question", "document_context")
+
+val spanClassifier = BertForQuestionAnswering
+.pretrained("bert_qa_bert_large_uncased_whole_word_masking_finetuned_chaii","en")
+.setInputCols(Array("document_question", "document_context"))
+.setOutputCol("answer")
+.setCaseSensitive(true)
+.setMaxSentenceLength(512)
+
+val pipeline = new Pipeline().setStages(Array(document, spanClassifier))
+
+val example = Seq(
+("Where was John Lenon born?", "John Lenon was born in London and lived in Paris. My name is Sarah and I live in London."),
+("What's my name?", "My name is Clara and I live in Berkeley."))
+.toDF("question", "context")
+
+val result = pipeline.fit(example).transform(example)
+```
+
+{:.nlu-block}
+```python
+import nlu
+nlu.load("en.answer_question.chaii.bert.large_uncased_uncased_whole_word_masking_finetuned.by_SauravMaheshkar").predict("""What's my name?|||"My name is Clara and I live in Berkeley.""")
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|bert_qa_bert_large_uncased_whole_word_masking_finetuned_chaii|
+|Compatibility:|Spark NLP 5.2.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[sentence, token]|
+|Output Labels:|[embeddings]|
+|Language:|en|
+|Size:|1.3 GB|
+|Case sensitive:|false|
+|Max sentence length:|512|
+
+## References
+
+References
+
+- https://huggingface.co/SauravMaheshkar/bert-large-uncased-whole-word-masking-finetuned-chaii
\ No newline at end of file
diff --git a/docs/_posts/ahmedlone127/2023-11-14-bert_qa_bert_large_uncased_whole_word_masking_finetuned_squad_finetuned_islamic_squad_en.md b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_bert_large_uncased_whole_word_masking_finetuned_squad_finetuned_islamic_squad_en.md
new file mode 100644
index 00000000000000..d84c9dc8076f83
--- /dev/null
+++ b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_bert_large_uncased_whole_word_masking_finetuned_squad_finetuned_islamic_squad_en.md
@@ -0,0 +1,108 @@
+---
+layout: model
+title: English BertForQuestionAnswering model (from haddadalwi)
+author: John Snow Labs
+name: bert_qa_bert_large_uncased_whole_word_masking_finetuned_squad_finetuned_islamic_squad
+date: 2023-11-14
+tags: [en, open_source, question_answering, bert, onnx]
+task: Question Answering
+language: en
+edition: Spark NLP 5.2.0
+spark_version: 3.0
+supported: true
+engine: onnx
+annotator: BertForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained Question Answering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `bert-large-uncased-whole-word-masking-finetuned-squad-finetuned-islamic-squad` is an English model originally trained by `haddadalwi`.
+
+## Predicted Entities
+
+
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/bert_qa_bert_large_uncased_whole_word_masking_finetuned_squad_finetuned_islamic_squad_en_5.2.0_3.0_1699997999939.zip){:.button.button-orange.button-orange-trans.arr.button-icon}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/bert_qa_bert_large_uncased_whole_word_masking_finetuned_squad_finetuned_islamic_squad_en_5.2.0_3.0_1699997999939.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+document_assembler = MultiDocumentAssembler() \
+.setInputCols(["question", "context"]) \
+.setOutputCols(["document_question", "document_context"])
+
+spanClassifier = BertForQuestionAnswering.pretrained("bert_qa_bert_large_uncased_whole_word_masking_finetuned_squad_finetuned_islamic_squad","en") \
+.setInputCols(["document_question", "document_context"]) \
+.setOutputCol("answer") \
+.setCaseSensitive(True)
+
+pipeline = Pipeline().setStages([
+document_assembler,
+spanClassifier
+])
+
+example = spark.createDataFrame([["What's my name?", "My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(example).transform(example)
+```
+```scala
+val document = new MultiDocumentAssembler()
+.setInputCols("question", "context")
+.setOutputCols("document_question", "document_context")
+
+val spanClassifier = BertForQuestionAnswering
+.pretrained("bert_qa_bert_large_uncased_whole_word_masking_finetuned_squad_finetuned_islamic_squad","en")
+.setInputCols(Array("document_question", "document_context"))
+.setOutputCol("answer")
+.setCaseSensitive(true)
+.setMaxSentenceLength(512)
+
+val pipeline = new Pipeline().setStages(Array(document, spanClassifier))
+
+val example = Seq(
+("Where was John Lenon born?", "John Lenon was born in London and lived in Paris. My name is Sarah and I live in London."),
+("What's my name?", "My name is Clara and I live in Berkeley."))
+.toDF("question", "context")
+
+val result = pipeline.fit(example).transform(example)
+```
+
+{:.nlu-block}
+```python
+import nlu
+nlu.load("en.answer_question.squad.bert.large_uncased.by_haddadalwi").predict("""What's my name?|||"My name is Clara and I live in Berkeley.""")
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|bert_qa_bert_large_uncased_whole_word_masking_finetuned_squad_finetuned_islamic_squad|
+|Compatibility:|Spark NLP 5.2.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[sentence, token]|
+|Output Labels:|[embeddings]|
+|Language:|en|
+|Size:|1.3 GB|
+|Case sensitive:|false|
+|Max sentence length:|512|
+
+## References
+
+References
+
+- https://huggingface.co/haddadalwi/bert-large-uncased-whole-word-masking-finetuned-squad-finetuned-islamic-squad
\ No newline at end of file
diff --git a/docs/_posts/ahmedlone127/2023-11-14-bert_qa_bert_large_uncased_whole_word_masking_finetuned_squadv2_en.md b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_bert_large_uncased_whole_word_masking_finetuned_squadv2_en.md
new file mode 100644
index 00000000000000..86d2df1efa560f
--- /dev/null
+++ b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_bert_large_uncased_whole_word_masking_finetuned_squadv2_en.md
@@ -0,0 +1,108 @@
+---
+layout: model
+title: English BertForQuestionAnswering model (from madlag)
+author: John Snow Labs
+name: bert_qa_bert_large_uncased_whole_word_masking_finetuned_squadv2
+date: 2023-11-14
+tags: [en, open_source, question_answering, bert, onnx]
+task: Question Answering
+language: en
+edition: Spark NLP 5.2.0
+spark_version: 3.0
+supported: true
+engine: onnx
+annotator: BertForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained Question Answering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `bert-large-uncased-whole-word-masking-finetuned-squadv2` is an English model originally trained by `madlag`.
+
+## Predicted Entities
+
+
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/bert_qa_bert_large_uncased_whole_word_masking_finetuned_squadv2_en_5.2.0_3.0_1699998620273.zip){:.button.button-orange.button-orange-trans.arr.button-icon}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/bert_qa_bert_large_uncased_whole_word_masking_finetuned_squadv2_en_5.2.0_3.0_1699998620273.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+document_assembler = MultiDocumentAssembler() \
+.setInputCols(["question", "context"]) \
+.setOutputCols(["document_question", "document_context"])
+
+spanClassifier = BertForQuestionAnswering.pretrained("bert_qa_bert_large_uncased_whole_word_masking_finetuned_squadv2","en") \
+.setInputCols(["document_question", "document_context"]) \
+.setOutputCol("answer") \
+.setCaseSensitive(True)
+
+pipeline = Pipeline().setStages([
+document_assembler,
+spanClassifier
+])
+
+example = spark.createDataFrame([["What's my name?", "My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(example).transform(example)
+```
+```scala
+val document = new MultiDocumentAssembler()
+.setInputCols("question", "context")
+.setOutputCols("document_question", "document_context")
+
+val spanClassifier = BertForQuestionAnswering
+.pretrained("bert_qa_bert_large_uncased_whole_word_masking_finetuned_squadv2","en")
+.setInputCols(Array("document_question", "document_context"))
+.setOutputCol("answer")
+.setCaseSensitive(true)
+.setMaxSentenceLength(512)
+
+val pipeline = new Pipeline().setStages(Array(document, spanClassifier))
+
+val example = Seq(
+("Where was John Lenon born?", "John Lenon was born in London and lived in Paris. My name is Sarah and I live in London."),
+("What's my name?", "My name is Clara and I live in Berkeley."))
+.toDF("question", "context")
+
+val result = pipeline.fit(example).transform(example)
+```
+
+{:.nlu-block}
+```python
+import nlu
+nlu.load("en.answer_question.squadv2.bert.large_uncased_whole_word_masking_v2.by_madlag").predict("""What's my name?|||"My name is Clara and I live in Berkeley.""")
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|bert_qa_bert_large_uncased_whole_word_masking_finetuned_squadv2|
+|Compatibility:|Spark NLP 5.2.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[sentence, token]|
+|Output Labels:|[embeddings]|
+|Language:|en|
+|Size:|1.3 GB|
+|Case sensitive:|false|
+|Max sentence length:|512|
+
+## References
+
+References
+
+- https://huggingface.co/madlag/bert-large-uncased-whole-word-masking-finetuned-squadv2
\ No newline at end of file
diff --git a/docs/_posts/ahmedlone127/2023-11-14-bert_qa_bert_large_uncased_whole_word_masking_squad2_en.md b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_bert_large_uncased_whole_word_masking_squad2_en.md
new file mode 100644
index 00000000000000..5162254c1c317b
--- /dev/null
+++ b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_bert_large_uncased_whole_word_masking_squad2_en.md
@@ -0,0 +1,108 @@
+---
+layout: model
+title: English BertForQuestionAnswering model (from deepset)
+author: John Snow Labs
+name: bert_qa_bert_large_uncased_whole_word_masking_squad2
+date: 2023-11-14
+tags: [en, open_source, question_answering, bert, onnx]
+task: Question Answering
+language: en
+edition: Spark NLP 5.2.0
+spark_version: 3.0
+supported: true
+engine: onnx
+annotator: BertForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained Question Answering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `bert-large-uncased-whole-word-masking-squad2` is an English model originally trained by `deepset`.
+
+## Predicted Entities
+
+
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/bert_qa_bert_large_uncased_whole_word_masking_squad2_en_5.2.0_3.0_1699995681298.zip){:.button.button-orange.button-orange-trans.arr.button-icon}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/bert_qa_bert_large_uncased_whole_word_masking_squad2_en_5.2.0_3.0_1699995681298.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+document_assembler = MultiDocumentAssembler() \
+.setInputCols(["question", "context"]) \
+.setOutputCols(["document_question", "document_context"])
+
+spanClassifier = BertForQuestionAnswering.pretrained("bert_qa_bert_large_uncased_whole_word_masking_squad2","en") \
+.setInputCols(["document_question", "document_context"]) \
+.setOutputCol("answer") \
+.setCaseSensitive(True)
+
+pipeline = Pipeline().setStages([
+document_assembler,
+spanClassifier
+])
+
+example = spark.createDataFrame([["What's my name?", "My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(example).transform(example)
+```
+```scala
+val document = new MultiDocumentAssembler()
+.setInputCols("question", "context")
+.setOutputCols("document_question", "document_context")
+
+val spanClassifier = BertForQuestionAnswering
+.pretrained("bert_qa_bert_large_uncased_whole_word_masking_squad2","en")
+.setInputCols(Array("document_question", "document_context"))
+.setOutputCol("answer")
+.setCaseSensitive(true)
+.setMaxSentenceLength(512)
+
+val pipeline = new Pipeline().setStages(Array(document, spanClassifier))
+
+val example = Seq(
+("Where was John Lenon born?", "John Lenon was born in London and lived in Paris. My name is Sarah and I live in London."),
+("What's my name?", "My name is Clara and I live in Berkeley."))
+.toDF("question", "context")
+
+val result = pipeline.fit(example).transform(example)
+```
+
+{:.nlu-block}
+```python
+import nlu
+nlu.load("en.answer_question.squadv2.bert.large_uncased.by_deepset").predict("""What's my name?|||"My name is Clara and I live in Berkeley.""")
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|bert_qa_bert_large_uncased_whole_word_masking_squad2|
+|Compatibility:|Spark NLP 5.2.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[sentence, token]|
+|Output Labels:|[embeddings]|
+|Language:|en|
+|Size:|1.3 GB|
+|Case sensitive:|false|
+|Max sentence length:|512|
+
+## References
+
+References
+
+- https://huggingface.co/deepset/bert-large-uncased-whole-word-masking-squad2
\ No newline at end of file
diff --git a/docs/_posts/ahmedlone127/2023-11-14-bert_qa_bert_medium_finetuned_squad_en.md b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_bert_medium_finetuned_squad_en.md
new file mode 100644
index 00000000000000..071a0739a79cdb
--- /dev/null
+++ b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_bert_medium_finetuned_squad_en.md
@@ -0,0 +1,108 @@
+---
+layout: model
+title: English BertForQuestionAnswering model (from anas-awadalla)
+author: John Snow Labs
+name: bert_qa_bert_medium_finetuned_squad
+date: 2023-11-14
+tags: [en, open_source, question_answering, bert, onnx]
+task: Question Answering
+language: en
+edition: Spark NLP 5.2.0
+spark_version: 3.0
+supported: true
+engine: onnx
+annotator: BertForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained Question Answering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `bert-medium-finetuned-squad` is an English model originally trained by `anas-awadalla`.
+
+## Predicted Entities
+
+
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/bert_qa_bert_medium_finetuned_squad_en_5.2.0_3.0_1699998837047.zip){:.button.button-orange.button-orange-trans.arr.button-icon}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/bert_qa_bert_medium_finetuned_squad_en_5.2.0_3.0_1699998837047.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+document_assembler = MultiDocumentAssembler() \
+.setInputCols(["question", "context"]) \
+.setOutputCols(["document_question", "document_context"])
+
+spanClassifier = BertForQuestionAnswering.pretrained("bert_qa_bert_medium_finetuned_squad","en") \
+.setInputCols(["document_question", "document_context"]) \
+.setOutputCol("answer") \
+.setCaseSensitive(True)
+
+pipeline = Pipeline().setStages([
+document_assembler,
+spanClassifier
+])
+
+example = spark.createDataFrame([["What's my name?", "My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(example).transform(example)
+```
+```scala
+val document = new MultiDocumentAssembler()
+.setInputCols("question", "context")
+.setOutputCols("document_question", "document_context")
+
+val spanClassifier = BertForQuestionAnswering
+.pretrained("bert_qa_bert_medium_finetuned_squad","en")
+.setInputCols(Array("document_question", "document_context"))
+.setOutputCol("answer")
+.setCaseSensitive(true)
+.setMaxSentenceLength(512)
+
+val pipeline = new Pipeline().setStages(Array(document, spanClassifier))
+
+val example = Seq(
+("Where was John Lenon born?", "John Lenon was born in London and lived in Paris. My name is Sarah and I live in London."),
+("What's my name?", "My name is Clara and I live in Berkeley."))
+.toDF("question", "context")
+
+val result = pipeline.fit(example).transform(example)
+```
+
+{:.nlu-block}
+```python
+import nlu
+nlu.load("en.answer_question.squad.bert.medium.by_anas-awadalla").predict("""What's my name?|||"My name is Clara and I live in Berkeley.""")
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|bert_qa_bert_medium_finetuned_squad|
+|Compatibility:|Spark NLP 5.2.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[sentence, token]|
+|Output Labels:|[embeddings]|
+|Language:|en|
+|Size:|154.2 MB|
+|Case sensitive:|true|
+|Max sentence length:|512|
+
+## References
+
+References
+
+- https://huggingface.co/anas-awadalla/bert-medium-finetuned-squad
\ No newline at end of file
diff --git a/docs/_posts/ahmedlone127/2023-11-14-bert_qa_bert_medium_squad2_distilled_en.md b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_bert_medium_squad2_distilled_en.md
new file mode 100644
index 00000000000000..d9e169cf8b4e2a
--- /dev/null
+++ b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_bert_medium_squad2_distilled_en.md
@@ -0,0 +1,118 @@
+---
+layout: model
+title: English BertForQuestionAnswering model (from deepset)
+author: John Snow Labs
+name: bert_qa_bert_medium_squad2_distilled
+date: 2023-11-14
+tags: [en, open_source, question_answering, bert, onnx]
+task: Question Answering
+language: en
+edition: Spark NLP 5.2.0
+spark_version: 3.0
+supported: true
+engine: onnx
+annotator: BertForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained Question Answering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `bert-medium-squad2-distilled` is an English model originally trained by `deepset`.
+
+## Predicted Entities
+
+
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/bert_qa_bert_medium_squad2_distilled_en_5.2.0_3.0_1699999003843.zip){:.button.button-orange.button-orange-trans.arr.button-icon}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/bert_qa_bert_medium_squad2_distilled_en_5.2.0_3.0_1699999003843.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+document_assembler = MultiDocumentAssembler() \
+.setInputCols(["question", "context"]) \
+.setOutputCols(["document_question", "document_context"])
+
+spanClassifier = BertForQuestionAnswering.pretrained("bert_qa_bert_medium_squad2_distilled","en") \
+.setInputCols(["document_question", "document_context"]) \
+.setOutputCol("answer") \
+.setCaseSensitive(True)
+
+pipeline = Pipeline().setStages([
+document_assembler,
+spanClassifier
+])
+
+example = spark.createDataFrame([["What's my name?", "My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(example).transform(example)
+```
+```scala
+val document = new MultiDocumentAssembler()
+.setInputCols("question", "context")
+.setOutputCols("document_question", "document_context")
+
+val spanClassifier = BertForQuestionAnswering
+.pretrained("bert_qa_bert_medium_squad2_distilled","en")
+.setInputCols(Array("document_question", "document_context"))
+.setOutputCol("answer")
+.setCaseSensitive(true)
+.setMaxSentenceLength(512)
+
+val pipeline = new Pipeline().setStages(Array(document, spanClassifier))
+
+val example = Seq(
+("Where was John Lenon born?", "John Lenon was born in London and lived in Paris. My name is Sarah and I live in London."),
+("What's my name?", "My name is Clara and I live in Berkeley."))
+.toDF("question", "context")
+
+val result = pipeline.fit(example).transform(example)
+```
+
+{:.nlu-block}
+```python
+import nlu
+nlu.load("en.answer_question.squadv2.bert.distilled_medium").predict("""What's my name?|||"My name is Clara and I live in Berkeley.""")
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|bert_qa_bert_medium_squad2_distilled|
+|Compatibility:|Spark NLP 5.2.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[sentence, token]|
+|Output Labels:|[embeddings]|
+|Language:|en|
+|Size:|154.2 MB|
+|Case sensitive:|true|
+|Max sentence length:|512|
+
+## References
+
+References
+
+- https://huggingface.co/deepset/bert-medium-squad2-distilled
+- https://github.com/deepset-ai/haystack/discussions
+- https://deepset.ai
+- https://twitter.com/deepset_ai
+- http://www.deepset.ai/jobs
+- https://haystack.deepset.ai/community/join
+- https://github.com/deepset-ai/haystack/
+- https://deepset.ai/german-bert
+- https://www.linkedin.com/company/deepset-ai/
+- https://github.com/deepset-ai/FARM
+- https://deepset.ai/germanquad
\ No newline at end of file
diff --git a/docs/_posts/ahmedlone127/2023-11-14-bert_qa_bert_mini_5_finetuned_squadv2_en.md b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_bert_mini_5_finetuned_squadv2_en.md
new file mode 100644
index 00000000000000..73375612864e05
--- /dev/null
+++ b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_bert_mini_5_finetuned_squadv2_en.md
@@ -0,0 +1,108 @@
+---
+layout: model
+title: English BertForQuestionAnswering model (from mrm8488)
+author: John Snow Labs
+name: bert_qa_bert_mini_5_finetuned_squadv2
+date: 2023-11-14
+tags: [en, open_source, question_answering, bert, onnx]
+task: Question Answering
+language: en
+edition: Spark NLP 5.2.0
+spark_version: 3.0
+supported: true
+engine: onnx
+annotator: BertForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained Question Answering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `bert-mini-5-finetuned-squadv2` is an English model originally trained by `mrm8488`.
+
+## Predicted Entities
+
+
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/bert_qa_bert_mini_5_finetuned_squadv2_en_5.2.0_3.0_1699999165366.zip){:.button.button-orange.button-orange-trans.arr.button-icon}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/bert_qa_bert_mini_5_finetuned_squadv2_en_5.2.0_3.0_1699999165366.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+document_assembler = MultiDocumentAssembler() \
+.setInputCols(["question", "context"]) \
+.setOutputCols(["document_question", "document_context"])
+
+spanClassifier = BertForQuestionAnswering.pretrained("bert_qa_bert_mini_5_finetuned_squadv2","en") \
+.setInputCols(["document_question", "document_context"]) \
+.setOutputCol("answer") \
+.setCaseSensitive(True)
+
+pipeline = Pipeline().setStages([
+document_assembler,
+spanClassifier
+])
+
+example = spark.createDataFrame([["What's my name?", "My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(example).transform(example)
+```
+```scala
+val document = new MultiDocumentAssembler()
+.setInputCols("question", "context")
+.setOutputCols("document_question", "document_context")
+
+val spanClassifier = BertForQuestionAnswering
+.pretrained("bert_qa_bert_mini_5_finetuned_squadv2","en")
+.setInputCols(Array("document_question", "document_context"))
+.setOutputCol("answer")
+.setCaseSensitive(true)
+.setMaxSentenceLength(512)
+
+val pipeline = new Pipeline().setStages(Array(document, spanClassifier))
+
+val example = Seq(
+("Where was John Lenon born?", "John Lenon was born in London and lived in Paris. My name is Sarah and I live in London."),
+("What's my name?", "My name is Clara and I live in Berkeley."))
+.toDF("question", "context")
+
+val result = pipeline.fit(example).transform(example)
+```
+
+{:.nlu-block}
+```python
+import nlu
+nlu.load("en.answer_question.squadv2.bert.base_v2_5.by_mrm8488").predict("""What's my name?|||"My name is Clara and I live in Berkeley.""")
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|bert_qa_bert_mini_5_finetuned_squadv2|
+|Compatibility:|Spark NLP 5.2.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[sentence, token]|
+|Output Labels:|[embeddings]|
+|Language:|en|
+|Size:|65.5 MB|
+|Case sensitive:|true|
+|Max sentence length:|512|
+
+## References
+
+References
+
+- https://huggingface.co/mrm8488/bert-mini-5-finetuned-squadv2
\ No newline at end of file
diff --git a/docs/_posts/ahmedlone127/2023-11-14-bert_qa_bert_multi_cased_finedtuned_xquad_chaii_en.md b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_bert_multi_cased_finedtuned_xquad_chaii_en.md
new file mode 100644
index 00000000000000..cd404986d0e73b
--- /dev/null
+++ b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_bert_multi_cased_finedtuned_xquad_chaii_en.md
@@ -0,0 +1,108 @@
+---
+layout: model
+title: English BertForQuestionAnswering model (from SauravMaheshkar)
+author: John Snow Labs
+name: bert_qa_bert_multi_cased_finedtuned_xquad_chaii
+date: 2023-11-14
+tags: [en, open_source, question_answering, bert, onnx]
+task: Question Answering
+language: en
+edition: Spark NLP 5.2.0
+spark_version: 3.0
+supported: true
+engine: onnx
+annotator: BertForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained Question Answering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `bert-multi-cased-finedtuned-xquad-chaii` is an English model originally trained by `SauravMaheshkar`.
+
+## Predicted Entities
+
+
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/bert_qa_bert_multi_cased_finedtuned_xquad_chaii_en_5.2.0_3.0_1699999525919.zip){:.button.button-orange.button-orange-trans.arr.button-icon}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/bert_qa_bert_multi_cased_finedtuned_xquad_chaii_en_5.2.0_3.0_1699999525919.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+document_assembler = MultiDocumentAssembler() \
+.setInputCols(["question", "context"]) \
+.setOutputCols(["document_question", "document_context"])
+
+spanClassifier = BertForQuestionAnswering.pretrained("bert_qa_bert_multi_cased_finedtuned_xquad_chaii","en") \
+.setInputCols(["document_question", "document_context"]) \
+.setOutputCol("answer") \
+.setCaseSensitive(True)
+
+pipeline = Pipeline().setStages([
+document_assembler,
+spanClassifier
+])
+
+example = spark.createDataFrame([["What's my name?", "My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(example).transform(example)
+```
+```scala
+val document = new MultiDocumentAssembler()
+.setInputCols("question", "context")
+.setOutputCols("document_question", "document_context")
+
+val spanClassifier = BertForQuestionAnswering
+.pretrained("bert_qa_bert_multi_cased_finedtuned_xquad_chaii","en")
+.setInputCols(Array("document_question", "document_context"))
+.setOutputCol("answer")
+.setCaseSensitive(true)
+.setMaxSentenceLength(512)
+
+val pipeline = new Pipeline().setStages(Array(document, spanClassifier))
+
+val example = Seq(
+("Where was John Lenon born?", "John Lenon was born in London and lived in Paris. My name is Sarah and I live in London."),
+("What's my name?", "My name is Clara and I live in Berkeley."))
+.toDF("question", "context")
+
+val result = pipeline.fit(example).transform(example)
+```
+
+{:.nlu-block}
+```python
+import nlu
+nlu.load("en.answer_question.xquad_chaii.bert.cased").predict("""What's my name?|||"My name is Clara and I live in Berkeley.""")
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|bert_qa_bert_multi_cased_finedtuned_xquad_chaii|
+|Compatibility:|Spark NLP 5.2.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[sentence, token]|
+|Output Labels:|[embeddings]|
+|Language:|en|
+|Size:|665.0 MB|
+|Case sensitive:|true|
+|Max sentence length:|512|
+
+## References
+
+References
+
+- https://huggingface.co/SauravMaheshkar/bert-multi-cased-finedtuned-xquad-chaii
\ No newline at end of file
diff --git a/docs/_posts/ahmedlone127/2023-11-14-bert_qa_bert_multi_cased_finedtuned_xquad_tydiqa_goldp_xx.md b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_bert_multi_cased_finedtuned_xquad_tydiqa_goldp_xx.md
new file mode 100644
index 00000000000000..5fe774c072294a
--- /dev/null
+++ b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_bert_multi_cased_finedtuned_xquad_tydiqa_goldp_xx.md
@@ -0,0 +1,108 @@
+---
+layout: model
+title: Multilingual BertForQuestionAnswering model (from mrm8488)
+author: John Snow Labs
+name: bert_qa_bert_multi_cased_finedtuned_xquad_tydiqa_goldp
+date: 2023-11-14
+tags: [te, en, open_source, question_answering, bert, xx, onnx]
+task: Question Answering
+language: xx
+edition: Spark NLP 5.2.0
+spark_version: 3.0
+supported: true
+engine: onnx
+annotator: BertForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained Question Answering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `bert-multi-cased-finedtuned-xquad-tydiqa-goldp` is a Multilingual model originally trained by `mrm8488`.
+
+## Predicted Entities
+
+
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/bert_qa_bert_multi_cased_finedtuned_xquad_tydiqa_goldp_xx_5.2.0_3.0_1699998394777.zip){:.button.button-orange.button-orange-trans.arr.button-icon}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/bert_qa_bert_multi_cased_finedtuned_xquad_tydiqa_goldp_xx_5.2.0_3.0_1699998394777.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+document_assembler = MultiDocumentAssembler() \
+.setInputCols(["question", "context"]) \
+.setOutputCols(["document_question", "document_context"])
+
+spanClassifier = BertForQuestionAnswering.pretrained("bert_qa_bert_multi_cased_finedtuned_xquad_tydiqa_goldp","xx") \
+.setInputCols(["document_question", "document_context"]) \
+.setOutputCol("answer") \
+.setCaseSensitive(True)
+
+pipeline = Pipeline().setStages([
+document_assembler,
+spanClassifier
+])
+
+example = spark.createDataFrame([["What's my name?", "My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(example).transform(example)
+```
+```scala
+val document = new MultiDocumentAssembler()
+.setInputCols("question", "context")
+.setOutputCols("document_question", "document_context")
+
+val spanClassifier = BertForQuestionAnswering
+.pretrained("bert_qa_bert_multi_cased_finedtuned_xquad_tydiqa_goldp","xx")
+.setInputCols(Array("document_question", "document_context"))
+.setOutputCol("answer")
+.setCaseSensitive(true)
+.setMaxSentenceLength(512)
+
+val pipeline = new Pipeline().setStages(Array(document, spanClassifier))
+
+val example = Seq(
+("Where was John Lenon born?", "John Lenon was born in London and lived in Paris. My name is Sarah and I live in London."),
+("What's my name?", "My name is Clara and I live in Berkeley."))
+.toDF("question", "context")
+
+val result = pipeline.fit(example).transform(example)
+```
+
+{:.nlu-block}
+```python
+import nlu
+nlu.load("xx.answer_question.xquad_tydiqa.bert.cased").predict("""What's my name?|||"My name is Clara and I live in Berkeley.""")
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|bert_qa_bert_multi_cased_finedtuned_xquad_tydiqa_goldp|
+|Compatibility:|Spark NLP 5.2.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|xx|
+|Size:|665.0 MB|
+|Case sensitive:|true|
+|Max sentence length:|512|
+
+## References
+
+References
+
+- https://huggingface.co/mrm8488/bert-multi-cased-finedtuned-xquad-tydiqa-goldp
\ No newline at end of file
diff --git a/docs/_posts/ahmedlone127/2023-11-14-bert_qa_bert_multi_cased_finetuned_chaii_en.md b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_bert_multi_cased_finetuned_chaii_en.md
new file mode 100644
index 00000000000000..ddfe6dd24aa063
--- /dev/null
+++ b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_bert_multi_cased_finetuned_chaii_en.md
@@ -0,0 +1,108 @@
+---
+layout: model
+title: English BertForQuestionAnswering model (from SauravMaheshkar)
+author: John Snow Labs
+name: bert_qa_bert_multi_cased_finetuned_chaii
+date: 2023-11-14
+tags: [en, open_source, question_answering, bert, onnx]
+task: Question Answering
+language: en
+edition: Spark NLP 5.2.0
+spark_version: 3.0
+supported: true
+engine: onnx
+annotator: BertForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained Question Answering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `bert-multi-cased-finetuned-chaii` is a English model originally trained by `SauravMaheshkar`.
+
+## Predicted Entities
+
+
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/bert_qa_bert_multi_cased_finetuned_chaii_en_5.2.0_3.0_1699998193227.zip){:.button.button-orange.button-orange-trans.arr.button-icon}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/bert_qa_bert_multi_cased_finetuned_chaii_en_5.2.0_3.0_1699998193227.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+document_assembler = MultiDocumentAssembler() \
+.setInputCols(["question", "context"]) \
+.setOutputCols(["document_question", "document_context"])
+
+spanClassifier = BertForQuestionAnswering.pretrained("bert_qa_bert_multi_cased_finetuned_chaii","en") \
+.setInputCols(["document_question", "document_context"]) \
+.setOutputCol("answer") \
+.setCaseSensitive(True)
+
+pipeline = Pipeline().setStages([
+document_assembler,
+spanClassifier
+])
+
+example = spark.createDataFrame([["What's my name?", "My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(example).transform(example)
+```
+```scala
+val document = new MultiDocumentAssembler()
+.setInputCols("question", "context")
+.setOutputCols("document_question", "document_context")
+
+val spanClassifier = BertForQuestionAnswering
+.pretrained("bert_qa_bert_multi_cased_finetuned_chaii","en")
+.setInputCols(Array("document_question", "document_context"))
+.setOutputCol("answer")
+.setCaseSensitive(true)
+.setMaxSentenceLength(512)
+
+val pipeline = new Pipeline().setStages(Array(document, spanClassifier))
+
+val example = Seq(
+("Where was John Lenon born?", "John Lenon was born in London and lived in Paris. My name is Sarah and I live in London."),
+("What's my name?", "My name is Clara and I live in Berkeley."))
+.toDF("question", "context")
+
+val result = pipeline.fit(example).transform(example)
+```
+
+{:.nlu-block}
+```python
+import nlu
+nlu.load("en.answer_question.chaii.bert.cased").predict("""What's my name?|||"My name is Clara and I live in Berkeley.""")
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|bert_qa_bert_multi_cased_finetuned_chaii|
+|Compatibility:|Spark NLP 5.2.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|665.0 MB|
+|Case sensitive:|true|
+|Max sentence length:|512|
+
+## References
+
+References
+
+- https://huggingface.co/SauravMaheshkar/bert-multi-cased-finetuned-chaii
\ No newline at end of file
diff --git a/docs/_posts/ahmedlone127/2023-11-14-bert_qa_bert_multi_cased_finetuned_xquadv1_finetuned_squad_colab_en.md b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_bert_multi_cased_finetuned_xquadv1_finetuned_squad_colab_en.md
new file mode 100644
index 00000000000000..c6cd3a8fca719f
--- /dev/null
+++ b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_bert_multi_cased_finetuned_xquadv1_finetuned_squad_colab_en.md
@@ -0,0 +1,108 @@
+---
+layout: model
+title: English BertForQuestionAnswering model (from TingChenChang)
+author: John Snow Labs
+name: bert_qa_bert_multi_cased_finetuned_xquadv1_finetuned_squad_colab
+date: 2023-11-14
+tags: [en, open_source, question_answering, bert, onnx]
+task: Question Answering
+language: en
+edition: Spark NLP 5.2.0
+spark_version: 3.0
+supported: true
+engine: onnx
+annotator: BertForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained Question Answering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `bert-multi-cased-finetuned-xquadv1-finetuned-squad-colab` is a English model originally trained by `TingChenChang`.
+
+## Predicted Entities
+
+
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/bert_qa_bert_multi_cased_finetuned_xquadv1_finetuned_squad_colab_en_5.2.0_3.0_1699998716394.zip){:.button.button-orange.button-orange-trans.arr.button-icon}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/bert_qa_bert_multi_cased_finetuned_xquadv1_finetuned_squad_colab_en_5.2.0_3.0_1699998716394.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+document_assembler = MultiDocumentAssembler() \
+.setInputCols(["question", "context"]) \
+.setOutputCols(["document_question", "document_context"])
+
+spanClassifier = BertForQuestionAnswering.pretrained("bert_qa_bert_multi_cased_finetuned_xquadv1_finetuned_squad_colab","en") \
+.setInputCols(["document_question", "document_context"]) \
+.setOutputCol("answer") \
+.setCaseSensitive(True)
+
+pipeline = Pipeline().setStages([
+document_assembler,
+spanClassifier
+])
+
+example = spark.createDataFrame([["What's my name?", "My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(example).transform(example)
+```
+```scala
+val document = new MultiDocumentAssembler()
+.setInputCols("question", "context")
+.setOutputCols("document_question", "document_context")
+
+val spanClassifier = BertForQuestionAnswering
+.pretrained("bert_qa_bert_multi_cased_finetuned_xquadv1_finetuned_squad_colab","en")
+.setInputCols(Array("document_question", "document_context"))
+.setOutputCol("answer")
+.setCaseSensitive(true)
+.setMaxSentenceLength(512)
+
+val pipeline = new Pipeline().setStages(Array(document, spanClassifier))
+
+val example = Seq(
+("Where was John Lenon born?", "John Lenon was born in London and lived in Paris. My name is Sarah and I live in London."),
+("What's my name?", "My name is Clara and I live in Berkeley."))
+.toDF("question", "context")
+
+val result = pipeline.fit(example).transform(example)
+```
+
+{:.nlu-block}
+```python
+import nlu
+nlu.load("en.answer_question.xquad_squad.bert.cased").predict("""What's my name?|||"My name is Clara and I live in Berkeley.""")
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|bert_qa_bert_multi_cased_finetuned_xquadv1_finetuned_squad_colab|
+|Compatibility:|Spark NLP 5.2.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|665.0 MB|
+|Case sensitive:|true|
+|Max sentence length:|512|
+
+## References
+
+References
+
+- https://huggingface.co/TingChenChang/bert-multi-cased-finetuned-xquadv1-finetuned-squad-colab
\ No newline at end of file
diff --git a/docs/_posts/ahmedlone127/2023-11-14-bert_qa_bert_multi_english_german_squad2_de.md b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_bert_multi_english_german_squad2_de.md
new file mode 100644
index 00000000000000..3de3ec27be7585
--- /dev/null
+++ b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_bert_multi_english_german_squad2_de.md
@@ -0,0 +1,110 @@
+---
+layout: model
+title: German BertForQuestionAnswering model (from deutsche-telekom)
+author: John Snow Labs
+name: bert_qa_bert_multi_english_german_squad2
+date: 2023-11-14
+tags: [de, open_source, question_answering, bert, onnx]
+task: Question Answering
+language: de
+edition: Spark NLP 5.2.0
+spark_version: 3.0
+supported: true
+engine: onnx
+annotator: BertForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained Question Answering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `bert-multi-english-german-squad2` is a German model originally trained by `deutsche-telekom`.
+
+## Predicted Entities
+
+
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/bert_qa_bert_multi_english_german_squad2_de_5.2.0_3.0_1699996034487.zip){:.button.button-orange.button-orange-trans.arr.button-icon}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/bert_qa_bert_multi_english_german_squad2_de_5.2.0_3.0_1699996034487.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+document_assembler = MultiDocumentAssembler() \
+.setInputCols(["question", "context"]) \
+.setOutputCols(["document_question", "document_context"])
+
+spanClassifier = BertForQuestionAnswering.pretrained("bert_qa_bert_multi_english_german_squad2","de") \
+.setInputCols(["document_question", "document_context"]) \
+.setOutputCol("answer") \
+.setCaseSensitive(True)
+
+pipeline = Pipeline().setStages([
+document_assembler,
+spanClassifier
+])
+
+example = spark.createDataFrame([["What's my name?", "My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(example).transform(example)
+```
+```scala
+val document = new MultiDocumentAssembler()
+.setInputCols("question", "context")
+.setOutputCols("document_question", "document_context")
+
+val spanClassifier = BertForQuestionAnswering
+.pretrained("bert_qa_bert_multi_english_german_squad2","de")
+.setInputCols(Array("document_question", "document_context"))
+.setOutputCol("answer")
+.setCaseSensitive(true)
+.setMaxSentenceLength(512)
+
+val pipeline = new Pipeline().setStages(Array(document, spanClassifier))
+
+val example = Seq(
+("Where was John Lenon born?", "John Lenon was born in London and lived in Paris. My name is Sarah and I live in London."),
+("What's my name?", "My name is Clara and I live in Berkeley."))
+.toDF("question", "context")
+
+val result = pipeline.fit(example).transform(example)
+```
+
+{:.nlu-block}
+```python
+import nlu
+nlu.load("de.answer_question.squadv2.bert").predict("""What's my name?|||"My name is Clara and I live in Berkeley.""")
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|bert_qa_bert_multi_english_german_squad2|
+|Compatibility:|Spark NLP 5.2.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|de|
+|Size:|665.0 MB|
+|Case sensitive:|true|
+|Max sentence length:|512|
+
+## References
+
+References
+
+- https://huggingface.co/deutsche-telekom/bert-multi-english-german-squad2
+- https://rajpurkar.github.io/SQuAD-explorer/
+- https://github.com/google-research/bert/blob/master/multilingual.md
\ No newline at end of file
diff --git a/docs/_posts/ahmedlone127/2023-11-14-bert_qa_bert_multi_uncased_finetuned_chaii_en.md b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_bert_multi_uncased_finetuned_chaii_en.md
new file mode 100644
index 00000000000000..5127b5592c469b
--- /dev/null
+++ b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_bert_multi_uncased_finetuned_chaii_en.md
@@ -0,0 +1,108 @@
+---
+layout: model
+title: English BertForQuestionAnswering model (from SauravMaheshkar)
+author: John Snow Labs
+name: bert_qa_bert_multi_uncased_finetuned_chaii
+date: 2023-11-14
+tags: [en, open_source, question_answering, bert, onnx]
+task: Question Answering
+language: en
+edition: Spark NLP 5.2.0
+spark_version: 3.0
+supported: true
+engine: onnx
+annotator: BertForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained Question Answering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `bert-multi-uncased-finetuned-chaii` is a English model originally trained by `SauravMaheshkar`.
+
+## Predicted Entities
+
+
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/bert_qa_bert_multi_uncased_finetuned_chaii_en_5.2.0_3.0_1699999874498.zip){:.button.button-orange.button-orange-trans.arr.button-icon}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/bert_qa_bert_multi_uncased_finetuned_chaii_en_5.2.0_3.0_1699999874498.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+document_assembler = MultiDocumentAssembler() \
+.setInputCols(["question", "context"]) \
+.setOutputCols(["document_question", "document_context"])
+
+spanClassifier = BertForQuestionAnswering.pretrained("bert_qa_bert_multi_uncased_finetuned_chaii","en") \
+.setInputCols(["document_question", "document_context"]) \
+.setOutputCol("answer") \
+.setCaseSensitive(True)
+
+pipeline = Pipeline().setStages([
+document_assembler,
+spanClassifier
+])
+
+example = spark.createDataFrame([["What's my name?", "My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(example).transform(example)
+```
+```scala
+val document = new MultiDocumentAssembler()
+.setInputCols("question", "context")
+.setOutputCols("document_question", "document_context")
+
+val spanClassifier = BertForQuestionAnswering
+.pretrained("bert_qa_bert_multi_uncased_finetuned_chaii","en")
+.setInputCols(Array("document_question", "document_context"))
+.setOutputCol("answer")
+.setCaseSensitive(true)
+.setMaxSentenceLength(512)
+
+val pipeline = new Pipeline().setStages(Array(document, spanClassifier))
+
+val example = Seq(
+("Where was John Lenon born?", "John Lenon was born in London and lived in Paris. My name is Sarah and I live in London."),
+("What's my name?", "My name is Clara and I live in Berkeley."))
+.toDF("question", "context")
+
+val result = pipeline.fit(example).transform(example)
+```
+
+{:.nlu-block}
+```python
+import nlu
+nlu.load("en.answer_question.chaii.bert.uncased").predict("""What's my name?|||"My name is Clara and I live in Berkeley.""")
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|bert_qa_bert_multi_uncased_finetuned_chaii|
+|Compatibility:|Spark NLP 5.2.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|625.5 MB|
+|Case sensitive:|false|
+|Max sentence length:|512|
+
+## References
+
+References
+
+- https://huggingface.co/SauravMaheshkar/bert-multi-uncased-finetuned-chaii
\ No newline at end of file
diff --git a/docs/_posts/ahmedlone127/2023-11-14-bert_qa_bert_qasper_en.md b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_bert_qasper_en.md
new file mode 100644
index 00000000000000..27f9234aa15a50
--- /dev/null
+++ b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_bert_qasper_en.md
@@ -0,0 +1,108 @@
+---
+layout: model
+title: English BertForQuestionAnswering model (from z-uo)
+author: John Snow Labs
+name: bert_qa_bert_qasper
+date: 2023-11-14
+tags: [en, open_source, question_answering, bert, onnx]
+task: Question Answering
+language: en
+edition: Spark NLP 5.2.0
+spark_version: 3.0
+supported: true
+engine: onnx
+annotator: BertForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained Question Answering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `bert-qasper` is a English model originally trained by `z-uo`.
+
+## Predicted Entities
+
+
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/bert_qa_bert_qasper_en_5.2.0_3.0_1699998983862.zip){:.button.button-orange.button-orange-trans.arr.button-icon}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/bert_qa_bert_qasper_en_5.2.0_3.0_1699998983862.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+document_assembler = MultiDocumentAssembler() \
+.setInputCols(["question", "context"]) \
+.setOutputCols(["document_question", "document_context"])
+
+spanClassifier = BertForQuestionAnswering.pretrained("bert_qa_bert_qasper","en") \
+.setInputCols(["document_question", "document_context"]) \
+.setOutputCol("answer") \
+.setCaseSensitive(True)
+
+pipeline = Pipeline().setStages([
+document_assembler,
+spanClassifier
+])
+
+example = spark.createDataFrame([["What's my name?", "My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(example).transform(example)
+```
+```scala
+val document = new MultiDocumentAssembler()
+.setInputCols("question", "context")
+.setOutputCols("document_question", "document_context")
+
+val spanClassifier = BertForQuestionAnswering
+.pretrained("bert_qa_bert_qasper","en")
+.setInputCols(Array("document_question", "document_context"))
+.setOutputCol("answer")
+.setCaseSensitive(true)
+.setMaxSentenceLength(512)
+
+val pipeline = new Pipeline().setStages(Array(document, spanClassifier))
+
+val example = Seq(
+("Where was John Lenon born?", "John Lenon was born in London and lived in Paris. My name is Sarah and I live in London."),
+("What's my name?", "My name is Clara and I live in Berkeley."))
+.toDF("question", "context")
+
+val result = pipeline.fit(example).transform(example)
+```
+
+{:.nlu-block}
+```python
+import nlu
+nlu.load("en.answer_question.bert.by_z-uo").predict("""What's my name?|||"My name is Clara and I live in Berkeley.""")
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|bert_qa_bert_qasper|
+|Compatibility:|Spark NLP 5.2.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|407.2 MB|
+|Case sensitive:|true|
+|Max sentence length:|512|
+
+## References
+
+References
+
+- https://huggingface.co/z-uo/bert-qasper
\ No newline at end of file
diff --git a/docs/_posts/ahmedlone127/2023-11-14-bert_qa_bert_set_date_1_lr_2e_5_bosnian_32_ep_4_en.md b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_bert_set_date_1_lr_2e_5_bosnian_32_ep_4_en.md
new file mode 100644
index 00000000000000..e2c457e0877fa8
--- /dev/null
+++ b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_bert_set_date_1_lr_2e_5_bosnian_32_ep_4_en.md
@@ -0,0 +1,93 @@
+---
+layout: model
+title: English bert_qa_bert_set_date_1_lr_2e_5_bosnian_32_ep_4 BertForQuestionAnswering from motiondew
+author: John Snow Labs
+name: bert_qa_bert_set_date_1_lr_2e_5_bosnian_32_ep_4
+date: 2023-11-14
+tags: [bert, en, open_source, question_answering, onnx]
+task: Question Answering
+language: en
+edition: Spark NLP 5.2.0
+spark_version: 3.0
+supported: true
+engine: onnx
+annotator: BertForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained BertForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP.`bert_qa_bert_set_date_1_lr_2e_5_bosnian_32_ep_4` is a English model originally trained by motiondew.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/bert_qa_bert_set_date_1_lr_2e_5_bosnian_32_ep_4_en_5.2.0_3.0_1699999176084.zip){:.button.button-orange.button-orange-trans.arr.button-icon}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/bert_qa_bert_set_date_1_lr_2e_5_bosnian_32_ep_4_en_5.2.0_3.0_1699999176084.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|bert_qa_bert_set_date_1_lr_2e_5_bosnian_32_ep_4|
+|Compatibility:|Spark NLP 5.2.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|407.2 MB|
+
+## References
+
+https://huggingface.co/motiondew/bert-set_date_1-lr-2e-5-bs-32-ep-4
\ No newline at end of file
diff --git a/docs/_posts/ahmedlone127/2023-11-14-bert_qa_bert_small_finetuned_squadv2_en.md b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_bert_small_finetuned_squadv2_en.md
new file mode 100644
index 00000000000000..9687a398d315da
--- /dev/null
+++ b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_bert_small_finetuned_squadv2_en.md
@@ -0,0 +1,114 @@
+---
+layout: model
+title: English BertForQuestionAnswering model (from mrm8488)
+author: John Snow Labs
+name: bert_qa_bert_small_finetuned_squadv2
+date: 2023-11-14
+tags: [en, open_source, question_answering, bert, onnx]
+task: Question Answering
+language: en
+edition: Spark NLP 5.2.0
+spark_version: 3.0
+supported: true
+engine: onnx
+annotator: BertForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained Question Answering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `bert-small-finetuned-squadv2` is a English model originally trained by `mrm8488`.
+
+## Predicted Entities
+
+
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/bert_qa_bert_small_finetuned_squadv2_en_5.2.0_3.0_1699999356346.zip){:.button.button-orange.button-orange-trans.arr.button-icon}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/bert_qa_bert_small_finetuned_squadv2_en_5.2.0_3.0_1699999356346.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+document_assembler = MultiDocumentAssembler() \
+.setInputCols(["question", "context"]) \
+.setOutputCols(["document_question", "document_context"])
+
+spanClassifier = BertForQuestionAnswering.pretrained("bert_qa_bert_small_finetuned_squadv2","en") \
+.setInputCols(["document_question", "document_context"]) \
+.setOutputCol("answer") \
+.setCaseSensitive(True)
+
+pipeline = Pipeline().setStages([
+document_assembler,
+spanClassifier
+])
+
+example = spark.createDataFrame([["What's my name?", "My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(example).transform(example)
+```
+```scala
+val document = new MultiDocumentAssembler()
+.setInputCols("question", "context")
+.setOutputCols("document_question", "document_context")
+
+val spanClassifier = BertForQuestionAnswering
+.pretrained("bert_qa_bert_small_finetuned_squadv2","en")
+.setInputCols(Array("document_question", "document_context"))
+.setOutputCol("answer")
+.setCaseSensitive(true)
+.setMaxSentenceLength(512)
+
+val pipeline = new Pipeline().setStages(Array(document, spanClassifier))
+
+val example = Seq(
+("Where was John Lenon born?", "John Lenon was born in London and lived in Paris. My name is Sarah and I live in London."),
+("What's my name?", "My name is Clara and I live in Berkeley."))
+.toDF("question", "context")
+
+val result = pipeline.fit(example).transform(example)
+```
+
+{:.nlu-block}
+```python
+import nlu
+nlu.load("en.answer_question.squadv2.bert.small.by_mrm8488").predict("""What's my name?|||"My name is Clara and I live in Berkeley.""")
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|bert_qa_bert_small_finetuned_squadv2|
+|Compatibility:|Spark NLP 5.2.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|107.0 MB|
+|Case sensitive:|true|
+|Max sentence length:|512|
+
+## References
+
+References
+
+- https://huggingface.co/mrm8488/bert-small-finetuned-squadv2
+- https://twitter.com/mrm8488
+- https://github.com/google-research
+- https://arxiv.org/abs/1908.08962
+- https://rajpurkar.github.io/SQuAD-explorer/
+- https://github.com/google-research/bert/
+- https://www.linkedin.com/in/manuel-romero-cs/
\ No newline at end of file
diff --git a/docs/_posts/ahmedlone127/2023-11-14-bert_qa_bert_tiny_2_finetuned_squadv2_en.md b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_bert_tiny_2_finetuned_squadv2_en.md
new file mode 100644
index 00000000000000..c9bf6053ea0827
--- /dev/null
+++ b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_bert_tiny_2_finetuned_squadv2_en.md
@@ -0,0 +1,108 @@
+---
+layout: model
+title: English BertForQuestionAnswering model (from mrm8488)
+author: John Snow Labs
+name: bert_qa_bert_tiny_2_finetuned_squadv2
+date: 2023-11-14
+tags: [en, open_source, question_answering, bert, onnx]
+task: Question Answering
+language: en
+edition: Spark NLP 5.2.0
+spark_version: 3.0
+supported: true
+engine: onnx
+annotator: BertForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained Question Answering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `bert-tiny-2-finetuned-squadv2` is a English model originally trained by `mrm8488`.
+
+## Predicted Entities
+
+
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/bert_qa_bert_tiny_2_finetuned_squadv2_en_5.2.0_3.0_1700000353666.zip){:.button.button-orange.button-orange-trans.arr.button-icon}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/bert_qa_bert_tiny_2_finetuned_squadv2_en_5.2.0_3.0_1700000353666.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+document_assembler = MultiDocumentAssembler() \
+.setInputCols(["question", "context"]) \
+.setOutputCols(["document_question", "document_context"])
+
+spanClassifier = BertForQuestionAnswering.pretrained("bert_qa_bert_tiny_2_finetuned_squadv2","en") \
+.setInputCols(["document_question", "document_context"]) \
+.setOutputCol("answer") \
+.setCaseSensitive(True)
+
+pipeline = Pipeline().setStages([
+document_assembler,
+spanClassifier
+])
+
+example = spark.createDataFrame([["What's my name?", "My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(example).transform(example)
+```
+```scala
+val document = new MultiDocumentAssembler()
+.setInputCols("question", "context")
+.setOutputCols("document_question", "document_context")
+
+val spanClassifier = BertForQuestionAnswering
+.pretrained("bert_qa_bert_tiny_2_finetuned_squadv2","en")
+.setInputCols(Array("document_question", "document_context"))
+.setOutputCol("answer")
+.setCaseSensitive(true)
+.setMaxSentenceLength(512)
+
+val pipeline = new Pipeline().setStages(Array(document, spanClassifier))
+
+val example = Seq(
+("Where was John Lenon born?", "John Lenon was born in London and lived in Paris. My name is Sarah and I live in London."),
+("What's my name?", "My name is Clara and I live in Berkeley."))
+.toDF("question", "context")
+
+val result = pipeline.fit(example).transform(example)
+```
+
+{:.nlu-block}
+```python
+import nlu
+nlu.load("en.answer_question.squadv2.bert.tiny_v2.by_mrm8488").predict("""What's my name?|||"My name is Clara and I live in Berkeley.""")
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|bert_qa_bert_tiny_2_finetuned_squadv2|
+|Compatibility:|Spark NLP 5.2.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|19.6 MB|
+|Case sensitive:|true|
+|Max sentence length:|512|
+
+## References
+
+References
+
+- https://huggingface.co/mrm8488/bert-tiny-2-finetuned-squadv2
\ No newline at end of file
diff --git a/docs/_posts/ahmedlone127/2023-11-14-bert_qa_bert_tiny_5_finetuned_squadv2_en.md b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_bert_tiny_5_finetuned_squadv2_en.md
new file mode 100644
index 00000000000000..9bf06d89f7f686
--- /dev/null
+++ b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_bert_tiny_5_finetuned_squadv2_en.md
@@ -0,0 +1,113 @@
+---
+layout: model
+title: English BertForQuestionAnswering model (from mrm8488)
+author: John Snow Labs
+name: bert_qa_bert_tiny_5_finetuned_squadv2
+date: 2023-11-14
+tags: [en, open_source, question_answering, bert, onnx]
+task: Question Answering
+language: en
+edition: Spark NLP 5.2.0
+spark_version: 3.0
+supported: true
+engine: onnx
+annotator: BertForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained Question Answering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `bert-tiny-5-finetuned-squadv2` is an English model originally trained by `mrm8488`.
+
+## Predicted Entities
+
+
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/bert_qa_bert_tiny_5_finetuned_squadv2_en_5.2.0_3.0_1700000467523.zip){:.button.button-orange.button-orange-trans.arr.button-icon}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/bert_qa_bert_tiny_5_finetuned_squadv2_en_5.2.0_3.0_1700000467523.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+document_assembler = MultiDocumentAssembler() \
+.setInputCols(["question", "context"]) \
+.setOutputCols(["document_question", "document_context"])
+
+spanClassifier = BertForQuestionAnswering.pretrained("bert_qa_bert_tiny_5_finetuned_squadv2","en") \
+.setInputCols(["document_question", "document_context"]) \
+.setOutputCol("answer") \
+.setCaseSensitive(True)
+
+pipeline = Pipeline().setStages([
+document_assembler,
+spanClassifier
+])
+
+example = spark.createDataFrame([["What's my name?", "My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(example).transform(example)
+```
+```scala
+val document = new MultiDocumentAssembler()
+.setInputCols("question", "context")
+.setOutputCols("document_question", "document_context")
+
+val spanClassifier = BertForQuestionAnswering
+.pretrained("bert_qa_bert_tiny_5_finetuned_squadv2","en")
+.setInputCols(Array("document_question", "document_context"))
+.setOutputCol("answer")
+.setCaseSensitive(true)
+.setMaxSentenceLength(512)
+
+val pipeline = new Pipeline().setStages(Array(document, spanClassifier))
+
+val example = Seq(
+("Where was John Lenon born?", "John Lenon was born in London and lived in Paris. My name is Sarah and I live in London."),
+("What's my name?", "My name is Clara and I live in Berkeley."))
+.toDF("question", "context")
+
+val result = pipeline.fit(example).transform(example)
+```
+
+{:.nlu-block}
+```python
+import nlu
+nlu.load("en.answer_question.squadv2.bert.tiny_v5.by_mrm8488").predict("""What's my name?|||"My name is Clara and I live in Berkeley.""")
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|bert_qa_bert_tiny_5_finetuned_squadv2|
+|Compatibility:|Spark NLP 5.2.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[sentence, token]|
+|Output Labels:|[embeddings]|
+|Language:|en|
+|Size:|24.1 MB|
+|Case sensitive:|true|
+|Max sentence length:|512|
+
+## References
+
+References
+
+- https://huggingface.co/mrm8488/bert-tiny-5-finetuned-squadv2
+- https://twitter.com/mrm8488
+- https://github.com/google-research
+- https://arxiv.org/abs/1908.08962
+- https://rajpurkar.github.io/SQuAD-explorer/
+- https://www.linkedin.com/in/manuel-romero-cs/
\ No newline at end of file
diff --git a/docs/_posts/ahmedlone127/2023-11-14-bert_qa_bert_tiny_finetuned_squadv2_en.md b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_bert_tiny_finetuned_squadv2_en.md
new file mode 100644
index 00000000000000..584719dfc599a7
--- /dev/null
+++ b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_bert_tiny_finetuned_squadv2_en.md
@@ -0,0 +1,114 @@
+---
+layout: model
+title: English BertForQuestionAnswering model (from mrm8488)
+author: John Snow Labs
+name: bert_qa_bert_tiny_finetuned_squadv2
+date: 2023-11-14
+tags: [en, open_source, question_answering, bert, onnx]
+task: Question Answering
+language: en
+edition: Spark NLP 5.2.0
+spark_version: 3.0
+supported: true
+engine: onnx
+annotator: BertForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained Question Answering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `bert-tiny-finetuned-squadv2` is an English model originally trained by `mrm8488`.
+
+## Predicted Entities
+
+
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/bert_qa_bert_tiny_finetuned_squadv2_en_5.2.0_3.0_1699999509086.zip){:.button.button-orange.button-orange-trans.arr.button-icon}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/bert_qa_bert_tiny_finetuned_squadv2_en_5.2.0_3.0_1699999509086.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+document_assembler = MultiDocumentAssembler() \
+.setInputCols(["question", "context"]) \
+.setOutputCols(["document_question", "document_context"])
+
+spanClassifier = BertForQuestionAnswering.pretrained("bert_qa_bert_tiny_finetuned_squadv2","en") \
+.setInputCols(["document_question", "document_context"]) \
+.setOutputCol("answer") \
+.setCaseSensitive(True)
+
+pipeline = Pipeline().setStages([
+document_assembler,
+spanClassifier
+])
+
+example = spark.createDataFrame([["What's my name?", "My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(example).transform(example)
+```
+```scala
+val document = new MultiDocumentAssembler()
+.setInputCols("question", "context")
+.setOutputCols("document_question", "document_context")
+
+val spanClassifier = BertForQuestionAnswering
+.pretrained("bert_qa_bert_tiny_finetuned_squadv2","en")
+.setInputCols(Array("document_question", "document_context"))
+.setOutputCol("answer")
+.setCaseSensitive(true)
+.setMaxSentenceLength(512)
+
+val pipeline = new Pipeline().setStages(Array(document, spanClassifier))
+
+val example = Seq(
+("Where was John Lenon born?", "John Lenon was born in London and lived in Paris. My name is Sarah and I live in London."),
+("What's my name?", "My name is Clara and I live in Berkeley."))
+.toDF("question", "context")
+
+val result = pipeline.fit(example).transform(example)
+```
+
+{:.nlu-block}
+```python
+import nlu
+nlu.load("en.answer_question.squadv2.bert.tiny_.by_mrm8488").predict("""What's my name?|||"My name is Clara and I live in Berkeley.""")
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|bert_qa_bert_tiny_finetuned_squadv2|
+|Compatibility:|Spark NLP 5.2.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[sentence, token]|
+|Output Labels:|[embeddings]|
+|Language:|en|
+|Size:|16.7 MB|
+|Case sensitive:|true|
+|Max sentence length:|512|
+
+## References
+
+References
+
+- https://huggingface.co/mrm8488/bert-tiny-finetuned-squadv2
+- https://twitter.com/mrm8488
+- https://github.com/google-research
+- https://arxiv.org/abs/1908.08962
+- https://rajpurkar.github.io/SQuAD-explorer/
+- https://github.com/google-research/bert/
+- https://www.linkedin.com/in/manuel-romero-cs/
\ No newline at end of file
diff --git a/docs/_posts/ahmedlone127/2023-11-14-bert_qa_bert_turkish_question_answering_tr.md b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_bert_turkish_question_answering_tr.md
new file mode 100644
index 00000000000000..2d7672c7085f8f
--- /dev/null
+++ b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_bert_turkish_question_answering_tr.md
@@ -0,0 +1,108 @@
+---
+layout: model
+title: Turkish BertForQuestionAnswering model (from lserinol)
+author: John Snow Labs
+name: bert_qa_bert_turkish_question_answering
+date: 2023-11-14
+tags: [tr, open_source, question_answering, bert, onnx]
+task: Question Answering
+language: tr
+edition: Spark NLP 5.2.0
+spark_version: 3.0
+supported: true
+engine: onnx
+annotator: BertForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained Question Answering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `bert-turkish-question-answering` is a Turkish model originally trained by `lserinol`.
+
+## Predicted Entities
+
+
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/bert_qa_bert_turkish_question_answering_tr_5.2.0_3.0_1700000755160.zip){:.button.button-orange.button-orange-trans.arr.button-icon}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/bert_qa_bert_turkish_question_answering_tr_5.2.0_3.0_1700000755160.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+document_assembler = MultiDocumentAssembler() \
+.setInputCols(["question", "context"]) \
+.setOutputCols(["document_question", "document_context"])
+
+spanClassifier = BertForQuestionAnswering.pretrained("bert_qa_bert_turkish_question_answering","tr") \
+.setInputCols(["document_question", "document_context"]) \
+.setOutputCol("answer") \
+.setCaseSensitive(True)
+
+pipeline = Pipeline().setStages([
+document_assembler,
+spanClassifier
+])
+
+example = spark.createDataFrame([["What's my name?", "My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(example).transform(example)
+```
+```scala
+val document = new MultiDocumentAssembler()
+.setInputCols("question", "context")
+.setOutputCols("document_question", "document_context")
+
+val spanClassifier = BertForQuestionAnswering
+.pretrained("bert_qa_bert_turkish_question_answering","tr")
+.setInputCols(Array("document_question", "document_context"))
+.setOutputCol("answer")
+.setCaseSensitive(true)
+.setMaxSentenceLength(512)
+
+val pipeline = new Pipeline().setStages(Array(document, spanClassifier))
+
+val example = Seq(
+("Where was John Lenon born?", "John Lenon was born in London and lived in Paris. My name is Sarah and I live in London."),
+("What's my name?", "My name is Clara and I live in Berkeley."))
+.toDF("question", "context")
+
+val result = pipeline.fit(example).transform(example)
+```
+
+{:.nlu-block}
+```python
+import nlu
+nlu.load("tr.answer_question.bert").predict("""What's my name?|||"My name is Clara and I live in Berkeley.""")
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|bert_qa_bert_turkish_question_answering|
+|Compatibility:|Spark NLP 5.2.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[sentence, token]|
+|Output Labels:|[embeddings]|
+|Language:|tr|
+|Size:|412.3 MB|
+|Case sensitive:|true|
+|Max sentence length:|512|
+
+## References
+
+References
+
+- https://huggingface.co/lserinol/bert-turkish-question-answering
\ No newline at end of file
diff --git a/docs/_posts/ahmedlone127/2023-11-14-bert_qa_bert_uncased_l_10_h_512_a_8_cord19_200616_squad2_covid_qna_en.md b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_bert_uncased_l_10_h_512_a_8_cord19_200616_squad2_covid_qna_en.md
new file mode 100644
index 00000000000000..1688aa879c9c8f
--- /dev/null
+++ b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_bert_uncased_l_10_h_512_a_8_cord19_200616_squad2_covid_qna_en.md
@@ -0,0 +1,95 @@
+---
+layout: model
+title: English bert_qa_bert_uncased_l_10_h_512_a_8_cord19_200616_squad2_covid_qna BertForQuestionAnswering from aodiniz
+author: John Snow Labs
+name: bert_qa_bert_uncased_l_10_h_512_a_8_cord19_200616_squad2_covid_qna
+date: 2023-11-14
+tags: [bert, en, open_source, question_answering, onnx]
+task: Question Answering
+language: en
+edition: Spark NLP 5.2.0
+spark_version: 3.0
+supported: true
+engine: onnx
+annotator: BertForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained BertForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP.`bert_qa_bert_uncased_l_10_h_512_a_8_cord19_200616_squad2_covid_qna` is an English model originally trained by aodiniz.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/bert_qa_bert_uncased_l_10_h_512_a_8_cord19_200616_squad2_covid_qna_en_5.2.0_3.0_1699999706791.zip){:.button.button-orange.button-orange-trans.arr.button-icon}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/bert_qa_bert_uncased_l_10_h_512_a_8_cord19_200616_squad2_covid_qna_en_5.2.0_3.0_1699999706791.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|bert_qa_bert_uncased_l_10_h_512_a_8_cord19_200616_squad2_covid_qna|
+|Compatibility:|Spark NLP 5.2.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[sentence, token]|
+|Output Labels:|[embeddings]|
+|Language:|en|
+|Size:|19.6 MB|
+|Case sensitive:|false|
+|Max sentence length:|512|
+
+## References
+
+https://huggingface.co/aodiniz/bert_uncased_L-10_H-512_A-8_cord19-200616_squad2_covid-qna
\ No newline at end of file
diff --git a/docs/_posts/ahmedlone127/2023-11-14-bert_qa_bertfast_01_en.md b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_bertfast_01_en.md
new file mode 100644
index 00000000000000..28a6ea272588e8
--- /dev/null
+++ b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_bertfast_01_en.md
@@ -0,0 +1,94 @@
+---
+layout: model
+title: English BertForQuestionAnswering Cased model (from JAlexis)
+author: John Snow Labs
+name: bert_qa_bertfast_01
+date: 2023-11-14
+tags: [en, open_source, bert, question_answering, onnx]
+task: Question Answering
+language: en
+edition: Spark NLP 5.2.0
+spark_version: 3.0
+supported: true
+engine: onnx
+annotator: BertForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained BertForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `bertFast_01` is an English model originally trained by `JAlexis`.
+
+## Predicted Entities
+
+
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/bert_qa_bertfast_01_en_5.2.0_3.0_1699996341011.zip){:.button.button-orange.button-orange-trans.arr.button-icon}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/bert_qa_bertfast_01_en_5.2.0_3.0_1699996341011.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = BertForQuestionAnswering.pretrained("bert_qa_bertfast_01","en")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = BertForQuestionAnswering.pretrained("bert_qa_bertfast_01","en")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq("What's my name?","My name is Clara and I live in Berkeley.").toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|bert_qa_bertfast_01|
+|Compatibility:|Spark NLP 5.2.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|403.6 MB|
+|Case sensitive:|true|
+|Max sentence length:|512|
+
+## References
+
+References
+
+- https://huggingface.co/JAlexis/bertFast_01
\ No newline at end of file
diff --git a/docs/_posts/ahmedlone127/2023-11-14-bert_qa_bertfast_02_en.md b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_bertfast_02_en.md
new file mode 100644
index 00000000000000..56a21f14ef0507
--- /dev/null
+++ b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_bertfast_02_en.md
@@ -0,0 +1,94 @@
+---
+layout: model
+title: English BertForQuestionAnswering Cased model (from JAlexis)
+author: John Snow Labs
+name: bert_qa_bertfast_02
+date: 2023-11-14
+tags: [en, open_source, bert, question_answering, onnx]
+task: Question Answering
+language: en
+edition: Spark NLP 5.2.0
+spark_version: 3.0
+supported: true
+engine: onnx
+annotator: BertForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained BertForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `bertFast_02` is an English model originally trained by `JAlexis`.
+
+## Predicted Entities
+
+
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/bert_qa_bertfast_02_en_5.2.0_3.0_1700001601061.zip){:.button.button-orange.button-orange-trans.arr.button-icon}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/bert_qa_bertfast_02_en_5.2.0_3.0_1700001601061.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = BertForQuestionAnswering.pretrained("bert_qa_bertfast_02","en")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = BertForQuestionAnswering.pretrained("bert_qa_bertfast_02","en")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq("What's my name?","My name is Clara and I live in Berkeley.").toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|bert_qa_bertfast_02|
+|Compatibility:|Spark NLP 5.2.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|403.6 MB|
+|Case sensitive:|true|
+|Max sentence length:|512|
+
+## References
+
+References
+
+- https://huggingface.co/JAlexis/bertFast_02
\ No newline at end of file
diff --git a/docs/_posts/ahmedlone127/2023-11-14-bert_qa_bertimbau_squad1.1_en.md b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_bertimbau_squad1.1_en.md
new file mode 100644
index 00000000000000..4dd526732b521b
--- /dev/null
+++ b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_bertimbau_squad1.1_en.md
@@ -0,0 +1,108 @@
+---
+layout: model
+title: English BertForQuestionAnswering model (from hendrixcosta)
+author: John Snow Labs
+name: bert_qa_bertimbau_squad1.1
+date: 2023-11-14
+tags: [en, open_source, question_answering, bert, onnx]
+task: Question Answering
+language: en
+edition: Spark NLP 5.2.0
+spark_version: 3.0
+supported: true
+engine: onnx
+annotator: BertForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained Question Answering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `bertimbau-squad1.1` is an English model originally trained by `hendrixcosta`.
+
+## Predicted Entities
+
+
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/bert_qa_bertimbau_squad1.1_en_5.2.0_3.0_1699996902121.zip){:.button.button-orange.button-orange-trans.arr.button-icon}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/bert_qa_bertimbau_squad1.1_en_5.2.0_3.0_1699996902121.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+document_assembler = MultiDocumentAssembler() \
+.setInputCols(["question", "context"]) \
+.setOutputCols(["document_question", "document_context"])
+
+spanClassifier = BertForQuestionAnswering.pretrained("bert_qa_bertimbau_squad1.1","en") \
+.setInputCols(["document_question", "document_context"]) \
+.setOutputCol("answer") \
+.setCaseSensitive(True)
+
+pipeline = Pipeline().setStages([
+document_assembler,
+spanClassifier
+])
+
+example = spark.createDataFrame([["What's my name?", "My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(example).transform(example)
+```
+```scala
+val document = new MultiDocumentAssembler()
+.setInputCols("question", "context")
+.setOutputCols("document_question", "document_context")
+
+val spanClassifier = BertForQuestionAnswering
+.pretrained("bert_qa_bertimbau_squad1.1","en")
+.setInputCols(Array("document_question", "document_context"))
+.setOutputCol("answer")
+.setCaseSensitive(true)
+.setMaxSentenceLength(512)
+
+val pipeline = new Pipeline().setStages(Array(document, spanClassifier))
+
+val example = Seq(
+("Where was John Lenon born?", "John Lenon was born in London and lived in Paris. My name is Sarah and I live in London."),
+("What's my name?", "My name is Clara and I live in Berkeley."))
+.toDF("question", "context")
+
+val result = pipeline.fit(example).transform(example)
+```
+
+{:.nlu-block}
+```python
+import nlu
+nlu.load("en.answer_question.squad.bert.by_hendrixcosta").predict("""What's my name?|||"My name is Clara and I live in Berkeley.""")
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|bert_qa_bertimbau_squad1.1|
+|Compatibility:|Spark NLP 5.2.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[sentence, token]|
+|Output Labels:|[embeddings]|
+|Language:|en|
+|Size:|1.2 GB|
+|Case sensitive:|true|
+|Max sentence length:|512|
+
+## References
+
+References
+
+- https://huggingface.co/hendrixcosta/bertimbau-squad1.1
\ No newline at end of file
diff --git a/docs/_posts/ahmedlone127/2023-11-14-bert_qa_bertlargeabsa_en.md b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_bertlargeabsa_en.md
new file mode 100644
index 00000000000000..66a3dc2c2b6123
--- /dev/null
+++ b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_bertlargeabsa_en.md
@@ -0,0 +1,100 @@
+---
+layout: model
+title: English BertForQuestionAnswering Large Cased model (from LucasS)
+author: John Snow Labs
+name: bert_qa_bertlargeabsa
+date: 2023-11-14
+tags: [en, open_source, bert, question_answering, onnx]
+task: Question Answering
+language: en
+edition: Spark NLP 5.2.0
+spark_version: 3.0
+supported: true
+engine: onnx
+annotator: BertForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained Question Answering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `bertLargeABSA` is an English model originally trained by `LucasS`.
+
+## Predicted Entities
+
+
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/bert_qa_bertlargeabsa_en_5.2.0_3.0_1700001215917.zip){:.button.button-orange.button-orange-trans.arr.button-icon}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/bert_qa_bertlargeabsa_en_5.2.0_3.0_1700001215917.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+documentAssembler = MultiDocumentAssembler() \
+ .setInputCols(["question", "context"]) \
+ .setOutputCols(["document_question", "document_context"])
+
+spanClassifier = BertForQuestionAnswering.pretrained("bert_qa_bertlargeabsa","en") \
+ .setInputCols(["document_question", "document_context"]) \
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[documentAssembler, spanClassifier])
+
+data = spark.createDataFrame([["What is my name?", "My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val documentAssembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val spanClassifier = BertForQuestionAnswering.pretrained("bert_qa_bertlargeabsa","en")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(documentAssembler, spanClassifier))
+
+val data = Seq(("What is my name?", "My name is Clara and I live in Berkeley.")).toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+{:.nlu-block}
+```python
+import nlu
+nlu.load("en.answer_question.bert.abs").predict("""What is my name?|||"My name is Clara and I live in Berkeley.""")
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|bert_qa_bertlargeabsa|
+|Compatibility:|Spark NLP 5.2.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|1.3 GB|
+|Case sensitive:|true|
+|Max sentence length:|512|
+
+## References
+
+References
+
+- https://huggingface.co/LucasS/bertLargeABSA
\ No newline at end of file
diff --git a/docs/_posts/ahmedlone127/2023-11-14-bert_qa_bertserini_base_cmrc_en.md b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_bertserini_base_cmrc_en.md
new file mode 100644
index 00000000000000..b2a4811735601a
--- /dev/null
+++ b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_bertserini_base_cmrc_en.md
@@ -0,0 +1,100 @@
+---
+layout: model
+title: English BertForQuestionAnswering Base Cased model (from rsvp-ai)
+author: John Snow Labs
+name: bert_qa_bertserini_base_cmrc
+date: 2023-11-14
+tags: [en, open_source, bert, question_answering, onnx]
+task: Question Answering
+language: en
+edition: Spark NLP 5.2.0
+spark_version: 3.0
+supported: true
+engine: onnx
+annotator: BertForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained Question Answering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `bertserini-bert-base-cmrc` is an English model originally trained by `rsvp-ai`.
+
+## Predicted Entities
+
+
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/bert_qa_bertserini_base_cmrc_en_5.2.0_3.0_1700001534997.zip){:.button.button-orange.button-orange-trans.arr.button-icon}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/bert_qa_bertserini_base_cmrc_en_5.2.0_3.0_1700001534997.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+documentAssembler = MultiDocumentAssembler() \
+ .setInputCols(["question", "context"]) \
+ .setOutputCols(["document_question", "document_context"])
+
+spanClassifier = BertForQuestionAnswering.pretrained("bert_qa_bertserini_base_cmrc","en") \
+ .setInputCols(["document_question", "document_context"]) \
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[documentAssembler, spanClassifier])
+
+data = spark.createDataFrame([["What is my name?", "My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val documentAssembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val spanClassifier = BertForQuestionAnswering.pretrained("bert_qa_bertserini_base_cmrc","en")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(documentAssembler, spanClassifier))
+
+val data = Seq(("What is my name?", "My name is Clara and I live in Berkeley.")).toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+{:.nlu-block}
+```python
+import nlu
+nlu.load("en.answer_question.bert.base.serini.cmrc").predict("""What is my name?|||"My name is Clara and I live in Berkeley.""")
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|bert_qa_bertserini_base_cmrc|
+|Compatibility:|Spark NLP 5.2.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|381.0 MB|
+|Case sensitive:|true|
+|Max sentence length:|512|
+
+## References
+
+References
+
+- https://huggingface.co/rsvp-ai/bertserini-bert-base-cmrc
\ No newline at end of file
diff --git a/docs/_posts/ahmedlone127/2023-11-14-bert_qa_bertserini_bert_base_squad_en.md b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_bertserini_bert_base_squad_en.md
new file mode 100644
index 00000000000000..e169dd1d19508b
--- /dev/null
+++ b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_bertserini_bert_base_squad_en.md
@@ -0,0 +1,108 @@
+---
+layout: model
+title: English BertForQuestionAnswering model (from rsvp-ai)
+author: John Snow Labs
+name: bert_qa_bertserini_bert_base_squad
+date: 2023-11-14
+tags: [en, open_source, question_answering, bert, onnx]
+task: Question Answering
+language: en
+edition: Spark NLP 5.2.0
+spark_version: 3.0
+supported: true
+engine: onnx
+annotator: BertForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained Question Answering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `bertserini-bert-base-squad` is an English model originally trained by `rsvp-ai`.
+
+## Predicted Entities
+
+
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/bert_qa_bertserini_bert_base_squad_en_5.2.0_3.0_1700001867988.zip){:.button.button-orange.button-orange-trans.arr.button-icon}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/bert_qa_bertserini_bert_base_squad_en_5.2.0_3.0_1700001867988.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+document_assembler = MultiDocumentAssembler() \
+.setInputCols(["question", "context"]) \
+.setOutputCols(["document_question", "document_context"])
+
+spanClassifier = BertForQuestionAnswering.pretrained("bert_qa_bertserini_bert_base_squad","en") \
+.setInputCols(["document_question", "document_context"]) \
+.setOutputCol("answer") \
+.setCaseSensitive(True)
+
+pipeline = Pipeline().setStages([
+document_assembler,
+spanClassifier
+])
+
+example = spark.createDataFrame([["What's my name?", "My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(example).transform(example)
+```
+```scala
+val document = new MultiDocumentAssembler()
+.setInputCols("question", "context")
+.setOutputCols("document_question", "document_context")
+
+val spanClassifier = BertForQuestionAnswering
+.pretrained("bert_qa_bertserini_bert_base_squad","en")
+.setInputCols(Array("document_question", "document_context"))
+.setOutputCol("answer")
+.setCaseSensitive(true)
+.setMaxSentenceLength(512)
+
+val pipeline = new Pipeline().setStages(Array(document, spanClassifier))
+
+val example = Seq(
+("Where was John Lenon born?", "John Lenon was born in London and lived in Paris. My name is Sarah and I live in London."),
+("What's my name?", "My name is Clara and I live in Berkeley."))
+.toDF("question", "context")
+
+val result = pipeline.fit(example).transform(example)
+```
+
+{:.nlu-block}
+```python
+import nlu
+nlu.load("en.answer_question.squad.bert.base.by_rsvp-ai").predict("""What's my name?|||"My name is Clara and I live in Berkeley.""")
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|bert_qa_bertserini_bert_base_squad|
+|Compatibility:|Spark NLP 5.2.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|407.2 MB|
+|Case sensitive:|true|
+|Max sentence length:|512|
+
+## References
+
+References
+
+- https://huggingface.co/rsvp-ai/bertserini-bert-base-squad
\ No newline at end of file
diff --git a/docs/_posts/ahmedlone127/2023-11-14-bert_qa_bertserini_bert_large_squad_en.md b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_bertserini_bert_large_squad_en.md
new file mode 100644
index 00000000000000..49321ec7476492
--- /dev/null
+++ b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_bertserini_bert_large_squad_en.md
@@ -0,0 +1,108 @@
+---
+layout: model
+title: English BertForQuestionAnswering model (from rsvp-ai)
+author: John Snow Labs
+name: bert_qa_bertserini_bert_large_squad
+date: 2023-11-14
+tags: [en, open_source, question_answering, bert, onnx]
+task: Question Answering
+language: en
+edition: Spark NLP 5.2.0
+spark_version: 3.0
+supported: true
+engine: onnx
+annotator: BertForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained Question Answering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `bertserini-bert-large-squad` is an English model originally trained by `rsvp-ai`.
+
+## Predicted Entities
+
+
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/bert_qa_bertserini_bert_large_squad_en_5.2.0_3.0_1699999112455.zip){:.button.button-orange.button-orange-trans.arr.button-icon}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/bert_qa_bertserini_bert_large_squad_en_5.2.0_3.0_1699999112455.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+document_assembler = MultiDocumentAssembler() \
+.setInputCols(["question", "context"]) \
+.setOutputCols(["document_question", "document_context"])
+
+spanClassifier = BertForQuestionAnswering.pretrained("bert_qa_bertserini_bert_large_squad","en") \
+.setInputCols(["document_question", "document_context"]) \
+.setOutputCol("answer") \
+.setCaseSensitive(True)
+
+pipeline = Pipeline().setStages([
+document_assembler,
+spanClassifier
+])
+
+example = spark.createDataFrame([["What's my name?", "My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(example).transform(example)
+```
+```scala
+val document = new MultiDocumentAssembler()
+.setInputCols("question", "context")
+.setOutputCols("document_question", "document_context")
+
+val spanClassifier = BertForQuestionAnswering
+.pretrained("bert_qa_bertserini_bert_large_squad","en")
+.setInputCols(Array("document_question", "document_context"))
+.setOutputCol("answer")
+.setCaseSensitive(true)
+.setMaxSentenceLength(512)
+
+val pipeline = new Pipeline().setStages(Array(document, spanClassifier))
+
+val example = Seq(
+("Where was John Lenon born?", "John Lenon was born in London and lived in Paris. My name is Sarah and I live in London."),
+("What's my name?", "My name is Clara and I live in Berkeley."))
+.toDF("question", "context")
+
+val result = pipeline.fit(example).transform(example)
+```
+
+{:.nlu-block}
+```python
+import nlu
+nlu.load("en.answer_question.squad.bert.large.by_rsvp-ai").predict("""What's my name?|||"My name is Clara and I live in Berkeley.""")
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|bert_qa_bertserini_bert_large_squad|
+|Compatibility:|Spark NLP 5.2.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|1.3 GB|
+|Case sensitive:|true|
+|Max sentence length:|512|
+
+## References
+
+References
+
+- https://huggingface.co/rsvp-ai/bertserini-bert-large-squad
\ No newline at end of file
diff --git a/docs/_posts/ahmedlone127/2023-11-14-bert_qa_beto_base_spanish_sqac_es.md b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_beto_base_spanish_sqac_es.md
new file mode 100644
index 00000000000000..dc5bbe59d3121b
--- /dev/null
+++ b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_beto_base_spanish_sqac_es.md
@@ -0,0 +1,112 @@
+---
+layout: model
+title: Spanish BertForQuestionAnswering model (from IIC)
+author: John Snow Labs
+name: bert_qa_beto_base_spanish_sqac
+date: 2023-11-14
+tags: [es, open_source, question_answering, bert, onnx]
+task: Question Answering
+language: es
+edition: Spark NLP 5.2.0
+spark_version: 3.0
+supported: true
+engine: onnx
+annotator: BertForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained Question Answering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `beto-base-spanish-sqac` is a Spanish model originally trained by `IIC`.
+
+## Predicted Entities
+
+
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/bert_qa_beto_base_spanish_sqac_es_5.2.0_3.0_1699997233869.zip){:.button.button-orange.button-orange-trans.arr.button-icon}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/bert_qa_beto_base_spanish_sqac_es_5.2.0_3.0_1699997233869.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+document_assembler = MultiDocumentAssembler() \
+.setInputCols(["question", "context"]) \
+.setOutputCols(["document_question", "document_context"])
+
+spanClassifier = BertForQuestionAnswering.pretrained("bert_qa_beto_base_spanish_sqac","es") \
+.setInputCols(["document_question", "document_context"]) \
+.setOutputCol("answer") \
+.setCaseSensitive(True)
+
+pipeline = Pipeline().setStages([
+document_assembler,
+spanClassifier
+])
+
+example = spark.createDataFrame([["What's my name?", "My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(example).transform(example)
+```
+```scala
+val document = new MultiDocumentAssembler()
+.setInputCols("question", "context")
+.setOutputCols("document_question", "document_context")
+
+val spanClassifier = BertForQuestionAnswering
+.pretrained("bert_qa_beto_base_spanish_sqac","es")
+.setInputCols(Array("document_question", "document_context"))
+.setOutputCol("answer")
+.setCaseSensitive(true)
+.setMaxSentenceLength(512)
+
+val pipeline = new Pipeline().setStages(Array(document, spanClassifier))
+
+val example = Seq(
+("Where was John Lenon born?", "John Lenon was born in London and lived in Paris. My name is Sarah and I live in London."),
+("What's my name?", "My name is Clara and I live in Berkeley."))
+.toDF("question", "context")
+
+val result = pipeline.fit(example).transform(example)
+```
+
+{:.nlu-block}
+```python
+import nlu
+nlu.load("es.answer_question.sqac.bert.base").predict("""What's my name?|||"My name is Clara and I live in Berkeley.""")
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|bert_qa_beto_base_spanish_sqac|
+|Compatibility:|Spark NLP 5.2.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|es|
+|Size:|409.5 MB|
+|Case sensitive:|true|
+|Max sentence length:|512|
+
+## References
+
+References
+
+- https://huggingface.co/IIC/beto-base-spanish-sqac
+- https://paperswithcode.com/sota?task=question-answering&dataset=PlanTL-GOB-ES%2FSQAC
+- https://arxiv.org/abs/2107.07253
+- https://github.com/dccuchile/beto
+- https://www.bsc.es/
\ No newline at end of file
diff --git a/docs/_posts/ahmedlone127/2023-11-14-bert_qa_beto_base_spanish_squades2_es.md b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_beto_base_spanish_squades2_es.md
new file mode 100644
index 00000000000000..a444147e984ae8
--- /dev/null
+++ b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_beto_base_spanish_squades2_es.md
@@ -0,0 +1,96 @@
+---
+layout: model
+title: Spanish BertForQuestionAnswering Base Cased model (from inigopm)
+author: John Snow Labs
+name: bert_qa_beto_base_spanish_squades2
+date: 2023-11-14
+tags: [es, open_source, bert, question_answering, onnx]
+task: Question Answering
+language: es
+edition: Spark NLP 5.2.0
+spark_version: 3.0
+supported: true
+engine: onnx
+annotator: BertForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained BertForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `beto-base-spanish-squades2` is a Spanish model originally trained by `inigopm`.
+
+## Predicted Entities
+
+
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/bert_qa_beto_base_spanish_squades2_es_5.2.0_3.0_1700002133617.zip){:.button.button-orange.button-orange-trans.arr.button-icon}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/bert_qa_beto_base_spanish_squades2_es_5.2.0_3.0_1700002133617.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = BertForQuestionAnswering.pretrained("bert_qa_beto_base_spanish_squades2","es")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = BertForQuestionAnswering.pretrained("bert_qa_beto_base_spanish_squades2","es")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq(("What's my name?","My name is Clara and I live in Berkeley.")).toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|bert_qa_beto_base_spanish_squades2|
+|Compatibility:|Spark NLP 5.2.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|es|
+|Size:|409.5 MB|
+|Case sensitive:|true|
+|Max sentence length:|512|
+
+## References
+
+References
+
+- https://huggingface.co/inigopm/beto-base-spanish-squades2
+- https://github.com/josecannete/spanish-corpora
+- https://paperswithcode.com/sota?task=question-answering&dataset=squad_es+v2.0.0
\ No newline at end of file
diff --git a/docs/_posts/ahmedlone127/2023-11-14-bert_qa_biobert_base_cased_v1.1_squad_en.md b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_biobert_base_cased_v1.1_squad_en.md
new file mode 100644
index 00000000000000..004d227056713b
--- /dev/null
+++ b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_biobert_base_cased_v1.1_squad_en.md
@@ -0,0 +1,108 @@
+---
+layout: model
+title: English BertForQuestionAnswering model (from dmis-lab)
+author: John Snow Labs
+name: bert_qa_biobert_base_cased_v1.1_squad
+date: 2023-11-14
+tags: [en, open_source, question_answering, bert, onnx]
+task: Question Answering
+language: en
+edition: Spark NLP 5.2.0
+spark_version: 3.0
+supported: true
+engine: onnx
+annotator: BertForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained Question Answering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `biobert-base-cased-v1.1-squad` is an English model originally trained by `dmis-lab`.
+
+## Predicted Entities
+
+
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/bert_qa_biobert_base_cased_v1.1_squad_en_5.2.0_3.0_1700001838261.zip){:.button.button-orange.button-orange-trans.arr.button-icon}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/bert_qa_biobert_base_cased_v1.1_squad_en_5.2.0_3.0_1700001838261.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+document_assembler = MultiDocumentAssembler() \
+.setInputCols(["question", "context"]) \
+.setOutputCols(["document_question", "document_context"])
+
+spanClassifier = BertForQuestionAnswering.pretrained("bert_qa_biobert_base_cased_v1.1_squad","en") \
+.setInputCols(["document_question", "document_context"]) \
+.setOutputCol("answer") \
+.setCaseSensitive(True)
+
+pipeline = Pipeline().setStages([
+document_assembler,
+spanClassifier
+])
+
+example = spark.createDataFrame([["What's my name?", "My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(example).transform(example)
+```
+```scala
+val document = new MultiDocumentAssembler()
+.setInputCols("question", "context")
+.setOutputCols("document_question", "document_context")
+
+val spanClassifier = BertForQuestionAnswering
+.pretrained("bert_qa_biobert_base_cased_v1.1_squad","en")
+.setInputCols(Array("document_question", "document_context"))
+.setOutputCol("answer")
+.setCaseSensitive(true)
+.setMaxSentenceLength(512)
+
+val pipeline = new Pipeline().setStages(Array(document, spanClassifier))
+
+val example = Seq(
+("Where was John Lenon born?", "John Lenon was born in London and lived in Paris. My name is Sarah and I live in London."),
+("What's my name?", "My name is Clara and I live in Berkeley."))
+.toDF("question", "context")
+
+val result = pipeline.fit(example).transform(example)
+```
+
+{:.nlu-block}
+```python
+import nlu
+nlu.load("en.answer_question.squad.biobert.base_cased.by_dmis-lab").predict("""What's my name?|||"My name is Clara and I live in Berkeley.""")
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|bert_qa_biobert_base_cased_v1.1_squad|
+|Compatibility:|Spark NLP 5.2.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|403.1 MB|
+|Case sensitive:|true|
+|Max sentence length:|512|
+
+## References
+
+References
+
+- https://huggingface.co/dmis-lab/biobert-base-cased-v1.1-squad
\ No newline at end of file
diff --git a/docs/_posts/ahmedlone127/2023-11-14-bert_qa_biobert_base_cased_v1.1_squad_finetuned_biobert_en.md b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_biobert_base_cased_v1.1_squad_finetuned_biobert_en.md
new file mode 100644
index 00000000000000..c844456626a945
--- /dev/null
+++ b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_biobert_base_cased_v1.1_squad_finetuned_biobert_en.md
@@ -0,0 +1,108 @@
+---
+layout: model
+title: English BertForQuestionAnswering model (from juliusco)
+author: John Snow Labs
+name: bert_qa_biobert_base_cased_v1.1_squad_finetuned_biobert
+date: 2023-11-14
+tags: [en, open_source, question_answering, bert, onnx]
+task: Question Answering
+language: en
+edition: Spark NLP 5.2.0
+spark_version: 3.0
+supported: true
+engine: onnx
+annotator: BertForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained Question Answering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `biobert-base-cased-v1.1-squad-finetuned-biobert` is an English model originally trained by `juliusco`.
+
+## Predicted Entities
+
+
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/bert_qa_biobert_base_cased_v1.1_squad_finetuned_biobert_en_5.2.0_3.0_1700002123070.zip){:.button.button-orange.button-orange-trans.arr.button-icon}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/bert_qa_biobert_base_cased_v1.1_squad_finetuned_biobert_en_5.2.0_3.0_1700002123070.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+document_assembler = MultiDocumentAssembler() \
+.setInputCols(["question", "context"]) \
+.setOutputCols(["document_question", "document_context"])
+
+spanClassifier = BertForQuestionAnswering.pretrained("bert_qa_biobert_base_cased_v1.1_squad_finetuned_biobert","en") \
+.setInputCols(["document_question", "document_context"]) \
+.setOutputCol("answer") \
+.setCaseSensitive(True)
+
+pipeline = Pipeline().setStages([
+document_assembler,
+spanClassifier
+])
+
+example = spark.createDataFrame([["What's my name?", "My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(example).transform(example)
+```
+```scala
+val document = new MultiDocumentAssembler()
+.setInputCols("question", "context")
+.setOutputCols("document_question", "document_context")
+
+val spanClassifier = BertForQuestionAnswering
+.pretrained("bert_qa_biobert_base_cased_v1.1_squad_finetuned_biobert","en")
+.setInputCols(Array("document_question", "document_context"))
+.setOutputCol("answer")
+.setCaseSensitive(true)
+.setMaxSentenceLength(512)
+
+val pipeline = new Pipeline().setStages(Array(document, spanClassifier))
+
+val example = Seq(
+("Where was John Lenon born?", "John Lenon was born in London and lived in Paris. My name is Sarah and I live in London."),
+("What's my name?", "My name is Clara and I live in Berkeley."))
+.toDF("question", "context")
+
+val result = pipeline.fit(example).transform(example)
+```
+
+{:.nlu-block}
+```python
+import nlu
+nlu.load("en.answer_question.squad.biobert.base_cased").predict("""What's my name?|||"My name is Clara and I live in Berkeley.""")
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|bert_qa_biobert_base_cased_v1.1_squad_finetuned_biobert|
+|Compatibility:|Spark NLP 5.2.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|403.1 MB|
+|Case sensitive:|true|
+|Max sentence length:|512|
+
+## References
+
+References
+
+- https://huggingface.co/juliusco/biobert-base-cased-v1.1-squad-finetuned-biobert
\ No newline at end of file
diff --git a/docs/_posts/ahmedlone127/2023-11-14-bert_qa_biobert_base_cased_v1.1_squad_finetuned_covbiobert_en.md b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_biobert_base_cased_v1.1_squad_finetuned_covbiobert_en.md
new file mode 100644
index 00000000000000..1afee3a54cc824
--- /dev/null
+++ b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_biobert_base_cased_v1.1_squad_finetuned_covbiobert_en.md
@@ -0,0 +1,108 @@
+---
+layout: model
+title: English BertForQuestionAnswering model (from juliusco)
+author: John Snow Labs
+name: bert_qa_biobert_base_cased_v1.1_squad_finetuned_covbiobert
+date: 2023-11-14
+tags: [en, open_source, question_answering, bert, onnx]
+task: Question Answering
+language: en
+edition: Spark NLP 5.2.0
+spark_version: 3.0
+supported: true
+engine: onnx
+annotator: BertForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained Question Answering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `biobert-base-cased-v1.1-squad-finetuned-covbiobert` is an English model originally trained by `juliusco`.
+
+## Predicted Entities
+
+
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/bert_qa_biobert_base_cased_v1.1_squad_finetuned_covbiobert_en_5.2.0_3.0_1699997515541.zip){:.button.button-orange.button-orange-trans.arr.button-icon}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/bert_qa_biobert_base_cased_v1.1_squad_finetuned_covbiobert_en_5.2.0_3.0_1699997515541.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+document_assembler = MultiDocumentAssembler() \
+.setInputCols(["question", "context"]) \
+.setOutputCols(["document_question", "document_context"])
+
+spanClassifier = BertForQuestionAnswering.pretrained("bert_qa_biobert_base_cased_v1.1_squad_finetuned_covbiobert","en") \
+.setInputCols(["document_question", "document_context"]) \
+.setOutputCol("answer") \
+.setCaseSensitive(True)
+
+pipeline = Pipeline().setStages([
+document_assembler,
+spanClassifier
+])
+
+example = spark.createDataFrame([["What's my name?", "My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(example).transform(example)
+```
+```scala
+val document = new MultiDocumentAssembler()
+.setInputCols("question", "context")
+.setOutputCols("document_question", "document_context")
+
+val spanClassifier = BertForQuestionAnswering
+.pretrained("bert_qa_biobert_base_cased_v1.1_squad_finetuned_covbiobert","en")
+.setInputCols(Array("document_question", "document_context"))
+.setOutputCol("answer")
+.setCaseSensitive(true)
+.setMaxSentenceLength(512)
+
+val pipeline = new Pipeline().setStages(Array(document, spanClassifier))
+
+val example = Seq(
+("Where was John Lenon born?", "John Lenon was born in London and lived in Paris. My name is Sarah and I live in London."),
+("What's my name?", "My name is Clara and I live in Berkeley."))
+.toDF("question", "context")
+
+val result = pipeline.fit(example).transform(example)
+```
+
+{:.nlu-block}
+```python
+import nlu
+nlu.load("en.answer_question.squad.covid_biobert.base_cased").predict("""What's my name?|||"My name is Clara and I live in Berkeley.""")
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|bert_qa_biobert_base_cased_v1.1_squad_finetuned_covbiobert|
+|Compatibility:|Spark NLP 5.2.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|403.1 MB|
+|Case sensitive:|true|
+|Max sentence length:|512|
+
+## References
+
+References
+
+- https://huggingface.co/juliusco/biobert-base-cased-v1.1-squad-finetuned-covbiobert
\ No newline at end of file
diff --git a/docs/_posts/ahmedlone127/2023-11-14-bert_qa_biobert_base_cased_v1.1_squad_finetuned_covdrobert_en.md b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_biobert_base_cased_v1.1_squad_finetuned_covdrobert_en.md
new file mode 100644
index 00000000000000..129803d50878ae
--- /dev/null
+++ b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_biobert_base_cased_v1.1_squad_finetuned_covdrobert_en.md
@@ -0,0 +1,108 @@
+---
+layout: model
+title: English BertForQuestionAnswering model (from juliusco)
+author: John Snow Labs
+name: bert_qa_biobert_base_cased_v1.1_squad_finetuned_covdrobert
+date: 2023-11-14
+tags: [en, open_source, question_answering, bert, onnx]
+task: Question Answering
+language: en
+edition: Spark NLP 5.2.0
+spark_version: 3.0
+supported: true
+engine: onnx
+annotator: BertForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained Question Answering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `biobert-base-cased-v1.1-squad-finetuned-covdrobert` is an English model originally trained by `juliusco`.
+
+## Predicted Entities
+
+
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/bert_qa_biobert_base_cased_v1.1_squad_finetuned_covdrobert_en_5.2.0_3.0_1700002427396.zip){:.button.button-orange.button-orange-trans.arr.button-icon}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/bert_qa_biobert_base_cased_v1.1_squad_finetuned_covdrobert_en_5.2.0_3.0_1700002427396.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+document_assembler = MultiDocumentAssembler() \
+.setInputCols(["question", "context"]) \
+.setOutputCols(["document_question", "document_context"])
+
+spanClassifier = BertForQuestionAnswering.pretrained("bert_qa_biobert_base_cased_v1.1_squad_finetuned_covdrobert","en") \
+.setInputCols(["document_question", "document_context"]) \
+.setOutputCol("answer") \
+.setCaseSensitive(True)
+
+pipeline = Pipeline().setStages([
+document_assembler,
+spanClassifier
+])
+
+example = spark.createDataFrame([["What's my name?", "My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(example).transform(example)
+```
+```scala
+val document = new MultiDocumentAssembler()
+.setInputCols("question", "context")
+.setOutputCols("document_question", "document_context")
+
+val spanClassifier = BertForQuestionAnswering
+.pretrained("bert_qa_biobert_base_cased_v1.1_squad_finetuned_covdrobert","en")
+.setInputCols(Array("document_question", "document_context"))
+.setOutputCol("answer")
+.setCaseSensitive(true)
+.setMaxSentenceLength(512)
+
+val pipeline = new Pipeline().setStages(Array(document, spanClassifier))
+
+val example = Seq(
+("Where was John Lenon born?", "John Lenon was born in London and lived in Paris. My name is Sarah and I live in London."),
+("What's my name?", "My name is Clara and I live in Berkeley."))
+.toDF("question", "context")
+
+val result = pipeline.fit(example).transform(example)
+```
+
+{:.nlu-block}
+```python
+import nlu
+nlu.load("en.answer_question.squad.covid_roberta.base_cased").predict("""What's my name?|||"My name is Clara and I live in Berkeley.""")
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|bert_qa_biobert_base_cased_v1.1_squad_finetuned_covdrobert|
+|Compatibility:|Spark NLP 5.2.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|403.1 MB|
+|Case sensitive:|true|
+|Max sentence length:|512|
+
+## References
+
+References
+
+- https://huggingface.co/juliusco/biobert-base-cased-v1.1-squad-finetuned-covdrobert
\ No newline at end of file
diff --git a/docs/_posts/ahmedlone127/2023-11-14-bert_qa_biobert_bioasq_en.md b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_biobert_bioasq_en.md
new file mode 100644
index 00000000000000..08ba3db0283736
--- /dev/null
+++ b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_biobert_bioasq_en.md
@@ -0,0 +1,108 @@
+---
+layout: model
+title: English BertForQuestionAnswering model (from gdario)
+author: John Snow Labs
+name: bert_qa_biobert_bioasq
+date: 2023-11-14
+tags: [en, open_source, question_answering, bert, onnx]
+task: Question Answering
+language: en
+edition: Spark NLP 5.2.0
+spark_version: 3.0
+supported: true
+engine: onnx
+annotator: BertForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained Question Answering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `biobert_bioasq` is an English model originally trained by `gdario`.
+
+## Predicted Entities
+
+
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/bert_qa_biobert_bioasq_en_5.2.0_3.0_1699999388451.zip){:.button.button-orange.button-orange-trans.arr.button-icon}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/bert_qa_biobert_bioasq_en_5.2.0_3.0_1699999388451.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+document_assembler = MultiDocumentAssembler() \
+.setInputCols(["question", "context"]) \
+.setOutputCols(["document_question", "document_context"])
+
+spanClassifier = BertForQuestionAnswering.pretrained("bert_qa_biobert_bioasq","en") \
+.setInputCols(["document_question", "document_context"]) \
+.setOutputCol("answer") \
+.setCaseSensitive(True)
+
+pipeline = Pipeline().setStages([
+document_assembler,
+spanClassifier
+])
+
+example = spark.createDataFrame([["What's my name?", "My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(example).transform(example)
+```
+```scala
+val document = new MultiDocumentAssembler()
+.setInputCols("question", "context")
+.setOutputCols("document_question", "document_context")
+
+val spanClassifier = BertForQuestionAnswering
+.pretrained("bert_qa_biobert_bioasq","en")
+.setInputCols(Array("document_question", "document_context"))
+.setOutputCol("answer")
+.setCaseSensitive(true)
+.setMaxSentenceLength(512)
+
+val pipeline = new Pipeline().setStages(Array(document, spanClassifier))
+
+val example = Seq(
+("Where was John Lenon born?", "John Lenon was born in London and lived in Paris. My name is Sarah and I live in London."),
+("What's my name?", "My name is Clara and I live in Berkeley."))
+.toDF("question", "context")
+
+val result = pipeline.fit(example).transform(example)
+```
+
+{:.nlu-block}
+```python
+import nlu
+nlu.load("en.answer_question.biobert").predict("""What's my name?|||"My name is Clara and I live in Berkeley.""")
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|bert_qa_biobert_bioasq|
+|Compatibility:|Spark NLP 5.2.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|403.1 MB|
+|Case sensitive:|true|
+|Max sentence length:|512|
+
+## References
+
+References
+
+- https://huggingface.co/gdario/biobert_bioasq
\ No newline at end of file
diff --git a/docs/_posts/ahmedlone127/2023-11-14-bert_qa_biobert_large_cased_v1.1_squad_en.md b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_biobert_large_cased_v1.1_squad_en.md
new file mode 100644
index 00000000000000..d1bbd37f47803e
--- /dev/null
+++ b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_biobert_large_cased_v1.1_squad_en.md
@@ -0,0 +1,100 @@
+---
+layout: model
+title: English BertForQuestionAnswering Large Cased model (from dmis-lab)
+author: John Snow Labs
+name: bert_qa_biobert_large_cased_v1.1_squad
+date: 2023-11-14
+tags: [en, open_source, bert, question_answering, onnx]
+task: Question Answering
+language: en
+edition: Spark NLP 5.2.0
+spark_version: 3.0
+supported: true
+engine: onnx
+annotator: BertForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained Question Answering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `biobert-large-cased-v1.1-squad` is an English model originally trained by `dmis-lab`.
+
+## Predicted Entities
+
+
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/bert_qa_biobert_large_cased_v1.1_squad_en_5.2.0_3.0_1700002976804.zip){:.button.button-orange.button-orange-trans.arr.button-icon}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/bert_qa_biobert_large_cased_v1.1_squad_en_5.2.0_3.0_1700002976804.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+documentAssembler = MultiDocumentAssembler() \
+ .setInputCols(["question", "context"]) \
+ .setOutputCols(["document_question", "document_context"])
+
+spanClassifier = BertForQuestionAnswering.pretrained("bert_qa_biobert_large_cased_v1.1_squad","en") \
+ .setInputCols(["document_question", "document_context"]) \
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[documentAssembler, spanClassifier])
+
+data = spark.createDataFrame([["What is my name?", "My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val documentAssembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val spanClassifier = BertForQuestionAnswering.pretrained("bert_qa_biobert_large_cased_v1.1_squad","en")
+  .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(documentAssembler, spanClassifier))
+
+val data = Seq(("What is my name?", "My name is Clara and I live in Berkeley.")).toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+{:.nlu-block}
+```python
+import nlu
+nlu.load("en.answer_question.biobert.squad.cased_large").predict("""What is my name?|||"My name is Clara and I live in Berkeley.""")
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|bert_qa_biobert_large_cased_v1.1_squad|
+|Compatibility:|Spark NLP 5.2.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|1.3 GB|
+|Case sensitive:|true|
+|Max sentence length:|512|
+
+## References
+
+References
+
+- https://huggingface.co/dmis-lab/biobert-large-cased-v1.1-squad
\ No newline at end of file
diff --git a/docs/_posts/ahmedlone127/2023-11-14-bert_qa_biobert_squad2_cased_en.md b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_biobert_squad2_cased_en.md
new file mode 100644
index 00000000000000..86a6c7d606a6e6
--- /dev/null
+++ b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_biobert_squad2_cased_en.md
@@ -0,0 +1,108 @@
+---
+layout: model
+title: English BertForQuestionAnswering model (from clagator)
+author: John Snow Labs
+name: bert_qa_biobert_squad2_cased
+date: 2023-11-14
+tags: [en, open_source, question_answering, bert, onnx]
+task: Question Answering
+language: en
+edition: Spark NLP 5.2.0
+spark_version: 3.0
+supported: true
+engine: onnx
+annotator: BertForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained Question Answering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `biobert_squad2_cased` is an English model originally trained by `clagator`.
+
+## Predicted Entities
+
+
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/bert_qa_biobert_squad2_cased_en_5.2.0_3.0_1699999675367.zip){:.button.button-orange.button-orange-trans.arr.button-icon}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/bert_qa_biobert_squad2_cased_en_5.2.0_3.0_1699999675367.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+document_assembler = MultiDocumentAssembler() \
+.setInputCols(["question", "context"]) \
+.setOutputCols(["document_question", "document_context"])
+
+spanClassifier = BertForQuestionAnswering.pretrained("bert_qa_biobert_squad2_cased","en") \
+.setInputCols(["document_question", "document_context"]) \
+.setOutputCol("answer") \
+.setCaseSensitive(True)
+
+pipeline = Pipeline().setStages([
+document_assembler,
+spanClassifier
+])
+
+example = spark.createDataFrame([["What's my name?", "My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(example).transform(example)
+```
+```scala
+val document = new MultiDocumentAssembler()
+.setInputCols("question", "context")
+.setOutputCols("document_question", "document_context")
+
+val spanClassifier = BertForQuestionAnswering
+.pretrained("bert_qa_biobert_squad2_cased","en")
+.setInputCols(Array("document_question", "document_context"))
+.setOutputCol("answer")
+.setCaseSensitive(true)
+.setMaxSentenceLength(512)
+
+val pipeline = new Pipeline().setStages(Array(document, spanClassifier))
+
+val example = Seq(
+("Where was John Lenon born?", "John Lenon was born in London and lived in Paris. My name is Sarah and I live in London."),
+("What's my name?", "My name is Clara and I live in Berkeley."))
+.toDF("question", "context")
+
+val result = pipeline.fit(example).transform(example)
+```
+
+{:.nlu-block}
+```python
+import nlu
+nlu.load("en.answer_question.squadv2.biobert.cased").predict("""What's my name?|||"My name is Clara and I live in Berkeley.""")
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|bert_qa_biobert_squad2_cased|
+|Compatibility:|Spark NLP 5.2.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|403.1 MB|
+|Case sensitive:|true|
+|Max sentence length:|512|
+
+## References
+
+References
+
+- https://huggingface.co/clagator/biobert_squad2_cased
\ No newline at end of file
diff --git a/docs/_posts/ahmedlone127/2023-11-14-bert_qa_biobert_squad2_cased_finetuned_squad_en.md b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_biobert_squad2_cased_finetuned_squad_en.md
new file mode 100644
index 00000000000000..b071e768823a73
--- /dev/null
+++ b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_biobert_squad2_cased_finetuned_squad_en.md
@@ -0,0 +1,108 @@
+---
+layout: model
+title: English BertForQuestionAnswering model (from ptnv-s)
+author: John Snow Labs
+name: bert_qa_biobert_squad2_cased_finetuned_squad
+date: 2023-11-14
+tags: [en, open_source, question_answering, bert, onnx]
+task: Question Answering
+language: en
+edition: Spark NLP 5.2.0
+spark_version: 3.0
+supported: true
+engine: onnx
+annotator: BertForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained Question Answering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `biobert_squad2_cased-finetuned-squad` is an English model originally trained by `ptnv-s`.
+
+## Predicted Entities
+
+
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/bert_qa_biobert_squad2_cased_finetuned_squad_en_5.2.0_3.0_1699997805522.zip){:.button.button-orange.button-orange-trans.arr.button-icon}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/bert_qa_biobert_squad2_cased_finetuned_squad_en_5.2.0_3.0_1699997805522.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+document_assembler = MultiDocumentAssembler() \
+.setInputCols(["question", "context"]) \
+.setOutputCols(["document_question", "document_context"])
+
+spanClassifier = BertForQuestionAnswering.pretrained("bert_qa_biobert_squad2_cased_finetuned_squad","en") \
+.setInputCols(["document_question", "document_context"]) \
+.setOutputCol("answer") \
+.setCaseSensitive(True)
+
+pipeline = Pipeline().setStages([
+document_assembler,
+spanClassifier
+])
+
+example = spark.createDataFrame([["What's my name?", "My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(example).transform(example)
+```
+```scala
+val document = new MultiDocumentAssembler()
+.setInputCols("question", "context")
+.setOutputCols("document_question", "document_context")
+
+val spanClassifier = BertForQuestionAnswering
+.pretrained("bert_qa_biobert_squad2_cased_finetuned_squad","en")
+.setInputCols(Array("document_question", "document_context"))
+.setOutputCol("answer")
+.setCaseSensitive(true)
+.setMaxSentenceLength(512)
+
+val pipeline = new Pipeline().setStages(Array(document, spanClassifier))
+
+val example = Seq(
+("Where was John Lenon born?", "John Lenon was born in London and lived in Paris. My name is Sarah and I live in London."),
+("What's my name?", "My name is Clara and I live in Berkeley."))
+.toDF("question", "context")
+
+val result = pipeline.fit(example).transform(example)
+```
+
+{:.nlu-block}
+```python
+import nlu
+nlu.load("en.answer_question.squadv2.biobert.cased.by_ptnv-s").predict("""What's my name?|||"My name is Clara and I live in Berkeley.""")
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|bert_qa_biobert_squad2_cased_finetuned_squad|
+|Compatibility:|Spark NLP 5.2.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|403.1 MB|
+|Case sensitive:|true|
+|Max sentence length:|512|
+
+## References
+
+References
+
+- https://huggingface.co/ptnv-s/biobert_squad2_cased-finetuned-squad
\ No newline at end of file
diff --git a/docs/_posts/ahmedlone127/2023-11-14-bert_qa_biobert_v1.1_biomedicalquestionanswering_en.md b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_biobert_v1.1_biomedicalquestionanswering_en.md
new file mode 100644
index 00000000000000..0e685c2866eadf
--- /dev/null
+++ b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_biobert_v1.1_biomedicalquestionanswering_en.md
@@ -0,0 +1,100 @@
+---
+layout: model
+title: English BertForQuestionAnswering Cased model (from Shushant)
+author: John Snow Labs
+name: bert_qa_biobert_v1.1_biomedicalquestionanswering
+date: 2023-11-14
+tags: [en, open_source, bert, question_answering, onnx]
+task: Question Answering
+language: en
+edition: Spark NLP 5.2.0
+spark_version: 3.0
+supported: true
+engine: onnx
+annotator: BertForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained Question Answering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `biobert-v1.1-biomedicalQuestionAnswering` is an English model originally trained by `Shushant`.
+
+## Predicted Entities
+
+
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/bert_qa_biobert_v1.1_biomedicalquestionanswering_en_5.2.0_3.0_1699999913959.zip){:.button.button-orange.button-orange-trans.arr.button-icon}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/bert_qa_biobert_v1.1_biomedicalquestionanswering_en_5.2.0_3.0_1699999913959.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+documentAssembler = MultiDocumentAssembler() \
+ .setInputCols(["question", "context"]) \
+ .setOutputCols(["document_question", "document_context"])
+
+spanClassifier = BertForQuestionAnswering.pretrained("bert_qa_biobert_v1.1_biomedicalquestionanswering","en") \
+ .setInputCols(["document_question", "document_context"]) \
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[documentAssembler, spanClassifier])
+
+data = spark.createDataFrame([["What is my name?", "My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val documentAssembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val spanClassifier = BertForQuestionAnswering.pretrained("bert_qa_biobert_v1.1_biomedicalquestionanswering","en")
+  .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(documentAssembler, spanClassifier))
+
+val data = Seq(("What is my name?", "My name is Clara and I live in Berkeley.")).toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+{:.nlu-block}
+```python
+import nlu
+nlu.load("en.answer_question.biobert.bio_medical.").predict("""What is my name?|||"My name is Clara and I live in Berkeley.""")
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|bert_qa_biobert_v1.1_biomedicalquestionanswering|
+|Compatibility:|Spark NLP 5.2.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|403.1 MB|
+|Case sensitive:|true|
+|Max sentence length:|512|
+
+## References
+
+References
+
+- https://huggingface.co/Shushant/biobert-v1.1-biomedicalQuestionAnswering
\ No newline at end of file
diff --git a/docs/_posts/ahmedlone127/2023-11-14-bert_qa_biobert_v1.1_pubmed_finetuned_squad_en.md b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_biobert_v1.1_pubmed_finetuned_squad_en.md
new file mode 100644
index 00000000000000..0d2b64fe6f65c8
--- /dev/null
+++ b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_biobert_v1.1_pubmed_finetuned_squad_en.md
@@ -0,0 +1,108 @@
+---
+layout: model
+title: English BertForQuestionAnswering model (from gerardozq)
+author: John Snow Labs
+name: bert_qa_biobert_v1.1_pubmed_finetuned_squad
+date: 2023-11-14
+tags: [en, open_source, question_answering, bert, onnx]
+task: Question Answering
+language: en
+edition: Spark NLP 5.2.0
+spark_version: 3.0
+supported: true
+engine: onnx
+annotator: BertForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained Question Answering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `biobert_v1.1_pubmed-finetuned-squad` is an English model originally trained by `gerardozq`.
+
+## Predicted Entities
+
+
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/bert_qa_biobert_v1.1_pubmed_finetuned_squad_en_5.2.0_3.0_1700002425889.zip){:.button.button-orange.button-orange-trans.arr.button-icon}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/bert_qa_biobert_v1.1_pubmed_finetuned_squad_en_5.2.0_3.0_1700002425889.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+document_assembler = MultiDocumentAssembler() \
+.setInputCols(["question", "context"]) \
+.setOutputCols(["document_question", "document_context"])
+
+spanClassifier = BertForQuestionAnswering.pretrained("bert_qa_biobert_v1.1_pubmed_finetuned_squad","en") \
+.setInputCols(["document_question", "document_context"]) \
+.setOutputCol("answer") \
+.setCaseSensitive(True)
+
+pipeline = Pipeline().setStages([
+document_assembler,
+spanClassifier
+])
+
+example = spark.createDataFrame([["What's my name?", "My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(example).transform(example)
+```
+```scala
+val document = new MultiDocumentAssembler()
+.setInputCols("question", "context")
+.setOutputCols("document_question", "document_context")
+
+val spanClassifier = BertForQuestionAnswering
+.pretrained("bert_qa_biobert_v1.1_pubmed_finetuned_squad","en")
+.setInputCols(Array("document_question", "document_context"))
+.setOutputCol("answer")
+.setCaseSensitive(true)
+.setMaxSentenceLength(512)
+
+val pipeline = new Pipeline().setStages(Array(document, spanClassifier))
+
+val example = Seq(
+("Where was John Lenon born?", "John Lenon was born in London and lived in Paris. My name is Sarah and I live in London."),
+("What's my name?", "My name is Clara and I live in Berkeley."))
+.toDF("question", "context")
+
+val result = pipeline.fit(example).transform(example)
+```
+
+{:.nlu-block}
+```python
+import nlu
+nlu.load("en.answer_question.squad_pubmed.biobert").predict("""What's my name?|||"My name is Clara and I live in Berkeley.""")
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|bert_qa_biobert_v1.1_pubmed_finetuned_squad|
+|Compatibility:|Spark NLP 5.2.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|403.1 MB|
+|Case sensitive:|true|
+|Max sentence length:|512|
+
+## References
+
+References
+
+- https://huggingface.co/gerardozq/biobert_v1.1_pubmed-finetuned-squad
\ No newline at end of file
diff --git a/docs/_posts/ahmedlone127/2023-11-14-bert_qa_biobert_v1.1_pubmed_squad_v2_en.md b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_biobert_v1.1_pubmed_squad_v2_en.md
new file mode 100644
index 00000000000000..d8aa557873aca5
--- /dev/null
+++ b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_biobert_v1.1_pubmed_squad_v2_en.md
@@ -0,0 +1,109 @@
+---
+layout: model
+title: English BertForQuestionAnswering model (from ktrapeznikov)
+author: John Snow Labs
+name: bert_qa_biobert_v1.1_pubmed_squad_v2
+date: 2023-11-14
+tags: [en, open_source, question_answering, bert, onnx]
+task: Question Answering
+language: en
+edition: Spark NLP 5.2.0
+spark_version: 3.0
+supported: true
+engine: onnx
+annotator: BertForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained Question Answering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `biobert_v1.1_pubmed_squad_v2` is an English model originally trained by `ktrapeznikov`.
+
+## Predicted Entities
+
+
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/bert_qa_biobert_v1.1_pubmed_squad_v2_en_5.2.0_3.0_1700000208525.zip){:.button.button-orange.button-orange-trans.arr.button-icon}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/bert_qa_biobert_v1.1_pubmed_squad_v2_en_5.2.0_3.0_1700000208525.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+document_assembler = MultiDocumentAssembler() \
+.setInputCols(["question", "context"]) \
+.setOutputCols(["document_question", "document_context"])
+
+spanClassifier = BertForQuestionAnswering.pretrained("bert_qa_biobert_v1.1_pubmed_squad_v2","en") \
+.setInputCols(["document_question", "document_context"]) \
+.setOutputCol("answer") \
+.setCaseSensitive(True)
+
+pipeline = Pipeline().setStages([
+document_assembler,
+spanClassifier
+])
+
+example = spark.createDataFrame([["What's my name?", "My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(example).transform(example)
+```
+```scala
+val document = new MultiDocumentAssembler()
+.setInputCols("question", "context")
+.setOutputCols("document_question", "document_context")
+
+val spanClassifier = BertForQuestionAnswering
+.pretrained("bert_qa_biobert_v1.1_pubmed_squad_v2","en")
+.setInputCols(Array("document_question", "document_context"))
+.setOutputCol("answer")
+.setCaseSensitive(true)
+.setMaxSentenceLength(512)
+
+val pipeline = new Pipeline().setStages(Array(document, spanClassifier))
+
+val example = Seq(
+("Where was John Lenon born?", "John Lenon was born in London and lived in Paris. My name is Sarah and I live in London."),
+("What's my name?", "My name is Clara and I live in Berkeley."))
+.toDF("question", "context")
+
+val result = pipeline.fit(example).transform(example)
+```
+
+{:.nlu-block}
+```python
+import nlu
+nlu.load("en.answer_question.squadv2_pubmed.biobert.v2").predict("""What's my name?|||"My name is Clara and I live in Berkeley.""")
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|bert_qa_biobert_v1.1_pubmed_squad_v2|
+|Compatibility:|Spark NLP 5.2.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|403.1 MB|
+|Case sensitive:|true|
+|Max sentence length:|512|
+
+## References
+
+References
+
+- https://huggingface.co/ktrapeznikov/biobert_v1.1_pubmed_squad_v2
+- https://rajpurkar.github.io/SQuAD-explorer/
\ No newline at end of file
diff --git a/docs/_posts/ahmedlone127/2023-11-14-bert_qa_biobertpt_squad_v1.1_portuguese_pt.md b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_biobertpt_squad_v1.1_portuguese_pt.md
new file mode 100644
index 00000000000000..606c82766c6441
--- /dev/null
+++ b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_biobertpt_squad_v1.1_portuguese_pt.md
@@ -0,0 +1,95 @@
+---
+layout: model
+title: Portuguese bert_qa_biobertpt_squad_v1.1_portuguese BertForQuestionAnswering from pucpr
+author: John Snow Labs
+name: bert_qa_biobertpt_squad_v1.1_portuguese
+date: 2023-11-14
+tags: [bert, pt, open_source, question_answering, onnx]
+task: Question Answering
+language: pt
+edition: Spark NLP 5.2.0
+spark_version: 3.0
+supported: true
+engine: onnx
+annotator: BertForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained BertForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP.`bert_qa_biobertpt_squad_v1.1_portuguese` is a Portuguese model originally trained by pucpr.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/bert_qa_biobertpt_squad_v1.1_portuguese_pt_5.2.0_3.0_1699996422588.zip){:.button.button-orange.button-orange-trans.arr.button-icon}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/bert_qa_biobertpt_squad_v1.1_portuguese_pt_5.2.0_3.0_1699996422588.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|bert_qa_biobertpt_squad_v1.1_portuguese|
+|Compatibility:|Spark NLP 5.2.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|pt|
+|Size:|664.8 MB|
+|Case sensitive:|true|
+|Max sentence length:|512|
+
+## References
+
+https://huggingface.co/pucpr/bioBERTpt-squad-v1.1-portuguese
\ No newline at end of file
diff --git a/docs/_posts/ahmedlone127/2023-11-14-bert_qa_bioformer_cased_v1.0_squad1_en.md b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_bioformer_cased_v1.0_squad1_en.md
new file mode 100644
index 00000000000000..41912fbbc29c4c
--- /dev/null
+++ b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_bioformer_cased_v1.0_squad1_en.md
@@ -0,0 +1,110 @@
+---
+layout: model
+title: English BertForQuestionAnswering model (from bioformers)
+author: John Snow Labs
+name: bert_qa_bioformer_cased_v1.0_squad1
+date: 2023-11-14
+tags: [en, open_source, question_answering, bert, onnx]
+task: Question Answering
+language: en
+edition: Spark NLP 5.2.0
+spark_version: 3.0
+supported: true
+engine: onnx
+annotator: BertForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained Question Answering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `bioformer-cased-v1.0-squad1` is an English model originally trained by `bioformers`.
+
+## Predicted Entities
+
+
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/bert_qa_bioformer_cased_v1.0_squad1_en_5.2.0_3.0_1700002661909.zip){:.button.button-orange.button-orange-trans.arr.button-icon}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/bert_qa_bioformer_cased_v1.0_squad1_en_5.2.0_3.0_1700002661909.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+document_assembler = MultiDocumentAssembler() \
+.setInputCols(["question", "context"]) \
+.setOutputCols(["document_question", "document_context"])
+
+spanClassifier = BertForQuestionAnswering.pretrained("bert_qa_bioformer_cased_v1.0_squad1","en") \
+.setInputCols(["document_question", "document_context"]) \
+.setOutputCol("answer") \
+.setCaseSensitive(True)
+
+pipeline = Pipeline().setStages([
+document_assembler,
+spanClassifier
+])
+
+example = spark.createDataFrame([["What's my name?", "My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(example).transform(example)
+```
+```scala
+val document = new MultiDocumentAssembler()
+.setInputCols("question", "context")
+.setOutputCols("document_question", "document_context")
+
+val spanClassifier = BertForQuestionAnswering
+.pretrained("bert_qa_bioformer_cased_v1.0_squad1","en")
+.setInputCols(Array("document_question", "document_context"))
+.setOutputCol("answer")
+.setCaseSensitive(true)
+.setMaxSentenceLength(512)
+
+val pipeline = new Pipeline().setStages(Array(document, spanClassifier))
+
+val example = Seq(
+("Where was John Lenon born?", "John Lenon was born in London and lived in Paris. My name is Sarah and I live in London."),
+("What's my name?", "My name is Clara and I live in Berkeley."))
+.toDF("question", "context")
+
+val result = pipeline.fit(example).transform(example)
+```
+
+{:.nlu-block}
+```python
+import nlu
+nlu.load("en.answer_question.squad.bioformer.cased").predict("""What's my name?|||"My name is Clara and I live in Berkeley.""")
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|bert_qa_bioformer_cased_v1.0_squad1|
+|Compatibility:|Spark NLP 5.2.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|158.5 MB|
+|Case sensitive:|true|
+|Max sentence length:|512|
+
+## References
+
+References
+
+- https://huggingface.co/bioformers/bioformer-cased-v1.0-squad1
+- https://rajpurkar.github.io/SQuAD-explorer
+- https://arxiv.org/pdf/1910.01108.pdf
\ No newline at end of file
diff --git a/docs/_posts/ahmedlone127/2023-11-14-bert_qa_biomedical_slot_filling_reader_base_en.md b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_biomedical_slot_filling_reader_base_en.md
new file mode 100644
index 00000000000000..ec6f3f51862433
--- /dev/null
+++ b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_biomedical_slot_filling_reader_base_en.md
@@ -0,0 +1,109 @@
+---
+layout: model
+title: English BertForQuestionAnswering model (from healx)
+author: John Snow Labs
+name: bert_qa_biomedical_slot_filling_reader_base
+date: 2023-11-14
+tags: [en, open_source, question_answering, bert, onnx]
+task: Question Answering
+language: en
+edition: Spark NLP 5.2.0
+spark_version: 3.0
+supported: true
+engine: onnx
+annotator: BertForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained Question Answering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `biomedical-slot-filling-reader-base` is an English model originally trained by `healx`.
+
+## Predicted Entities
+
+
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/bert_qa_biomedical_slot_filling_reader_base_en_5.2.0_3.0_1699996757432.zip){:.button.button-orange.button-orange-trans.arr.button-icon}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/bert_qa_biomedical_slot_filling_reader_base_en_5.2.0_3.0_1699996757432.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+document_assembler = MultiDocumentAssembler() \
+.setInputCols(["question", "context"]) \
+.setOutputCols(["document_question", "document_context"])
+
+spanClassifier = BertForQuestionAnswering.pretrained("bert_qa_biomedical_slot_filling_reader_base","en") \
+.setInputCols(["document_question", "document_context"]) \
+.setOutputCol("answer") \
+.setCaseSensitive(True)
+
+pipeline = Pipeline().setStages([
+document_assembler,
+spanClassifier
+])
+
+example = spark.createDataFrame([["What's my name?", "My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(example).transform(example)
+```
+```scala
+val document = new MultiDocumentAssembler()
+.setInputCols("question", "context")
+.setOutputCols("document_question", "document_context")
+
+val spanClassifier = BertForQuestionAnswering
+.pretrained("bert_qa_biomedical_slot_filling_reader_base","en")
+.setInputCols(Array("document_question", "document_context"))
+.setOutputCol("answer")
+.setCaseSensitive(true)
+.setMaxSentenceLength(512)
+
+val pipeline = new Pipeline().setStages(Array(document, spanClassifier))
+
+val example = Seq(
+("Where was John Lenon born?", "John Lenon was born in London and lived in Paris. My name is Sarah and I live in London."),
+("What's my name?", "My name is Clara and I live in Berkeley."))
+.toDF("question", "context")
+
+val result = pipeline.fit(example).transform(example)
+```
+
+{:.nlu-block}
+```python
+import nlu
+nlu.load("en.answer_question.bio_medical.bert.base").predict("""What's my name?|||"My name is Clara and I live in Berkeley.""")
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|bert_qa_biomedical_slot_filling_reader_base|
+|Compatibility:|Spark NLP 5.2.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|403.6 MB|
+|Case sensitive:|true|
+|Max sentence length:|512|
+
+## References
+
+References
+
+- https://huggingface.co/healx/biomedical-slot-filling-reader-base
+- https://arxiv.org/abs/2109.08564
\ No newline at end of file
diff --git a/docs/_posts/ahmedlone127/2023-11-14-bert_qa_biomedical_slot_filling_reader_large_en.md b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_biomedical_slot_filling_reader_large_en.md
new file mode 100644
index 00000000000000..2a3adcbaa8e7dd
--- /dev/null
+++ b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_biomedical_slot_filling_reader_large_en.md
@@ -0,0 +1,109 @@
+---
+layout: model
+title: English BertForQuestionAnswering model (from healx)
+author: John Snow Labs
+name: bert_qa_biomedical_slot_filling_reader_large
+date: 2023-11-14
+tags: [en, open_source, question_answering, bert, onnx]
+task: Question Answering
+language: en
+edition: Spark NLP 5.2.0
+spark_version: 3.0
+supported: true
+engine: onnx
+annotator: BertForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained Question Answering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `biomedical-slot-filling-reader-large` is an English model originally trained by `healx`.
+
+## Predicted Entities
+
+
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/bert_qa_biomedical_slot_filling_reader_large_en_5.2.0_3.0_1700003224568.zip){:.button.button-orange.button-orange-trans.arr.button-icon}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/bert_qa_biomedical_slot_filling_reader_large_en_5.2.0_3.0_1700003224568.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+document_assembler = MultiDocumentAssembler() \
+.setInputCols(["question", "context"]) \
+.setOutputCols(["document_question", "document_context"])
+
+spanClassifier = BertForQuestionAnswering.pretrained("bert_qa_biomedical_slot_filling_reader_large","en") \
+.setInputCols(["document_question", "document_context"]) \
+.setOutputCol("answer") \
+.setCaseSensitive(True)
+
+pipeline = Pipeline().setStages([
+document_assembler,
+spanClassifier
+])
+
+example = spark.createDataFrame([["What's my name?", "My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(example).transform(example)
+```
+```scala
+val document = new MultiDocumentAssembler()
+.setInputCols("question", "context")
+.setOutputCols("document_question", "document_context")
+
+val spanClassifier = BertForQuestionAnswering
+.pretrained("bert_qa_biomedical_slot_filling_reader_large","en")
+.setInputCols(Array("document_question", "document_context"))
+.setOutputCol("answer")
+.setCaseSensitive(true)
+.setMaxSentenceLength(512)
+
+val pipeline = new Pipeline().setStages(Array(document, spanClassifier))
+
+val example = Seq(
+("Where was John Lenon born?", "John Lenon was born in London and lived in Paris. My name is Sarah and I live in London."),
+("What's my name?", "My name is Clara and I live in Berkeley."))
+.toDF("question", "context")
+
+val result = pipeline.fit(example).transform(example)
+```
+
+{:.nlu-block}
+```python
+import nlu
+nlu.load("en.answer_question.bio_medical.bert.large").predict("""What's my name?|||"My name is Clara and I live in Berkeley.""")
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|bert_qa_biomedical_slot_filling_reader_large|
+|Compatibility:|Spark NLP 5.2.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[sentence, token]|
+|Output Labels:|[embeddings]|
+|Language:|en|
+|Size:|1.3 GB|
+|Case sensitive:|true|
+|Max sentence length:|512|
+
+## References
+
+References
+
+- https://huggingface.co/healx/biomedical-slot-filling-reader-large
+- https://arxiv.org/abs/2109.08564
\ No newline at end of file
diff --git a/docs/_posts/ahmedlone127/2023-11-14-bert_qa_braquad_bert_qna_en.md b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_braquad_bert_qna_en.md
new file mode 100644
index 00000000000000..77dc455317b173
--- /dev/null
+++ b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_braquad_bert_qna_en.md
@@ -0,0 +1,109 @@
+---
+layout: model
+title: English BertForQuestionAnswering model (from piEsposito)
+author: John Snow Labs
+name: bert_qa_braquad_bert_qna
+date: 2023-11-14
+tags: [en, open_source, question_answering, bert, onnx]
+task: Question Answering
+language: en
+edition: Spark NLP 5.2.0
+spark_version: 3.0
+supported: true
+engine: onnx
+annotator: BertForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained Question Answering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `braquad-bert-qna` is an English model originally trained by `piEsposito`.
+
+## Predicted Entities
+
+
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/bert_qa_braquad_bert_qna_en_5.2.0_3.0_1700003224896.zip){:.button.button-orange.button-orange-trans.arr.button-icon}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/bert_qa_braquad_bert_qna_en_5.2.0_3.0_1700003224896.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+document_assembler = MultiDocumentAssembler() \
+.setInputCols(["question", "context"]) \
+.setOutputCols(["document_question", "document_context"])
+
+spanClassifier = BertForQuestionAnswering.pretrained("bert_qa_braquad_bert_qna","en") \
+.setInputCols(["document_question", "document_context"]) \
+.setOutputCol("answer") \
+.setCaseSensitive(True)
+
+pipeline = Pipeline().setStages([
+document_assembler,
+spanClassifier
+])
+
+example = spark.createDataFrame([["What's my name?", "My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(example).transform(example)
+```
+```scala
+val document = new MultiDocumentAssembler()
+.setInputCols("question", "context")
+.setOutputCols("document_question", "document_context")
+
+val spanClassifier = BertForQuestionAnswering
+.pretrained("bert_qa_braquad_bert_qna","en")
+.setInputCols(Array("document_question", "document_context"))
+.setOutputCol("answer")
+.setCaseSensitive(true)
+.setMaxSentenceLength(512)
+
+val pipeline = new Pipeline().setStages(Array(document, spanClassifier))
+
+val example = Seq(
+("Where was John Lenon born?", "John Lenon was born in London and lived in Paris. My name is Sarah and I live in London."),
+("What's my name?", "My name is Clara and I live in Berkeley."))
+.toDF("question", "context")
+
+val result = pipeline.fit(example).transform(example)
+```
+
+{:.nlu-block}
+```python
+import nlu
+nlu.load("en.answer_question.bert.by_piEsposito").predict("""What's my name?|||"My name is Clara and I live in Berkeley.""")
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|bert_qa_braquad_bert_qna|
+|Compatibility:|Spark NLP 5.2.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[sentence, token]|
+|Output Labels:|[embeddings]|
+|Language:|en|
+|Size:|405.9 MB|
+|Case sensitive:|true|
+|Max sentence length:|512|
+
+## References
+
+References
+
+- https://huggingface.co/piEsposito/braquad-bert-qna
+- https://github.com/piEsposito/br-quad-2.0
\ No newline at end of file
diff --git a/docs/_posts/ahmedlone127/2023-11-14-bert_qa_bsnmldb_finetuned_squad_en.md b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_bsnmldb_finetuned_squad_en.md
new file mode 100644
index 00000000000000..6c55472f5c6ec1
--- /dev/null
+++ b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_bsnmldb_finetuned_squad_en.md
@@ -0,0 +1,94 @@
+---
+layout: model
+title: English BertForQuestionAnswering Cased model (from bsnmldb)
+author: John Snow Labs
+name: bert_qa_bsnmldb_finetuned_squad
+date: 2023-11-14
+tags: [en, open_source, bert, question_answering, onnx]
+task: Question Answering
+language: en
+edition: Spark NLP 5.2.0
+spark_version: 3.0
+supported: true
+engine: onnx
+annotator: BertForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained BertForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `bert-finetuned-squad` is an English model originally trained by `bsnmldb`.
+
+## Predicted Entities
+
+
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/bert_qa_bsnmldb_finetuned_squad_en_5.2.0_3.0_1700000456714.zip){:.button.button-orange.button-orange-trans.arr.button-icon}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/bert_qa_bsnmldb_finetuned_squad_en_5.2.0_3.0_1700000456714.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = BertForQuestionAnswering.pretrained("bert_qa_bsnmldb_finetuned_squad","en")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = BertForQuestionAnswering.pretrained("bert_qa_bsnmldb_finetuned_squad","en")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq("What's my name?","My name is Clara and I live in Berkeley.").toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|bert_qa_bsnmldb_finetuned_squad|
+|Compatibility:|Spark NLP 5.2.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|403.6 MB|
+|Case sensitive:|true|
+|Max sentence length:|512|
+
+## References
+
+References
+
+- https://huggingface.co/bsnmldb/bert-finetuned-squad
\ No newline at end of file
diff --git a/docs/_posts/ahmedlone127/2023-11-14-bert_qa_case_base_en.md b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_case_base_en.md
new file mode 100644
index 00000000000000..049f19689290fa
--- /dev/null
+++ b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_case_base_en.md
@@ -0,0 +1,94 @@
+---
+layout: model
+title: English BertForQuestionAnswering Base Cased model (from srcocotero)
+author: John Snow Labs
+name: bert_qa_case_base
+date: 2023-11-14
+tags: [en, open_source, bert, question_answering, onnx]
+task: Question Answering
+language: en
+edition: Spark NLP 5.2.0
+spark_version: 3.0
+supported: true
+engine: onnx
+annotator: BertForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained BertForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `bert-base-qa` is an English model originally trained by `srcocotero`.
+
+## Predicted Entities
+
+
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/bert_qa_case_base_en_5.2.0_3.0_1699998107954.zip){:.button.button-orange.button-orange-trans.arr.button-icon}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/bert_qa_case_base_en_5.2.0_3.0_1699998107954.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = BertForQuestionAnswering.pretrained("bert_qa_case_base","en")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = BertForQuestionAnswering.pretrained("bert_qa_case_base","en")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq("What's my name?","My name is Clara and I live in Berkeley.").toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|bert_qa_case_base|
+|Compatibility:|Spark NLP 5.2.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|407.2 MB|
+|Case sensitive:|true|
+|Max sentence length:|512|
+
+## References
+
+References
+
+- https://huggingface.co/srcocotero/bert-base-qa
\ No newline at end of file
diff --git a/docs/_posts/ahmedlone127/2023-11-14-bert_qa_causal_qa_en.md b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_causal_qa_en.md
new file mode 100644
index 00000000000000..1f012b02e0d551
--- /dev/null
+++ b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_causal_qa_en.md
@@ -0,0 +1,109 @@
+---
+layout: model
+title: English BertForQuestionAnswering model (from manav)
+author: John Snow Labs
+name: bert_qa_causal_qa
+date: 2023-11-14
+tags: [en, open_source, question_answering, bert, onnx]
+task: Question Answering
+language: en
+edition: Spark NLP 5.2.0
+spark_version: 3.0
+supported: true
+engine: onnx
+annotator: BertForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained Question Answering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `causal_qa` is an English model originally trained by `manav`.
+
+## Predicted Entities
+
+
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/bert_qa_causal_qa_en_5.2.0_3.0_1700003870784.zip){:.button.button-orange.button-orange-trans.arr.button-icon}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/bert_qa_causal_qa_en_5.2.0_3.0_1700003870784.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+document_assembler = MultiDocumentAssembler() \
+.setInputCols(["question", "context"]) \
+.setOutputCols(["document_question", "document_context"])
+
+spanClassifier = BertForQuestionAnswering.pretrained("bert_qa_causal_qa","en") \
+.setInputCols(["document_question", "document_context"]) \
+.setOutputCol("answer") \
+.setCaseSensitive(True)
+
+pipeline = Pipeline().setStages([
+document_assembler,
+spanClassifier
+])
+
+example = spark.createDataFrame([["What's my name?", "My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(example).transform(example)
+```
+```scala
+val document = new MultiDocumentAssembler()
+.setInputCols("question", "context")
+.setOutputCols("document_question", "document_context")
+
+val spanClassifier = BertForQuestionAnswering
+.pretrained("bert_qa_causal_qa","en")
+.setInputCols(Array("document_question", "document_context"))
+.setOutputCol("answer")
+.setCaseSensitive(true)
+.setMaxSentenceLength(512)
+
+val pipeline = new Pipeline().setStages(Array(document, spanClassifier))
+
+val example = Seq(
+("Where was John Lenon born?", "John Lenon was born in London and lived in Paris. My name is Sarah and I live in London."),
+("What's my name?", "My name is Clara and I live in Berkeley."))
+.toDF("question", "context")
+
+val result = pipeline.fit(example).transform(example)
+```
+
+{:.nlu-block}
+```python
+import nlu
+nlu.load("en.answer_question.bert.by_manav").predict("""What's my name?|||"My name is Clara and I live in Berkeley.""")
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|bert_qa_causal_qa|
+|Compatibility:|Spark NLP 5.2.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[sentence, token]|
+|Output Labels:|[embeddings]|
+|Language:|en|
+|Size:|1.3 GB|
+|Case sensitive:|true|
+|Max sentence length:|512|
+
+## References
+
+References
+
+- https://huggingface.co/manav/causal_qa
+- https://github.com/kstats/CausalQG
\ No newline at end of file
diff --git a/docs/_posts/ahmedlone127/2023-11-14-bert_qa_cgt_roberta_wwm_ext_large_zh.md b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_cgt_roberta_wwm_ext_large_zh.md
new file mode 100644
index 00000000000000..302fb156323515
--- /dev/null
+++ b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_cgt_roberta_wwm_ext_large_zh.md
@@ -0,0 +1,94 @@
+---
+layout: model
+title: Chinese BertForQuestionAnswering Large Cased model (from cgt)
+author: John Snow Labs
+name: bert_qa_cgt_roberta_wwm_ext_large
+date: 2023-11-14
+tags: [zh, open_source, bert, question_answering, onnx]
+task: Question Answering
+language: zh
+edition: Spark NLP 5.2.0
+spark_version: 3.0
+supported: true
+engine: onnx
+annotator: BertForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained BertForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `Roberta-wwm-ext-large-qa` is a Chinese model originally trained by `cgt`.
+
+## Predicted Entities
+
+
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/bert_qa_cgt_roberta_wwm_ext_large_zh_5.2.0_3.0_1699998636857.zip){:.button.button-orange.button-orange-trans.arr.button-icon}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/bert_qa_cgt_roberta_wwm_ext_large_zh_5.2.0_3.0_1699998636857.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = BertForQuestionAnswering.pretrained("bert_qa_cgt_roberta_wwm_ext_large","zh")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = BertForQuestionAnswering.pretrained("bert_qa_cgt_roberta_wwm_ext_large","zh")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq("What's my name?","My name is Clara and I live in Berkeley.").toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|bert_qa_cgt_roberta_wwm_ext_large|
+|Compatibility:|Spark NLP 5.2.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|zh|
+|Size:|1.2 GB|
+|Case sensitive:|true|
+|Max sentence length:|512|
+
+## References
+
+References
+
+- https://huggingface.co/cgt/Roberta-wwm-ext-large-qa
\ No newline at end of file
diff --git a/docs/_posts/ahmedlone127/2023-11-14-bert_qa_chemical_bert_uncased_squad2_en.md b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_chemical_bert_uncased_squad2_en.md
new file mode 100644
index 00000000000000..5e6c07eaf348c0
--- /dev/null
+++ b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_chemical_bert_uncased_squad2_en.md
@@ -0,0 +1,108 @@
+---
+layout: model
+title: English BertForQuestionAnswering model (from recobo)
+author: John Snow Labs
+name: bert_qa_chemical_bert_uncased_squad2
+date: 2023-11-14
+tags: [en, open_source, question_answering, bert, onnx]
+task: Question Answering
+language: en
+edition: Spark NLP 5.2.0
+spark_version: 3.0
+supported: true
+engine: onnx
+annotator: BertForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained Question Answering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `chemical-bert-uncased-squad2` is an English model originally trained by `recobo`.
+
+## Predicted Entities
+
+
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/bert_qa_chemical_bert_uncased_squad2_en_5.2.0_3.0_1699998908659.zip){:.button.button-orange.button-orange-trans.arr.button-icon}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/bert_qa_chemical_bert_uncased_squad2_en_5.2.0_3.0_1699998908659.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+document_assembler = MultiDocumentAssembler() \
+.setInputCols(["question", "context"]) \
+.setOutputCols(["document_question", "document_context"])
+
+spanClassifier = BertForQuestionAnswering.pretrained("bert_qa_chemical_bert_uncased_squad2","en") \
+.setInputCols(["document_question", "document_context"]) \
+.setOutputCol("answer") \
+.setCaseSensitive(True)
+
+pipeline = Pipeline().setStages([
+document_assembler,
+spanClassifier
+])
+
+example = spark.createDataFrame([["What's my name?", "My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(example).transform(example)
+```
+```scala
+val document = new MultiDocumentAssembler()
+.setInputCols("question", "context")
+.setOutputCols("document_question", "document_context")
+
+val spanClassifier = BertForQuestionAnswering
+.pretrained("bert_qa_chemical_bert_uncased_squad2","en")
+.setInputCols(Array("document_question", "document_context"))
+.setOutputCol("answer")
+.setCaseSensitive(true)
+.setMaxSentenceLength(512)
+
+val pipeline = new Pipeline().setStages(Array(document, spanClassifier))
+
+val example = Seq(
+("Where was John Lenon born?", "John Lenon was born in London and lived in Paris. My name is Sarah and I live in London."),
+("What's my name?", "My name is Clara and I live in Berkeley."))
+.toDF("question", "context")
+
+val result = pipeline.fit(example).transform(example)
+```
+
+{:.nlu-block}
+```python
+import nlu
+nlu.load("en.answer_question.squadv2_chemical.bert.uncased").predict("""What's my name?|||"My name is Clara and I live in Berkeley.""")
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|bert_qa_chemical_bert_uncased_squad2|
+|Compatibility:|Spark NLP 5.2.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[sentence, token]|
+|Output Labels:|[embeddings]|
+|Language:|en|
+|Size:|409.7 MB|
+|Case sensitive:|false|
+|Max sentence length:|512|
+
+## References
+
+References
+
+- https://huggingface.co/recobo/chemical-bert-uncased-squad2
\ No newline at end of file
diff --git a/docs/_posts/ahmedlone127/2023-11-14-bert_qa_chinese_pert_base_mrc_zh.md b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_chinese_pert_base_mrc_zh.md
new file mode 100644
index 00000000000000..274ffbf272f881
--- /dev/null
+++ b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_chinese_pert_base_mrc_zh.md
@@ -0,0 +1,116 @@
+---
+layout: model
+title: Chinese BertForQuestionAnswering model (from hfl)
+author: John Snow Labs
+name: bert_qa_chinese_pert_base_mrc
+date: 2023-11-14
+tags: [zh, open_source, question_answering, bert, onnx]
+task: Question Answering
+language: zh
+edition: Spark NLP 5.2.0
+spark_version: 3.0
+supported: true
+engine: onnx
+annotator: BertForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained Question Answering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `chinese-pert-base-mrc` is a Chinese model originally trained by `hfl`.
+
+## Predicted Entities
+
+
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/bert_qa_chinese_pert_base_mrc_zh_5.2.0_3.0_1699997205949.zip){:.button.button-orange.button-orange-trans.arr.button-icon}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/bert_qa_chinese_pert_base_mrc_zh_5.2.0_3.0_1699997205949.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+document_assembler = MultiDocumentAssembler() \
+.setInputCols(["question", "context"]) \
+.setOutputCols(["document_question", "document_context"])
+
+spanClassifier = BertForQuestionAnswering.pretrained("bert_qa_chinese_pert_base_mrc","zh") \
+.setInputCols(["document_question", "document_context"]) \
+.setOutputCol("answer") \
+.setCaseSensitive(True)
+
+pipeline = Pipeline().setStages([
+document_assembler,
+spanClassifier
+])
+
+example = spark.createDataFrame([["What's my name?", "My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(example).transform(example)
+```
+```scala
+val document = new MultiDocumentAssembler()
+.setInputCols("question", "context")
+.setOutputCols("document_question", "document_context")
+
+val spanClassifier = BertForQuestionAnswering
+.pretrained("bert_qa_chinese_pert_base_mrc","zh")
+.setInputCols(Array("document_question", "document_context"))
+.setOutputCol("answer")
+.setCaseSensitive(true)
+.setMaxSentenceLength(512)
+
+val pipeline = new Pipeline().setStages(Array(document, spanClassifier))
+
+val example = Seq(
+("Where was John Lenon born?", "John Lenon was born in London and lived in Paris. My name is Sarah and I live in London."),
+("What's my name?", "My name is Clara and I live in Berkeley."))
+.toDF("question", "context")
+
+val result = pipeline.fit(example).transform(example)
+```
+
+{:.nlu-block}
+```python
+import nlu
+nlu.load("zh.answer_question.bert.base").predict("""What's my name?|||"My name is Clara and I live in Berkeley.""")
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|bert_qa_chinese_pert_base_mrc|
+|Compatibility:|Spark NLP 5.2.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[sentence, token]|
+|Output Labels:|[embeddings]|
+|Language:|zh|
+|Size:|381.1 MB|
+|Case sensitive:|true|
+|Max sentence length:|512|
+
+## References
+
+References
+
+- https://huggingface.co/hfl/chinese-pert-base-mrc
+- https://github.com/ymcui/PERT
+- https://github.com/ymcui/Chinese-ELECTRA
+- https://github.com/ymcui/Chinese-Minority-PLM
+- https://github.com/ymcui/HFL-Anthology
+- https://github.com/ymcui/Chinese-BERT-wwm
+- https://github.com/ymcui/Chinese-XLNet
+- https://github.com/airaria/TextBrewer
+- https://github.com/ymcui/MacBERT
\ No newline at end of file
diff --git a/docs/_posts/ahmedlone127/2023-11-14-bert_qa_chinese_pert_large_mrc_zh.md b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_chinese_pert_large_mrc_zh.md
new file mode 100644
index 00000000000000..0264946e070ae6
--- /dev/null
+++ b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_chinese_pert_large_mrc_zh.md
@@ -0,0 +1,116 @@
+---
+layout: model
+title: Chinese BertForQuestionAnswering model (from hfl)
+author: John Snow Labs
+name: bert_qa_chinese_pert_large_mrc
+date: 2023-11-14
+tags: [zh, open_source, question_answering, bert, onnx]
+task: Question Answering
+language: zh
+edition: Spark NLP 5.2.0
+spark_version: 3.0
+supported: true
+engine: onnx
+annotator: BertForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained Question Answering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `chinese-pert-large-mrc` is a Chinese model originally trained by `hfl`.
+
+## Predicted Entities
+
+
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/bert_qa_chinese_pert_large_mrc_zh_5.2.0_3.0_1700003780458.zip){:.button.button-orange.button-orange-trans.arr.button-icon}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/bert_qa_chinese_pert_large_mrc_zh_5.2.0_3.0_1700003780458.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+document_assembler = MultiDocumentAssembler() \
+.setInputCols(["question", "context"]) \
+.setOutputCols(["document_question", "document_context"])
+
+spanClassifier = BertForQuestionAnswering.pretrained("bert_qa_chinese_pert_large_mrc","zh") \
+.setInputCols(["document_question", "document_context"]) \
+.setOutputCol("answer") \
+.setCaseSensitive(True)
+
+pipeline = Pipeline().setStages([
+document_assembler,
+spanClassifier
+])
+
+example = spark.createDataFrame([["What's my name?", "My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(example).transform(example)
+```
+```scala
+val document = new MultiDocumentAssembler()
+.setInputCols("question", "context")
+.setOutputCols("document_question", "document_context")
+
+val spanClassifier = BertForQuestionAnswering
+.pretrained("bert_qa_chinese_pert_large_mrc","zh")
+.setInputCols(Array("document_question", "document_context"))
+.setOutputCol("answer")
+.setCaseSensitive(true)
+.setMaxSentenceLength(512)
+
+val pipeline = new Pipeline().setStages(Array(document, spanClassifier))
+
+val example = Seq(
+("Where was John Lenon born?", "John Lenon was born in London and lived in Paris. My name is Sarah and I live in London."),
+("What's my name?", "My name is Clara and I live in Berkeley."))
+.toDF("question", "context")
+
+val result = pipeline.fit(example).transform(example)
+```
+
+{:.nlu-block}
+```python
+import nlu
+nlu.load("zh.answer_question.bert.large.by_hfl").predict("""What's my name?|||"My name is Clara and I live in Berkeley.""")
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|bert_qa_chinese_pert_large_mrc|
+|Compatibility:|Spark NLP 5.2.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[sentence, token]|
+|Output Labels:|[embeddings]|
+|Language:|zh|
+|Size:|1.2 GB|
+|Case sensitive:|true|
+|Max sentence length:|512|
+
+## References
+
+References
+
+- https://huggingface.co/hfl/chinese-pert-large-mrc
+- https://github.com/ymcui/PERT
+- https://github.com/ymcui/Chinese-ELECTRA
+- https://github.com/ymcui/Chinese-Minority-PLM
+- https://github.com/ymcui/HFL-Anthology
+- https://github.com/ymcui/Chinese-BERT-wwm
+- https://github.com/ymcui/Chinese-XLNet
+- https://github.com/airaria/TextBrewer
+- https://github.com/ymcui/MacBERT
\ No newline at end of file
diff --git a/docs/_posts/ahmedlone127/2023-11-14-bert_qa_chinese_pert_large_open_domain_mrc_zh.md b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_chinese_pert_large_open_domain_mrc_zh.md
new file mode 100644
index 00000000000000..e559db969f20da
--- /dev/null
+++ b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_chinese_pert_large_open_domain_mrc_zh.md
@@ -0,0 +1,101 @@
+---
+layout: model
+title: Chinese BertForQuestionAnswering model (from qalover)
+author: John Snow Labs
+name: bert_qa_chinese_pert_large_open_domain_mrc
+date: 2023-11-14
+tags: [zh, open_source, bert, question_answering, onnx]
+task: Question Answering
+language: zh
+edition: Spark NLP 5.2.0
+spark_version: 3.0
+supported: true
+engine: onnx
+annotator: BertForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained Question Answering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `chinese-pert-large-open-domain-mrc` is a Chinese model originally trained by `qalover`.
+
+## Predicted Entities
+
+
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/bert_qa_chinese_pert_large_open_domain_mrc_zh_5.2.0_3.0_1699999466545.zip){:.button.button-orange.button-orange-trans.arr.button-icon}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/bert_qa_chinese_pert_large_open_domain_mrc_zh_5.2.0_3.0_1699999466545.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+documentAssembler = MultiDocumentAssembler() \
+.setInputCols(["question", "context"]) \
+.setOutputCols(["document_question", "document_context"])
+
+spanClassifier = BertForQuestionAnswering.pretrained("bert_qa_chinese_pert_large_open_domain_mrc","zh") \
+.setInputCols(["document_question", "document_context"]) \
+.setOutputCol("answer")\
+.setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[documentAssembler, spanClassifier])
+
+data = spark.createDataFrame([["PUT YOUR QUESTION HERE", "PUT YOUR CONTEXT HERE"]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val documentAssembler = new MultiDocumentAssembler()
+.setInputCols(Array("question", "context"))
+.setOutputCols(Array("document_question", "document_context"))
+
+val spanClassifier = BertForQuestionAnswering.pretrained("bert_qa_chinese_pert_large_open_domain_mrc","zh")
+.setInputCols(Array("document_question", "document_context"))
+.setOutputCol("answer")
+.setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(documentAssembler, spanClassifier))
+
+val data = Seq("PUT YOUR QUESTION HERE", "PUT YOUR CONTEXT HERE").toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+{:.nlu-block}
+```python
+import nlu
+nlu.load("zh.answer_question.bert.large").predict("""PUT YOUR QUESTION HERE|||"PUT YOUR CONTEXT HERE""")
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|bert_qa_chinese_pert_large_open_domain_mrc|
+|Compatibility:|Spark NLP 5.2.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|zh|
+|Size:|1.2 GB|
+|Case sensitive:|true|
+|Max sentence length:|512|
+
+## References
+
+References
+
+- https://huggingface.co/qalover/chinese-pert-large-open-domain-mrc
+- https://github.com/dbiir/UER-py/
\ No newline at end of file
diff --git a/docs/_posts/ahmedlone127/2023-11-14-bert_qa_chinese_pretrain_mrc_macbert_large_zh.md b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_chinese_pretrain_mrc_macbert_large_zh.md
new file mode 100644
index 00000000000000..cf5f8096b68a25
--- /dev/null
+++ b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_chinese_pretrain_mrc_macbert_large_zh.md
@@ -0,0 +1,109 @@
+---
+layout: model
+title: Chinese BertForQuestionAnswering model (from luhua)
+author: John Snow Labs
+name: bert_qa_chinese_pretrain_mrc_macbert_large
+date: 2023-11-14
+tags: [zh, open_source, question_answering, bert, onnx]
+task: Question Answering
+language: zh
+edition: Spark NLP 5.2.0
+spark_version: 3.0
+supported: true
+engine: onnx
+annotator: BertForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained Question Answering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `chinese_pretrain_mrc_macbert_large` is a Chinese model originally trained by `luhua`.
+
+## Predicted Entities
+
+
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/bert_qa_chinese_pretrain_mrc_macbert_large_zh_5.2.0_3.0_1700004370480.zip){:.button.button-orange.button-orange-trans.arr.button-icon}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/bert_qa_chinese_pretrain_mrc_macbert_large_zh_5.2.0_3.0_1700004370480.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+document_assembler = MultiDocumentAssembler() \
+.setInputCols(["question", "context"]) \
+.setOutputCols(["document_question", "document_context"])
+
+spanClassifier = BertForQuestionAnswering.pretrained("bert_qa_chinese_pretrain_mrc_macbert_large","zh") \
+.setInputCols(["document_question", "document_context"]) \
+.setOutputCol("answer") \
+.setCaseSensitive(True)
+
+pipeline = Pipeline().setStages([
+document_assembler,
+spanClassifier
+])
+
+example = spark.createDataFrame([["What's my name?", "My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(example).transform(example)
+```
+```scala
+val document = new MultiDocumentAssembler()
+.setInputCols("question", "context")
+.setOutputCols("document_question", "document_context")
+
+val spanClassifier = BertForQuestionAnswering
+.pretrained("bert_qa_chinese_pretrain_mrc_macbert_large","zh")
+.setInputCols(Array("document_question", "document_context"))
+.setOutputCol("answer")
+.setCaseSensitive(true)
+.setMaxSentenceLength(512)
+
+val pipeline = new Pipeline().setStages(Array(document, spanClassifier))
+
+val example = Seq(
+("Where was John Lenon born?", "John Lenon was born in London and lived in Paris. My name is Sarah and I live in London."),
+("What's my name?", "My name is Clara and I live in Berkeley."))
+.toDF("question", "context")
+
+val result = pipeline.fit(example).transform(example)
+```
+
+{:.nlu-block}
+```python
+import nlu
+nlu.load("zh.answer_question.mac_bert.large").predict("""What's my name?|||"My name is Clara and I live in Berkeley.""")
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|bert_qa_chinese_pretrain_mrc_macbert_large|
+|Compatibility:|Spark NLP 5.2.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|zh|
+|Size:|1.2 GB|
+|Case sensitive:|true|
+|Max sentence length:|512|
+
+## References
+
+References
+
+- https://huggingface.co/luhua/chinese_pretrain_mrc_macbert_large
+- https://github.com/basketballandlearn/MRC_Competition_Dureader
\ No newline at end of file
diff --git a/docs/_posts/ahmedlone127/2023-11-14-bert_qa_chinese_pretrain_mrc_roberta_wwm_ext_large_zh.md b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_chinese_pretrain_mrc_roberta_wwm_ext_large_zh.md
new file mode 100644
index 00000000000000..183560d6b09e13
--- /dev/null
+++ b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_chinese_pretrain_mrc_roberta_wwm_ext_large_zh.md
@@ -0,0 +1,109 @@
+---
+layout: model
+title: Chinese BertForQuestionAnswering model (from luhua)
+author: John Snow Labs
+name: bert_qa_chinese_pretrain_mrc_roberta_wwm_ext_large
+date: 2023-11-14
+tags: [zh, open_source, question_answering, bert, onnx]
+task: Question Answering
+language: zh
+edition: Spark NLP 5.2.0
+spark_version: 3.0
+supported: true
+engine: onnx
+annotator: BertForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained Question Answering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `chinese_pretrain_mrc_roberta_wwm_ext_large` is a Chinese model originally trained by `luhua`.
+
+## Predicted Entities
+
+
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/bert_qa_chinese_pretrain_mrc_roberta_wwm_ext_large_zh_5.2.0_3.0_1700000001080.zip){:.button.button-orange.button-orange-trans.arr.button-icon}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/bert_qa_chinese_pretrain_mrc_roberta_wwm_ext_large_zh_5.2.0_3.0_1700000001080.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+document_assembler = MultiDocumentAssembler() \
+.setInputCols(["question", "context"]) \
+.setOutputCols(["document_question", "document_context"])
+
+spanClassifier = BertForQuestionAnswering.pretrained("bert_qa_chinese_pretrain_mrc_roberta_wwm_ext_large","zh") \
+.setInputCols(["document_question", "document_context"]) \
+.setOutputCol("answer") \
+.setCaseSensitive(True)
+
+pipeline = Pipeline().setStages([
+document_assembler,
+spanClassifier
+])
+
+example = spark.createDataFrame([["What's my name?", "My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(example).transform(example)
+```
+```scala
+val document = new MultiDocumentAssembler()
+.setInputCols("question", "context")
+.setOutputCols("document_question", "document_context")
+
+val spanClassifier = BertForQuestionAnswering
+.pretrained("bert_qa_chinese_pretrain_mrc_roberta_wwm_ext_large","zh")
+.setInputCols(Array("document_question", "document_context"))
+.setOutputCol("answer")
+.setCaseSensitive(true)
+.setMaxSentenceLength(512)
+
+val pipeline = new Pipeline().setStages(Array(document, spanClassifier))
+
+val example = Seq(
+("Where was John Lenon born?", "John Lenon was born in London and lived in Paris. My name is Sarah and I live in London."),
+("What's my name?", "My name is Clara and I live in Berkeley."))
+.toDF("question", "context")
+
+val result = pipeline.fit(example).transform(example)
+```
+
+{:.nlu-block}
+```python
+import nlu
+nlu.load("zh.answer_question.bert.large.by_luhua").predict("""What's my name?|||"My name is Clara and I live in Berkeley.""")
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|bert_qa_chinese_pretrain_mrc_roberta_wwm_ext_large|
+|Compatibility:|Spark NLP 5.2.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|zh|
+|Size:|1.2 GB|
+|Case sensitive:|true|
+|Max sentence length:|512|
+
+## References
+
+References
+
+- https://huggingface.co/luhua/chinese_pretrain_mrc_roberta_wwm_ext_large
+- https://github.com/basketballandlearn/MRC_Competition_Dureader
\ No newline at end of file
diff --git a/docs/_posts/ahmedlone127/2023-11-14-bert_qa_chinese_question_answering_zh.md b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_chinese_question_answering_zh.md
new file mode 100644
index 00000000000000..c195789888dec3
--- /dev/null
+++ b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_chinese_question_answering_zh.md
@@ -0,0 +1,97 @@
+---
+layout: model
+title: Chinese BertForQuestionAnswering Cased model (from NchuNLP)
+author: John Snow Labs
+name: bert_qa_chinese_question_answering
+date: 2023-11-14
+tags: [zh, open_source, bert, question_answering, onnx]
+task: Question Answering
+language: zh
+edition: Spark NLP 5.2.0
+spark_version: 3.0
+supported: true
+engine: onnx
+annotator: BertForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained BertForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `Chinese-Question-Answering` is a Chinese model originally trained by `NchuNLP`.
+
+## Predicted Entities
+
+
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/bert_qa_chinese_question_answering_zh_5.2.0_3.0_1700000722663.zip){:.button.button-orange.button-orange-trans.arr.button-icon}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/bert_qa_chinese_question_answering_zh_5.2.0_3.0_1700000722663.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = BertForQuestionAnswering.pretrained("bert_qa_chinese_question_answering","zh")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = BertForQuestionAnswering.pretrained("bert_qa_chinese_question_answering","zh")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq("What's my name?","My name is Clara and I live in Berkeley.").toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|bert_qa_chinese_question_answering|
+|Compatibility:|Spark NLP 5.2.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|zh|
+|Size:|381.0 MB|
+|Case sensitive:|true|
+|Max sentence length:|512|
+
+## References
+
+References
+
+- https://huggingface.co/NchuNLP/Chinese-Question-Answering
+- https://nlpnchu.org/
+- https://demo.nlpnchu.org/
+- https://github.com/NCHU-NLP-Lab
\ No newline at end of file
diff --git a/docs/_posts/ahmedlone127/2023-11-14-bert_qa_chinesebert_zh.md b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_chinesebert_zh.md
new file mode 100644
index 00000000000000..a902430606f116
--- /dev/null
+++ b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_chinesebert_zh.md
@@ -0,0 +1,94 @@
+---
+layout: model
+title: Chinese BertForQuestionAnswering Cased model (from dengwei072)
+author: John Snow Labs
+name: bert_qa_chinesebert
+date: 2023-11-14
+tags: [zh, open_source, bert, question_answering, onnx]
+task: Question Answering
+language: zh
+edition: Spark NLP 5.2.0
+spark_version: 3.0
+supported: true
+engine: onnx
+annotator: BertForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained BertForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `ChineseBERT` is a Chinese model originally trained by `dengwei072`.
+
+## Predicted Entities
+
+
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/bert_qa_chinesebert_zh_5.2.0_3.0_1700000253518.zip){:.button.button-orange.button-orange-trans.arr.button-icon}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/bert_qa_chinesebert_zh_5.2.0_3.0_1700000253518.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = BertForQuestionAnswering.pretrained("bert_qa_chinesebert","zh")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = BertForQuestionAnswering.pretrained("bert_qa_chinesebert","zh")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq("What's my name?","My name is Clara and I live in Berkeley.").toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|bert_qa_chinesebert|
+|Compatibility:|Spark NLP 5.2.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|zh|
+|Size:|381.0 MB|
+|Case sensitive:|true|
+|Max sentence length:|512|
+
+## References
+
+References
+
+- https://huggingface.co/dengwei072/ChineseBERT
\ No newline at end of file
diff --git a/docs/_posts/ahmedlone127/2023-11-14-bert_qa_covid_squad_en.md b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_covid_squad_en.md
new file mode 100644
index 00000000000000..48e421202de042
--- /dev/null
+++ b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_covid_squad_en.md
@@ -0,0 +1,108 @@
+---
+layout: model
+title: English BertForQuestionAnswering model (from graviraja)
+author: John Snow Labs
+name: bert_qa_covid_squad
+date: 2023-11-14
+tags: [en, open_source, question_answering, bert, onnx]
+task: Question Answering
+language: en
+edition: Spark NLP 5.2.0
+spark_version: 3.0
+supported: true
+engine: onnx
+annotator: BertForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained Question Answering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `covid_squad` is a English model originally trained by `graviraja`.
+
+## Predicted Entities
+
+
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/bert_qa_covid_squad_en_5.2.0_3.0_1699997520672.zip){:.button.button-orange.button-orange-trans.arr.button-icon}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/bert_qa_covid_squad_en_5.2.0_3.0_1699997520672.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+document_assembler = MultiDocumentAssembler() \
+.setInputCols(["question", "context"]) \
+.setOutputCols(["document_question", "document_context"])
+
+spanClassifier = BertForQuestionAnswering.pretrained("bert_qa_covid_squad","en") \
+.setInputCols(["document_question", "document_context"]) \
+.setOutputCol("answer") \
+.setCaseSensitive(True)
+
+pipeline = Pipeline().setStages([
+document_assembler,
+spanClassifier
+])
+
+example = spark.createDataFrame([["What's my name?", "My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(example).transform(example)
+```
+```scala
+val document = new MultiDocumentAssembler()
+.setInputCols("question", "context")
+.setOutputCols("document_question", "document_context")
+
+val spanClassifier = BertForQuestionAnswering
+.pretrained("bert_qa_covid_squad","en")
+.setInputCols(Array("document_question", "document_context"))
+.setOutputCol("answer")
+.setCaseSensitive(true)
+.setMaxSentenceLength(512)
+
+val pipeline = new Pipeline().setStages(Array(document, spanClassifier))
+
+val example = Seq(
+("Where was John Lenon born?", "John Lenon was born in London and lived in Paris. My name is Sarah and I live in London."),
+("What's my name?", "My name is Clara and I live in Berkeley."))
+.toDF("question", "context")
+
+val result = pipeline.fit(example).transform(example)
+```
+
+{:.nlu-block}
+```python
+import nlu
+nlu.load("en.answer_question.squad_covid.bert").predict("""What's my name?|||"My name is Clara and I live in Berkeley.""")
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|bert_qa_covid_squad|
+|Compatibility:|Spark NLP 5.2.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|407.1 MB|
+|Case sensitive:|true|
+|Max sentence length:|512|
+
+## References
+
+References
+
+- https://huggingface.co/graviraja/covid_squad
\ No newline at end of file
diff --git a/docs/_posts/ahmedlone127/2023-11-14-bert_qa_covidbert_squad_en.md b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_covidbert_squad_en.md
new file mode 100644
index 00000000000000..029ed8b2301846
--- /dev/null
+++ b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_covidbert_squad_en.md
@@ -0,0 +1,108 @@
+---
+layout: model
+title: English BertForQuestionAnswering model (from graviraja)
+author: John Snow Labs
+name: bert_qa_covidbert_squad
+date: 2023-11-14
+tags: [en, open_source, question_answering, bert, onnx]
+task: Question Answering
+language: en
+edition: Spark NLP 5.2.0
+spark_version: 3.0
+supported: true
+engine: onnx
+annotator: BertForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained Question Answering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `covidbert_squad` is a English model originally trained by `graviraja`.
+
+## Predicted Entities
+
+
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/bert_qa_covidbert_squad_en_5.2.0_3.0_1700004666926.zip){:.button.button-orange.button-orange-trans.arr.button-icon}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/bert_qa_covidbert_squad_en_5.2.0_3.0_1700004666926.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+document_assembler = MultiDocumentAssembler() \
+.setInputCols(["question", "context"]) \
+.setOutputCols(["document_question", "document_context"])
+
+spanClassifier = BertForQuestionAnswering.pretrained("bert_qa_covidbert_squad","en") \
+.setInputCols(["document_question", "document_context"]) \
+.setOutputCol("answer") \
+.setCaseSensitive(True)
+
+pipeline = Pipeline().setStages([
+document_assembler,
+spanClassifier
+])
+
+example = spark.createDataFrame([["What's my name?", "My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(example).transform(example)
+```
+```scala
+val document = new MultiDocumentAssembler()
+.setInputCols("question", "context")
+.setOutputCols("document_question", "document_context")
+
+val spanClassifier = BertForQuestionAnswering
+.pretrained("bert_qa_covidbert_squad","en")
+.setInputCols(Array("document_question", "document_context"))
+.setOutputCol("answer")
+.setCaseSensitive(true)
+.setMaxSentenceLength(512)
+
+val pipeline = new Pipeline().setStages(Array(document, spanClassifier))
+
+val example = Seq(
+("Where was John Lenon born?", "John Lenon was born in London and lived in Paris. My name is Sarah and I live in London."),
+("What's my name?", "My name is Clara and I live in Berkeley."))
+.toDF("question", "context")
+
+val result = pipeline.fit(example).transform(example)
+```
+
+{:.nlu-block}
+```python
+import nlu
+nlu.load("en.answer_question.squad.covid_bert").predict("""What's my name?|||"My name is Clara and I live in Berkeley.""")
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|bert_qa_covidbert_squad|
+|Compatibility:|Spark NLP 5.2.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|407.1 MB|
+|Case sensitive:|true|
+|Max sentence length:|512|
+
+## References
+
+References
+
+- https://huggingface.co/graviraja/covidbert_squad
\ No newline at end of file
diff --git a/docs/_posts/ahmedlone127/2023-11-14-bert_qa_csarron_bert_base_uncased_squad_v1_en.md b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_csarron_bert_base_uncased_squad_v1_en.md
new file mode 100644
index 00000000000000..971e4b8c65255f
--- /dev/null
+++ b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_csarron_bert_base_uncased_squad_v1_en.md
@@ -0,0 +1,114 @@
+---
+layout: model
+title: English BertForQuestionAnswering model (from csarron)
+author: John Snow Labs
+name: bert_qa_csarron_bert_base_uncased_squad_v1
+date: 2023-11-14
+tags: [en, open_source, question_answering, bert, onnx]
+task: Question Answering
+language: en
+edition: Spark NLP 5.2.0
+spark_version: 3.0
+supported: true
+engine: onnx
+annotator: BertForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained Question Answering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `bert-base-uncased-squad-v1` is a English model originally trained by `csarron`.
+
+## Predicted Entities
+
+
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/bert_qa_csarron_bert_base_uncased_squad_v1_en_5.2.0_3.0_1700004972342.zip){:.button.button-orange.button-orange-trans.arr.button-icon}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/bert_qa_csarron_bert_base_uncased_squad_v1_en_5.2.0_3.0_1700004972342.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+document_assembler = MultiDocumentAssembler() \
+.setInputCols(["question", "context"]) \
+.setOutputCols(["document_question", "document_context"])
+
+spanClassifier = BertForQuestionAnswering.pretrained("bert_qa_csarron_bert_base_uncased_squad_v1","en") \
+.setInputCols(["document_question", "document_context"]) \
+.setOutputCol("answer") \
+.setCaseSensitive(True)
+
+pipeline = Pipeline().setStages([
+document_assembler,
+spanClassifier
+])
+
+example = spark.createDataFrame([["What's my name?", "My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(example).transform(example)
+```
+```scala
+val document = new MultiDocumentAssembler()
+.setInputCols("question", "context")
+.setOutputCols("document_question", "document_context")
+
+val spanClassifier = BertForQuestionAnswering
+.pretrained("bert_qa_csarron_bert_base_uncased_squad_v1","en")
+.setInputCols(Array("document_question", "document_context"))
+.setOutputCol("answer")
+.setCaseSensitive(true)
+.setMaxSentenceLength(512)
+
+val pipeline = new Pipeline().setStages(Array(document, spanClassifier))
+
+val example = Seq(
+("Where was John Lenon born?", "John Lenon was born in London and lived in Paris. My name is Sarah and I live in London."),
+("What's my name?", "My name is Clara and I live in Berkeley."))
+.toDF("question", "context")
+
+val result = pipeline.fit(example).transform(example)
+```
+
+{:.nlu-block}
+```python
+import nlu
+nlu.load("en.answer_question.squad.bert.base_uncased.by_csarron").predict("""What's my name?|||"My name is Clara and I live in Berkeley.""")
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|bert_qa_csarron_bert_base_uncased_squad_v1|
+|Compatibility:|Spark NLP 5.2.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|407.2 MB|
+|Case sensitive:|false|
+|Max sentence length:|512|
+
+## References
+
+References
+
+- https://huggingface.co/csarron/bert-base-uncased-squad-v1
+- https://twitter.com/sysnlp
+- https://awk.ai/
+- https://github.com/csarron
+- https://www.aclweb.org/anthology/N19-1423/
+- https://rajpurkar.github.io/SQuAD-explorer
+- https://www.aclweb.org/anthology/N19-1423.pdf
\ No newline at end of file
diff --git a/docs/_posts/ahmedlone127/2023-11-14-bert_qa_cuad_pol_bad_en.md b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_cuad_pol_bad_en.md
new file mode 100644
index 00000000000000..c6ff6bb5182c6c
--- /dev/null
+++ b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_cuad_pol_bad_en.md
@@ -0,0 +1,94 @@
+---
+layout: model
+title: English BertForQuestionAnswering Cased model (from beautifulpichai)
+author: John Snow Labs
+name: bert_qa_cuad_pol_bad
+date: 2023-11-14
+tags: [en, open_source, bert, question_answering, onnx]
+task: Question Answering
+language: en
+edition: Spark NLP 5.2.0
+spark_version: 3.0
+supported: true
+engine: onnx
+annotator: BertForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained BertForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `cuad_pol_bad` is a English model originally trained by `beautifulpichai`.
+
+## Predicted Entities
+
+
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/bert_qa_cuad_pol_bad_en_5.2.0_3.0_1700001346182.zip){:.button.button-orange.button-orange-trans.arr.button-icon}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/bert_qa_cuad_pol_bad_en_5.2.0_3.0_1700001346182.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = BertForQuestionAnswering.pretrained("bert_qa_cuad_pol_bad","en")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = BertForQuestionAnswering.pretrained("bert_qa_cuad_pol_bad","en")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq("What's my name?","My name is Clara and I live in Berkeley.").toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|bert_qa_cuad_pol_bad|
+|Compatibility:|Spark NLP 5.2.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|1.2 GB|
+|Case sensitive:|true|
+|Max sentence length:|512|
+
+## References
+
+References
+
+- https://huggingface.co/beautifulpichai/cuad_pol_bad
\ No newline at end of file
diff --git a/docs/_posts/ahmedlone127/2023-11-14-bert_qa_cuad_pol_good_en.md b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_cuad_pol_good_en.md
new file mode 100644
index 00000000000000..5ea95029763531
--- /dev/null
+++ b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_cuad_pol_good_en.md
@@ -0,0 +1,94 @@
+---
+layout: model
+title: English BertForQuestionAnswering Cased model (from beautifulpichai)
+author: John Snow Labs
+name: bert_qa_cuad_pol_good
+date: 2023-11-14
+tags: [en, open_source, bert, question_answering, onnx]
+task: Question Answering
+language: en
+edition: Spark NLP 5.2.0
+spark_version: 3.0
+supported: true
+engine: onnx
+annotator: BertForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained BertForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `cuad_pol_good` is an English model originally trained by `beautifulpichai`.
+
+## Predicted Entities
+
+
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/bert_qa_cuad_pol_good_en_5.2.0_3.0_1700000901597.zip){:.button.button-orange.button-orange-trans.arr.button-icon}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/bert_qa_cuad_pol_good_en_5.2.0_3.0_1700000901597.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = BertForQuestionAnswering.pretrained("bert_qa_cuad_pol_good","en")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = BertForQuestionAnswering.pretrained("bert_qa_cuad_pol_good","en")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq(("What's my name?","My name is Clara and I live in Berkeley.")).toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|bert_qa_cuad_pol_good|
+|Compatibility:|Spark NLP 5.2.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|1.2 GB|
+|Case sensitive:|true|
+|Max sentence length:|512|
+
+## References
+
+References
+
+- https://huggingface.co/beautifulpichai/cuad_pol_good
\ No newline at end of file
diff --git a/docs/_posts/ahmedlone127/2023-11-14-bert_qa_cyrusmv_finetuned_squad_en.md b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_cyrusmv_finetuned_squad_en.md
new file mode 100644
index 00000000000000..a25b02a08e42c3
--- /dev/null
+++ b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_cyrusmv_finetuned_squad_en.md
@@ -0,0 +1,94 @@
+---
+layout: model
+title: English BertForQuestionAnswering Cased model (from cyrusmv)
+author: John Snow Labs
+name: bert_qa_cyrusmv_finetuned_squad
+date: 2023-11-14
+tags: [en, open_source, bert, question_answering, onnx]
+task: Question Answering
+language: en
+edition: Spark NLP 5.2.0
+spark_version: 3.0
+supported: true
+engine: onnx
+annotator: BertForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained BertForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `bert-finetuned-squad` is an English model originally trained by `cyrusmv`.
+
+## Predicted Entities
+
+
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/bert_qa_cyrusmv_finetuned_squad_en_5.2.0_3.0_1700004138417.zip){:.button.button-orange.button-orange-trans.arr.button-icon}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/bert_qa_cyrusmv_finetuned_squad_en_5.2.0_3.0_1700004138417.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = BertForQuestionAnswering.pretrained("bert_qa_cyrusmv_finetuned_squad","en")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = BertForQuestionAnswering.pretrained("bert_qa_cyrusmv_finetuned_squad","en")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq(("What's my name?","My name is Clara and I live in Berkeley.")).toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|bert_qa_cyrusmv_finetuned_squad|
+|Compatibility:|Spark NLP 5.2.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|403.6 MB|
+|Case sensitive:|true|
+|Max sentence length:|512|
+
+## References
+
+References
+
+- https://huggingface.co/cyrusmv/bert-finetuned-squad
\ No newline at end of file
diff --git a/docs/_posts/ahmedlone127/2023-11-14-bert_qa_danish_bert_botxo_qa_squad_da.md b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_danish_bert_botxo_qa_squad_da.md
new file mode 100644
index 00000000000000..fe1ba0a3b1b433
--- /dev/null
+++ b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_danish_bert_botxo_qa_squad_da.md
@@ -0,0 +1,111 @@
+---
+layout: model
+title: Danish BertForQuestionAnswering model (from jacobshein)
+author: John Snow Labs
+name: bert_qa_danish_bert_botxo_qa_squad
+date: 2023-11-14
+tags: [da, open_source, question_answering, bert, onnx]
+task: Question Answering
+language: da
+edition: Spark NLP 5.2.0
+spark_version: 3.0
+supported: true
+engine: onnx
+annotator: BertForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained Question Answering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `danish-bert-botxo-qa-squad` is a Danish model originally trained by `jacobshein`.
+
+## Predicted Entities
+
+
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/bert_qa_danish_bert_botxo_qa_squad_da_5.2.0_3.0_1700001626558.zip){:.button.button-orange.button-orange-trans.arr.button-icon}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/bert_qa_danish_bert_botxo_qa_squad_da_5.2.0_3.0_1700001626558.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+document_assembler = MultiDocumentAssembler() \
+.setInputCols(["question", "context"]) \
+.setOutputCols(["document_question", "document_context"])
+
+spanClassifier = BertForQuestionAnswering.pretrained("bert_qa_danish_bert_botxo_qa_squad","da") \
+.setInputCols(["document_question", "document_context"]) \
+.setOutputCol("answer") \
+.setCaseSensitive(True)
+
+pipeline = Pipeline().setStages([
+document_assembler,
+spanClassifier
+])
+
+example = spark.createDataFrame([["What's my name?", "My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(example).transform(example)
+```
+```scala
+val document = new MultiDocumentAssembler()
+.setInputCols("question", "context")
+.setOutputCols("document_question", "document_context")
+
+val spanClassifier = BertForQuestionAnswering
+.pretrained("bert_qa_danish_bert_botxo_qa_squad","da")
+.setInputCols(Array("document_question", "document_context"))
+.setOutputCol("answer")
+.setCaseSensitive(true)
+.setMaxSentenceLength(512)
+
+val pipeline = new Pipeline().setStages(Array(document, spanClassifier))
+
+val example = Seq(
+("Where was John Lenon born?", "John Lenon was born in London and lived in Paris. My name is Sarah and I live in London."),
+("What's my name?", "My name is Clara and I live in Berkeley."))
+.toDF("question", "context")
+
+val result = pipeline.fit(example).transform(example)
+```
+
+{:.nlu-block}
+```python
+import nlu
+nlu.load("da.answer_question.squad.bert").predict("""What's my name?|||"My name is Clara and I live in Berkeley.""")
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|bert_qa_danish_bert_botxo_qa_squad|
+|Compatibility:|Spark NLP 5.2.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|da|
+|Size:|412.3 MB|
+|Case sensitive:|true|
+|Max sentence length:|512|
+
+## References
+
+References
+
+- https://huggingface.co/jacobshein/danish-bert-botxo-qa-squad
+- https://jacobhein.com/#contact
+- https://github.com/botxo/nordic_bert
+- https://github.com/ccasimiro88/TranslateAlignRetrieve/tree/multilingual/squads-tar/da
\ No newline at end of file
diff --git a/docs/_posts/ahmedlone127/2023-11-14-bert_qa_darshana1406_base_multilingual_cased_finetuned_squad_xx.md b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_darshana1406_base_multilingual_cased_finetuned_squad_xx.md
new file mode 100644
index 00000000000000..ef70182fedf4a8
--- /dev/null
+++ b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_darshana1406_base_multilingual_cased_finetuned_squad_xx.md
@@ -0,0 +1,94 @@
+---
+layout: model
+title: Multilingual BertForQuestionAnswering Base Cased model (from darshana1406)
+author: John Snow Labs
+name: bert_qa_darshana1406_base_multilingual_cased_finetuned_squad
+date: 2023-11-14
+tags: [xx, open_source, bert, question_answering, onnx]
+task: Question Answering
+language: xx
+edition: Spark NLP 5.2.0
+spark_version: 3.0
+supported: true
+engine: onnx
+annotator: BertForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained BertForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `bert-base-multilingual-cased-finetuned-squad` is a Multilingual model originally trained by `darshana1406`.
+
+## Predicted Entities
+
+
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/bert_qa_darshana1406_base_multilingual_cased_finetuned_squad_xx_5.2.0_3.0_1700001977877.zip){:.button.button-orange.button-orange-trans.arr.button-icon}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/bert_qa_darshana1406_base_multilingual_cased_finetuned_squad_xx_5.2.0_3.0_1700001977877.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = BertForQuestionAnswering.pretrained("bert_qa_darshana1406_base_multilingual_cased_finetuned_squad","xx")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = BertForQuestionAnswering.pretrained("bert_qa_darshana1406_base_multilingual_cased_finetuned_squad","xx")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq(("What's my name?","My name is Clara and I live in Berkeley.")).toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|bert_qa_darshana1406_base_multilingual_cased_finetuned_squad|
+|Compatibility:|Spark NLP 5.2.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|xx|
+|Size:|665.0 MB|
+|Case sensitive:|true|
+|Max sentence length:|512|
+
+## References
+
+References
+
+- https://huggingface.co/darshana1406/bert-base-multilingual-cased-finetuned-squad
\ No newline at end of file
diff --git a/docs/_posts/ahmedlone127/2023-11-14-bert_qa_dbg_finetuned_squad_en.md b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_dbg_finetuned_squad_en.md
new file mode 100644
index 00000000000000..520051cb620b7d
--- /dev/null
+++ b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_dbg_finetuned_squad_en.md
@@ -0,0 +1,94 @@
+---
+layout: model
+title: English BertForQuestionAnswering Cased model (from Shanny)
+author: John Snow Labs
+name: bert_qa_dbg_finetuned_squad
+date: 2023-11-14
+tags: [en, open_source, bert, question_answering, onnx]
+task: Question Answering
+language: en
+edition: Spark NLP 5.2.0
+spark_version: 3.0
+supported: true
+engine: onnx
+annotator: BertForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained BertForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `dbgbert-finetuned-squad` is an English model originally trained by `Shanny`.
+
+## Predicted Entities
+
+
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/bert_qa_dbg_finetuned_squad_en_5.2.0_3.0_1700002317552.zip){:.button.button-orange.button-orange-trans.arr.button-icon}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/bert_qa_dbg_finetuned_squad_en_5.2.0_3.0_1700002317552.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = BertForQuestionAnswering.pretrained("bert_qa_dbg_finetuned_squad","en")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = BertForQuestionAnswering.pretrained("bert_qa_dbg_finetuned_squad","en")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq(("What's my name?","My name is Clara and I live in Berkeley.")).toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|bert_qa_dbg_finetuned_squad|
+|Compatibility:|Spark NLP 5.2.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|403.6 MB|
+|Case sensitive:|true|
+|Max sentence length:|512|
+
+## References
+
+References
+
+- https://huggingface.co/Shanny/dbgbert-finetuned-squad
\ No newline at end of file
diff --git a/docs/_posts/ahmedlone127/2023-11-14-bert_qa_deberta_v3_base_en.md b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_deberta_v3_base_en.md
new file mode 100644
index 00000000000000..60e852afc94d90
--- /dev/null
+++ b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_deberta_v3_base_en.md
@@ -0,0 +1,94 @@
+---
+layout: model
+title: English BertForQuestionAnswering Base Cased model (from vvincentt)
+author: John Snow Labs
+name: bert_qa_deberta_v3_base
+date: 2023-11-14
+tags: [en, open_source, bert, question_answering, onnx]
+task: Question Answering
+language: en
+edition: Spark NLP 5.2.0
+spark_version: 3.0
+supported: true
+engine: onnx
+annotator: BertForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained BertForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `deberta-v3-base` is an English model originally trained by `vvincentt`.
+
+## Predicted Entities
+
+
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/bert_qa_deberta_v3_base_en_5.2.0_3.0_1700005203611.zip){:.button.button-orange.button-orange-trans.arr.button-icon}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/bert_qa_deberta_v3_base_en_5.2.0_3.0_1700005203611.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = BertForQuestionAnswering.pretrained("bert_qa_deberta_v3_base","en")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = BertForQuestionAnswering.pretrained("bert_qa_deberta_v3_base","en")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq(("What's my name?","My name is Clara and I live in Berkeley.")).toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|bert_qa_deberta_v3_base|
+|Compatibility:|Spark NLP 5.2.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|403.6 MB|
+|Case sensitive:|true|
+|Max sentence length:|512|
+
+## References
+
+References
+
+- https://huggingface.co/vvincentt/deberta-v3-base
\ No newline at end of file
diff --git a/docs/_posts/ahmedlone127/2023-11-14-bert_qa_debug_squad_en.md b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_debug_squad_en.md
new file mode 100644
index 00000000000000..687023d74f7d24
--- /dev/null
+++ b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_debug_squad_en.md
@@ -0,0 +1,108 @@
+---
+layout: model
+title: English BertForQuestionAnswering model (from ArpanZS)
+author: John Snow Labs
+name: bert_qa_debug_squad
+date: 2023-11-14
+tags: [en, open_source, question_answering, bert, onnx]
+task: Question Answering
+language: en
+edition: Spark NLP 5.2.0
+spark_version: 3.0
+supported: true
+engine: onnx
+annotator: BertForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained Question Answering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `debug_squad` is an English model originally trained by `ArpanZS`.
+
+## Predicted Entities
+
+
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/bert_qa_debug_squad_en_5.2.0_3.0_1700005482684.zip){:.button.button-orange.button-orange-trans.arr.button-icon}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/bert_qa_debug_squad_en_5.2.0_3.0_1700005482684.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+document_assembler = MultiDocumentAssembler() \
+.setInputCols(["question", "context"]) \
+.setOutputCols(["document_question", "document_context"])
+
+spanClassifier = BertForQuestionAnswering.pretrained("bert_qa_debug_squad","en") \
+.setInputCols(["document_question", "document_context"]) \
+.setOutputCol("answer") \
+.setCaseSensitive(True)
+
+pipeline = Pipeline().setStages([
+document_assembler,
+spanClassifier
+])
+
+example = spark.createDataFrame([["What's my name?", "My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(example).transform(example)
+```
+```scala
+val document = new MultiDocumentAssembler()
+.setInputCols("question", "context")
+.setOutputCols("document_question", "document_context")
+
+val spanClassifier = BertForQuestionAnswering
+.pretrained("bert_qa_debug_squad","en")
+.setInputCols(Array("document_question", "document_context"))
+.setOutputCol("answer")
+.setCaseSensitive(true)
+.setMaxSentenceLength(512)
+
+val pipeline = new Pipeline().setStages(Array(document, spanClassifier))
+
+val example = Seq(
+("Where was John Lenon born?", "John Lenon was born in London and lived in Paris. My name is Sarah and I live in London."),
+("What's my name?", "My name is Clara and I live in Berkeley."))
+.toDF("question", "context")
+
+val result = pipeline.fit(example).transform(example)
+```
+
+{:.nlu-block}
+```python
+import nlu
+nlu.load("en.answer_question.squad.bert.by_ArpanZS").predict("""What's my name?|||"My name is Clara and I live in Berkeley.""")
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|bert_qa_debug_squad|
+|Compatibility:|Spark NLP 5.2.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|408.2 MB|
+|Case sensitive:|true|
+|Max sentence length:|512|
+
+## References
+
+References
+
+- https://huggingface.co/ArpanZS/debug_squad
\ No newline at end of file
diff --git a/docs/_posts/ahmedlone127/2023-11-14-bert_qa_deep_pavlov_full_2_ru.md b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_deep_pavlov_full_2_ru.md
new file mode 100644
index 00000000000000..1aa0189a11f141
--- /dev/null
+++ b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_deep_pavlov_full_2_ru.md
@@ -0,0 +1,94 @@
+---
+layout: model
+title: Russian BertForQuestionAnswering Cased model (from ruselkomp)
+author: John Snow Labs
+name: bert_qa_deep_pavlov_full_2
+date: 2023-11-14
+tags: [ru, open_source, bert, question_answering, onnx]
+task: Question Answering
+language: ru
+edition: Spark NLP 5.2.0
+spark_version: 3.0
+supported: true
+engine: onnx
+annotator: BertForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained Question Answering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `deep-pavlov-full-2` is a Russian model originally trained by `ruselkomp`.
+
+## Predicted Entities
+
+
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/bert_qa_deep_pavlov_full_2_ru_5.2.0_3.0_1700005836423.zip){:.button.button-orange.button-orange-trans.arr.button-icon}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/bert_qa_deep_pavlov_full_2_ru_5.2.0_3.0_1700005836423.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+documentAssembler = MultiDocumentAssembler() \
+ .setInputCols(["question", "context"]) \
+ .setOutputCols(["document_question", "document_context"])
+
+spanClassifier = BertForQuestionAnswering.pretrained("bert_qa_deep_pavlov_full_2","ru") \
+ .setInputCols(["document_question", "document_context"]) \
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[documentAssembler, spanClassifier])
+
+data = spark.createDataFrame([["Как меня зовут?", "Меня зовут Клара, и я живу в Беркли."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val documentAssembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val spanClassifier = BertForQuestionAnswering.pretrained("bert_qa_deep_pavlov_full_2","ru")
+  .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(documentAssembler, spanClassifier))
+
+val data = Seq(("Как меня зовут?", "Меня зовут Клара, и я живу в Беркли.")).toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|bert_qa_deep_pavlov_full_2|
+|Compatibility:|Spark NLP 5.2.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|ru|
+|Size:|664.3 MB|
+|Case sensitive:|true|
+|Max sentence length:|512|
+
+## References
+
+References
+
+- https://huggingface.co/ruselkomp/deep-pavlov-full-2
\ No newline at end of file
diff --git a/docs/_posts/ahmedlone127/2023-11-14-bert_qa_deep_pavlov_full_ru.md b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_deep_pavlov_full_ru.md
new file mode 100644
index 00000000000000..dc5934f45de403
--- /dev/null
+++ b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_deep_pavlov_full_ru.md
@@ -0,0 +1,94 @@
+---
+layout: model
+title: Russian BertForQuestionAnswering Cased model (from ruselkomp)
+author: John Snow Labs
+name: bert_qa_deep_pavlov_full
+date: 2023-11-14
+tags: [ru, open_source, bert, question_answering, onnx]
+task: Question Answering
+language: ru
+edition: Spark NLP 5.2.0
+spark_version: 3.0
+supported: true
+engine: onnx
+annotator: BertForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained Question Answering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `deep-pavlov-full` is a Russian model originally trained by `ruselkomp`.
+
+## Predicted Entities
+
+
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/bert_qa_deep_pavlov_full_ru_5.2.0_3.0_1700002688118.zip){:.button.button-orange.button-orange-trans.arr.button-icon}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/bert_qa_deep_pavlov_full_ru_5.2.0_3.0_1700002688118.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+documentAssembler = MultiDocumentAssembler() \
+ .setInputCols(["question", "context"]) \
+ .setOutputCols(["document_question", "document_context"])
+
+spanClassifier = BertForQuestionAnswering.pretrained("bert_qa_deep_pavlov_full","ru") \
+ .setInputCols(["document_question", "document_context"]) \
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[documentAssembler, spanClassifier])
+
+data = spark.createDataFrame([["Как меня зовут?", "Меня зовут Клара, и я живу в Беркли."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val documentAssembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val spanClassifier = BertForQuestionAnswering.pretrained("bert_qa_deep_pavlov_full","ru")
+  .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(documentAssembler, spanClassifier))
+
+val data = Seq(("Как меня зовут?", "Меня зовут Клара, и я живу в Беркли.")).toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|bert_qa_deep_pavlov_full|
+|Compatibility:|Spark NLP 5.2.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|ru|
+|Size:|664.3 MB|
+|Case sensitive:|true|
+|Max sentence length:|512|
+
+## References
+
+References
+
+- https://huggingface.co/ruselkomp/deep-pavlov-full
\ No newline at end of file
diff --git a/docs/_posts/ahmedlone127/2023-11-14-bert_qa_deepset_bert_base_uncased_squad2_en.md b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_deepset_bert_base_uncased_squad2_en.md
new file mode 100644
index 00000000000000..f50405c834a5b9
--- /dev/null
+++ b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_deepset_bert_base_uncased_squad2_en.md
@@ -0,0 +1,118 @@
+---
+layout: model
+title: English BertForQuestionAnswering model (from deepset)
+author: John Snow Labs
+name: bert_qa_deepset_bert_base_uncased_squad2
+date: 2023-11-14
+tags: [en, open_source, question_answering, bert, onnx]
+task: Question Answering
+language: en
+edition: Spark NLP 5.2.0
+spark_version: 3.0
+supported: true
+engine: onnx
+annotator: BertForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained Question Answering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `bert-base-uncased-squad2` is an English model originally trained by `deepset`.
+
+## Predicted Entities
+
+
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/bert_qa_deepset_bert_base_uncased_squad2_en_5.2.0_3.0_1700001241191.zip){:.button.button-orange.button-orange-trans.arr.button-icon}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/bert_qa_deepset_bert_base_uncased_squad2_en_5.2.0_3.0_1700001241191.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+document_assembler = MultiDocumentAssembler() \
+.setInputCols(["question", "context"]) \
+.setOutputCols(["document_question", "document_context"])
+
+spanClassifier = BertForQuestionAnswering.pretrained("bert_qa_deepset_bert_base_uncased_squad2","en") \
+.setInputCols(["document_question", "document_context"]) \
+.setOutputCol("answer") \
+.setCaseSensitive(True)
+
+pipeline = Pipeline().setStages([
+document_assembler,
+spanClassifier
+])
+
+example = spark.createDataFrame([["What's my name?", "My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(example).transform(example)
+```
+```scala
+val document = new MultiDocumentAssembler()
+.setInputCols("question", "context")
+.setOutputCols("document_question", "document_context")
+
+val spanClassifier = BertForQuestionAnswering
+.pretrained("bert_qa_deepset_bert_base_uncased_squad2","en")
+.setInputCols(Array("document_question", "document_context"))
+.setOutputCol("answer")
+.setCaseSensitive(true)
+.setMaxSentenceLength(512)
+
+val pipeline = new Pipeline().setStages(Array(document, spanClassifier))
+
+val example = Seq(
+("Where was John Lenon born?", "John Lenon was born in London and lived in Paris. My name is Sarah and I live in London."),
+("What's my name?", "My name is Clara and I live in Berkeley."))
+.toDF("question", "context")
+
+val result = pipeline.fit(example).transform(example)
+```
+
+{:.nlu-block}
+```python
+import nlu
+nlu.load("en.answer_question.squadv2.bert.base_uncased").predict("""What's my name?|||"My name is Clara and I live in Berkeley.""")
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|bert_qa_deepset_bert_base_uncased_squad2|
+|Compatibility:|Spark NLP 5.2.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|407.2 MB|
+|Case sensitive:|false|
+|Max sentence length:|512|
+
+## References
+
+References
+
+- https://huggingface.co/deepset/bert-base-uncased-squad2
+- https://github.com/deepset-ai/haystack/discussions
+- https://deepset.ai
+- https://twitter.com/deepset_ai
+- http://www.deepset.ai/jobs
+- https://haystack.deepset.ai/community/join
+- https://github.com/deepset-ai/haystack/
+- https://deepset.ai/german-bert
+- https://www.linkedin.com/company/deepset-ai/
+- https://github.com/deepset-ai/FARM
+- https://deepset.ai/germanquad
\ No newline at end of file
diff --git a/docs/_posts/ahmedlone127/2023-11-14-bert_qa_deepset_minilm_uncased_squad2_orkg_how_1e_4_en.md b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_deepset_minilm_uncased_squad2_orkg_how_1e_4_en.md
new file mode 100644
index 00000000000000..ce91b0591cd8de
--- /dev/null
+++ b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_deepset_minilm_uncased_squad2_orkg_how_1e_4_en.md
@@ -0,0 +1,94 @@
+---
+layout: model
+title: English BertForQuestionAnswering Mini Uncased model (from Moussab)
+author: John Snow Labs
+name: bert_qa_deepset_minilm_uncased_squad2_orkg_how_1e_4
+date: 2023-11-14
+tags: [en, open_source, bert, question_answering, onnx]
+task: Question Answering
+language: en
+edition: Spark NLP 5.2.0
+spark_version: 3.0
+supported: true
+engine: onnx
+annotator: BertForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained BertForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `deepset-minilm-uncased-squad2-orkg-how-1e-4` is an English model originally trained by `Moussab`.
+
+## Predicted Entities
+
+
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/bert_qa_deepset_minilm_uncased_squad2_orkg_how_1e_4_en_5.2.0_3.0_1700002894184.zip){:.button.button-orange.button-orange-trans.arr.button-icon}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/bert_qa_deepset_minilm_uncased_squad2_orkg_how_1e_4_en_5.2.0_3.0_1700002894184.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = BertForQuestionAnswering.pretrained("bert_qa_deepset_minilm_uncased_squad2_orkg_how_1e_4","en")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = BertForQuestionAnswering.pretrained("bert_qa_deepset_minilm_uncased_squad2_orkg_how_1e_4","en")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq(("What's my name?","My name is Clara and I live in Berkeley.")).toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|bert_qa_deepset_minilm_uncased_squad2_orkg_how_1e_4|
+|Compatibility:|Spark NLP 5.2.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|123.8 MB|
+|Case sensitive:|false|
+|Max sentence length:|512|
+
+## References
+
+References
+
+- https://huggingface.co/Moussab/deepset-minilm-uncased-squad2-orkg-how-1e-4
\ No newline at end of file
diff --git a/docs/_posts/ahmedlone127/2023-11-14-bert_qa_deepset_minilm_uncased_squad2_orkg_how_5e_05_en.md b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_deepset_minilm_uncased_squad2_orkg_how_5e_05_en.md
new file mode 100644
index 00000000000000..e96d7f9b812b37
--- /dev/null
+++ b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_deepset_minilm_uncased_squad2_orkg_how_5e_05_en.md
@@ -0,0 +1,94 @@
+---
+layout: model
+title: English BertForQuestionAnswering Mini Uncased model (from Moussab)
+author: John Snow Labs
+name: bert_qa_deepset_minilm_uncased_squad2_orkg_how_5e_05
+date: 2023-11-14
+tags: [en, open_source, bert, question_answering, onnx]
+task: Question Answering
+language: en
+edition: Spark NLP 5.2.0
+spark_version: 3.0
+supported: true
+engine: onnx
+annotator: BertForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained BertForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `deepset-minilm-uncased-squad2-orkg-how-5e-05` is an English model originally trained by `Moussab`.
+
+## Predicted Entities
+
+
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/bert_qa_deepset_minilm_uncased_squad2_orkg_how_5e_05_en_5.2.0_3.0_1700001415991.zip){:.button.button-orange.button-orange-trans.arr.button-icon}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/bert_qa_deepset_minilm_uncased_squad2_orkg_how_5e_05_en_5.2.0_3.0_1700001415991.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = BertForQuestionAnswering.pretrained("bert_qa_deepset_minilm_uncased_squad2_orkg_how_5e_05","en")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = BertForQuestionAnswering.pretrained("bert_qa_deepset_minilm_uncased_squad2_orkg_how_5e_05","en")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq(("What's my name?","My name is Clara and I live in Berkeley.")).toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|bert_qa_deepset_minilm_uncased_squad2_orkg_how_5e_05|
+|Compatibility:|Spark NLP 5.2.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|123.8 MB|
+|Case sensitive:|false|
+|Max sentence length:|512|
+
+## References
+
+References
+
+- https://huggingface.co/Moussab/deepset-minilm-uncased-squad2-orkg-how-5e-05
\ No newline at end of file
diff --git a/docs/_posts/ahmedlone127/2023-11-14-bert_qa_deepset_minilm_uncased_squad2_orkg_norwegian_label_1e_4_en.md b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_deepset_minilm_uncased_squad2_orkg_norwegian_label_1e_4_en.md
new file mode 100644
index 00000000000000..b0092ca467b768
--- /dev/null
+++ b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_deepset_minilm_uncased_squad2_orkg_norwegian_label_1e_4_en.md
@@ -0,0 +1,93 @@
+---
+layout: model
+title: English bert_qa_deepset_minilm_uncased_squad2_orkg_norwegian_label_1e_4 BertForQuestionAnswering from Moussab
+author: John Snow Labs
+name: bert_qa_deepset_minilm_uncased_squad2_orkg_norwegian_label_1e_4
+date: 2023-11-14
+tags: [bert, en, open_source, question_answering, onnx]
+task: Question Answering
+language: en
+edition: Spark NLP 5.2.0
+spark_version: 3.0
+supported: true
+engine: onnx
+annotator: BertForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained BertForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP.`bert_qa_deepset_minilm_uncased_squad2_orkg_norwegian_label_1e_4` is an English model originally trained by Moussab.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/bert_qa_deepset_minilm_uncased_squad2_orkg_norwegian_label_1e_4_en_5.2.0_3.0_1700005977978.zip){:.button.button-orange.button-orange-trans.arr.button-icon}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/bert_qa_deepset_minilm_uncased_squad2_orkg_norwegian_label_1e_4_en_5.2.0_3.0_1700005977978.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|bert_qa_deepset_minilm_uncased_squad2_orkg_norwegian_label_1e_4|
+|Compatibility:|Spark NLP 5.2.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|123.8 MB|
+
+## References
+
+https://huggingface.co/Moussab/deepset-minilm-uncased-squad2-orkg-no-label-1e-4
\ No newline at end of file
diff --git a/docs/_posts/ahmedlone127/2023-11-14-bert_qa_deepset_minilm_uncased_squad2_orkg_what_1e_4_en.md b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_deepset_minilm_uncased_squad2_orkg_what_1e_4_en.md
new file mode 100644
index 00000000000000..63c9ff642ef515
--- /dev/null
+++ b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_deepset_minilm_uncased_squad2_orkg_what_1e_4_en.md
@@ -0,0 +1,94 @@
+---
+layout: model
+title: English BertForQuestionAnswering Mini Uncased model (from Moussab)
+author: John Snow Labs
+name: bert_qa_deepset_minilm_uncased_squad2_orkg_what_1e_4
+date: 2023-11-14
+tags: [en, open_source, bert, question_answering, onnx]
+task: Question Answering
+language: en
+edition: Spark NLP 5.2.0
+spark_version: 3.0
+supported: true
+engine: onnx
+annotator: BertForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained BertForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `deepset-minilm-uncased-squad2-orkg-what-1e-4` is an English model originally trained by `Moussab`.
+
+## Predicted Entities
+
+
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/bert_qa_deepset_minilm_uncased_squad2_orkg_what_1e_4_en_5.2.0_3.0_1700003146226.zip){:.button.button-orange.button-orange-trans.arr.button-icon}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/bert_qa_deepset_minilm_uncased_squad2_orkg_what_1e_4_en_5.2.0_3.0_1700003146226.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = BertForQuestionAnswering.pretrained("bert_qa_deepset_minilm_uncased_squad2_orkg_what_1e_4","en")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = BertForQuestionAnswering.pretrained("bert_qa_deepset_minilm_uncased_squad2_orkg_what_1e_4","en")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq(("What's my name?","My name is Clara and I live in Berkeley.")).toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|bert_qa_deepset_minilm_uncased_squad2_orkg_what_1e_4|
+|Compatibility:|Spark NLP 5.2.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|123.8 MB|
+|Case sensitive:|false|
+|Max sentence length:|512|
+
+## References
+
+References
+
+- https://huggingface.co/Moussab/deepset-minilm-uncased-squad2-orkg-what-1e-4
\ No newline at end of file
diff --git a/docs/_posts/ahmedlone127/2023-11-14-bert_qa_deepset_minilm_uncased_squad2_orkg_what_5e_05_en.md b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_deepset_minilm_uncased_squad2_orkg_what_5e_05_en.md
new file mode 100644
index 00000000000000..f1f94ed61724ae
--- /dev/null
+++ b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_deepset_minilm_uncased_squad2_orkg_what_5e_05_en.md
@@ -0,0 +1,94 @@
+---
+layout: model
+title: English BertForQuestionAnswering Mini Uncased model (from Moussab)
+author: John Snow Labs
+name: bert_qa_deepset_minilm_uncased_squad2_orkg_what_5e_05
+date: 2023-11-14
+tags: [en, open_source, bert, question_answering, onnx]
+task: Question Answering
+language: en
+edition: Spark NLP 5.2.0
+spark_version: 3.0
+supported: true
+engine: onnx
+annotator: BertForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained BertForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `deepset-minilm-uncased-squad2-orkg-what-5e-05` is an English model originally trained by `Moussab`.
+
+## Predicted Entities
+
+
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/bert_qa_deepset_minilm_uncased_squad2_orkg_what_5e_05_en_5.2.0_3.0_1700004329305.zip){:.button.button-orange.button-orange-trans.arr.button-icon}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/bert_qa_deepset_minilm_uncased_squad2_orkg_what_5e_05_en_5.2.0_3.0_1700004329305.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = BertForQuestionAnswering.pretrained("bert_qa_deepset_minilm_uncased_squad2_orkg_what_5e_05","en")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = BertForQuestionAnswering.pretrained("bert_qa_deepset_minilm_uncased_squad2_orkg_what_5e_05","en")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq(("What's my name?","My name is Clara and I live in Berkeley.")).toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|bert_qa_deepset_minilm_uncased_squad2_orkg_what_5e_05|
+|Compatibility:|Spark NLP 5.2.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|123.8 MB|
+|Case sensitive:|false|
+|Max sentence length:|512|
+
+## References
+
+References
+
+- https://huggingface.co/Moussab/deepset-minilm-uncased-squad2-orkg-what-5e-05
\ No newline at end of file
diff --git a/docs/_posts/ahmedlone127/2023-11-14-bert_qa_deepset_minilm_uncased_squad2_orkg_which_1e_4_en.md b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_deepset_minilm_uncased_squad2_orkg_which_1e_4_en.md
new file mode 100644
index 00000000000000..9f4df7d88b4096
--- /dev/null
+++ b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_deepset_minilm_uncased_squad2_orkg_which_1e_4_en.md
@@ -0,0 +1,94 @@
+---
+layout: model
+title: English BertForQuestionAnswering Mini Uncased model (from Moussab)
+author: John Snow Labs
+name: bert_qa_deepset_minilm_uncased_squad2_orkg_which_1e_4
+date: 2023-11-14
+tags: [en, open_source, bert, question_answering, onnx]
+task: Question Answering
+language: en
+edition: Spark NLP 5.2.0
+spark_version: 3.0
+supported: true
+engine: onnx
+annotator: BertForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained BertForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `deepset-minilm-uncased-squad2-orkg-which-1e-4` is an English model originally trained by `Moussab`.
+
+## Predicted Entities
+
+
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/bert_qa_deepset_minilm_uncased_squad2_orkg_which_1e_4_en_5.2.0_3.0_1700006148238.zip){:.button.button-orange.button-orange-trans.arr.button-icon}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/bert_qa_deepset_minilm_uncased_squad2_orkg_which_1e_4_en_5.2.0_3.0_1700006148238.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = BertForQuestionAnswering.pretrained("bert_qa_deepset_minilm_uncased_squad2_orkg_which_1e_4","en")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = BertForQuestionAnswering.pretrained("bert_qa_deepset_minilm_uncased_squad2_orkg_which_1e_4","en")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq(("What's my name?","My name is Clara and I live in Berkeley.")).toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|bert_qa_deepset_minilm_uncased_squad2_orkg_which_1e_4|
+|Compatibility:|Spark NLP 5.2.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|123.8 MB|
+|Case sensitive:|false|
+|Max sentence length:|512|
+
+## References
+
+References
+
+- https://huggingface.co/Moussab/deepset-minilm-uncased-squad2-orkg-which-1e-4
\ No newline at end of file
diff --git a/docs/_posts/ahmedlone127/2023-11-14-bert_qa_deepset_minilm_uncased_squad2_orkg_which_5e_05_en.md b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_deepset_minilm_uncased_squad2_orkg_which_5e_05_en.md
new file mode 100644
index 00000000000000..5bb4f65f416270
--- /dev/null
+++ b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_deepset_minilm_uncased_squad2_orkg_which_5e_05_en.md
@@ -0,0 +1,94 @@
+---
+layout: model
+title: English BertForQuestionAnswering Mini Uncased model (from Moussab)
+author: John Snow Labs
+name: bert_qa_deepset_minilm_uncased_squad2_orkg_which_5e_05
+date: 2023-11-14
+tags: [en, open_source, bert, question_answering, onnx]
+task: Question Answering
+language: en
+edition: Spark NLP 5.2.0
+spark_version: 3.0
+supported: true
+engine: onnx
+annotator: BertForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained BertForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `deepset-minilm-uncased-squad2-orkg-which-5e-05` is an English model originally trained by `Moussab`.
+
+## Predicted Entities
+
+
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/bert_qa_deepset_minilm_uncased_squad2_orkg_which_5e_05_en_5.2.0_3.0_1700004505412.zip){:.button.button-orange.button-orange-trans.arr.button-icon}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/bert_qa_deepset_minilm_uncased_squad2_orkg_which_5e_05_en_5.2.0_3.0_1700004505412.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = BertForQuestionAnswering.pretrained("bert_qa_deepset_minilm_uncased_squad2_orkg_which_5e_05","en")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = BertForQuestionAnswering.pretrained("bert_qa_deepset_minilm_uncased_squad2_orkg_which_5e_05","en")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq(("What's my name?","My name is Clara and I live in Berkeley.")).toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|bert_qa_deepset_minilm_uncased_squad2_orkg_which_5e_05|
+|Compatibility:|Spark NLP 5.2.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|123.8 MB|
+|Case sensitive:|false|
+|Max sentence length:|512|
+
+## References
+
+References
+
+- https://huggingface.co/Moussab/deepset-minilm-uncased-squad2-orkg-which-5e-05
\ No newline at end of file
diff --git a/docs/_posts/ahmedlone127/2023-11-14-bert_qa_demo_en.md b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_demo_en.md
new file mode 100644
index 00000000000000..73ce15699db1ad
--- /dev/null
+++ b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_demo_en.md
@@ -0,0 +1,108 @@
+---
+layout: model
+title: English BertForQuestionAnswering model (from internetoftim)
+author: John Snow Labs
+name: bert_qa_demo
+date: 2023-11-14
+tags: [en, open_source, question_answering, bert, onnx]
+task: Question Answering
+language: en
+edition: Spark NLP 5.2.0
+spark_version: 3.0
+supported: true
+engine: onnx
+annotator: BertForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained Question Answering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `demo` is an English model originally trained by `internetoftim`.
+
+## Predicted Entities
+
+
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/bert_qa_demo_en_5.2.0_3.0_1700002057115.zip){:.button.button-orange.button-orange-trans.arr.button-icon}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/bert_qa_demo_en_5.2.0_3.0_1700002057115.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+document_assembler = MultiDocumentAssembler() \
+.setInputCols(["question", "context"]) \
+.setOutputCols(["document_question", "document_context"])
+
+spanClassifier = BertForQuestionAnswering.pretrained("bert_qa_demo","en") \
+.setInputCols(["document_question", "document_context"]) \
+.setOutputCol("answer") \
+.setCaseSensitive(True)
+
+pipeline = Pipeline().setStages([
+document_assembler,
+spanClassifier
+])
+
+example = spark.createDataFrame([["What's my name?", "My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(example).transform(example)
+```
+```scala
+val document = new MultiDocumentAssembler()
+.setInputCols("question", "context")
+.setOutputCols("document_question", "document_context")
+
+val spanClassifier = BertForQuestionAnswering
+.pretrained("bert_qa_demo","en")
+.setInputCols(Array("document_question", "document_context"))
+.setOutputCol("answer")
+.setCaseSensitive(true)
+.setMaxSentenceLength(512)
+
+val pipeline = new Pipeline().setStages(Array(document, spanClassifier))
+
+val example = Seq(
+("Where was John Lenon born?", "John Lenon was born in London and lived in Paris. My name is Sarah and I live in London."),
+("What's my name?", "My name is Clara and I live in Berkeley."))
+.toDF("question", "context")
+
+val result = pipeline.fit(example).transform(example)
+```
+
+{:.nlu-block}
+```python
+import nlu
+nlu.load("en.answer_question.bert.by_internetoftim").predict("""What's my name?|||"My name is Clara and I live in Berkeley.""")
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|bert_qa_demo|
+|Compatibility:|Spark NLP 5.2.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|797.0 MB|
+|Case sensitive:|true|
+|Max sentence length:|512|
+
+## References
+
+References
+
+- https://huggingface.co/internetoftim/demo
\ No newline at end of file
diff --git a/docs/_posts/ahmedlone127/2023-11-14-bert_qa_distilbert_base_uncased_finetuned_custom_en.md b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_distilbert_base_uncased_finetuned_custom_en.md
new file mode 100644
index 00000000000000..84546f5cf4153b
--- /dev/null
+++ b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_distilbert_base_uncased_finetuned_custom_en.md
@@ -0,0 +1,108 @@
+---
+layout: model
+title: English BertForQuestionAnswering model (from kamilali)
+author: John Snow Labs
+name: bert_qa_distilbert_base_uncased_finetuned_custom
+date: 2023-11-14
+tags: [en, open_source, question_answering, bert, onnx]
+task: Question Answering
+language: en
+edition: Spark NLP 5.2.0
+spark_version: 3.0
+supported: true
+engine: onnx
+annotator: BertForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained Question Answering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `distilbert-base-uncased-finetuned-custom` is a English model originally trained by `kamilali`.
+
+## Predicted Entities
+
+
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/bert_qa_distilbert_base_uncased_finetuned_custom_en_5.2.0_3.0_1700002555606.zip){:.button.button-orange.button-orange-trans.arr.button-icon}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/bert_qa_distilbert_base_uncased_finetuned_custom_en_5.2.0_3.0_1700002555606.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+document_assembler = MultiDocumentAssembler() \
+.setInputCols(["question", "context"]) \
+.setOutputCols(["document_question", "document_context"])
+
+spanClassifier = BertForQuestionAnswering.pretrained("bert_qa_distilbert_base_uncased_finetuned_custom","en") \
+.setInputCols(["document_question", "document_context"]) \
+.setOutputCol("answer") \
+.setCaseSensitive(True)
+
+pipeline = Pipeline().setStages([
+document_assembler,
+spanClassifier
+])
+
+example = spark.createDataFrame([["What's my name?", "My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(example).transform(example)
+```
+```scala
+val document = new MultiDocumentAssembler()
+.setInputCols("question", "context")
+.setOutputCols("document_question", "document_context")
+
+val spanClassifier = BertForQuestionAnswering
+.pretrained("bert_qa_distilbert_base_uncased_finetuned_custom","en")
+.setInputCols(Array("document_question", "document_context"))
+.setOutputCol("answer")
+.setCaseSensitive(true)
+.setMaxSentenceLength(512)
+
+val pipeline = new Pipeline().setStages(Array(document, spanClassifier))
+
+val example = Seq(
+("Where was John Lenon born?", "John Lenon was born in London and lived in Paris. My name is Sarah and I live in London."),
+("What's my name?", "My name is Clara and I live in Berkeley."))
+.toDF("question", "context")
+
+val result = pipeline.fit(example).transform(example)
+```
+
+{:.nlu-block}
+```python
+import nlu
+nlu.load("en.answer_question.bert.distilled_base_uncased").predict("""What's my name?|||"My name is Clara and I live in Berkeley.""")
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|bert_qa_distilbert_base_uncased_finetuned_custom|
+|Compatibility:|Spark NLP 5.2.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|1.3 GB|
+|Case sensitive:|false|
+|Max sentence length:|512|
+
+## References
+
+References
+
+- https://huggingface.co/kamilali/distilbert-base-uncased-finetuned-custom
\ No newline at end of file
diff --git a/docs/_posts/ahmedlone127/2023-11-14-bert_qa_distilbert_turkish_q_a_tr.md b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_distilbert_turkish_q_a_tr.md
new file mode 100644
index 00000000000000..1d777e143b4d11
--- /dev/null
+++ b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_distilbert_turkish_q_a_tr.md
@@ -0,0 +1,93 @@
+---
+layout: model
+title: Turkish bert_qa_distilbert_turkish_q_a BertForQuestionAnswering from emre
+author: John Snow Labs
+name: bert_qa_distilbert_turkish_q_a
+date: 2023-11-14
+tags: [bert, tr, open_source, question_answering, onnx]
+task: Question Answering
+language: tr
+edition: Spark NLP 5.2.0
+spark_version: 3.0
+supported: true
+engine: onnx
+annotator: BertForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained BertForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `bert_qa_distilbert_turkish_q_a` is a Turkish model originally trained by emre.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/bert_qa_distilbert_turkish_q_a_tr_5.2.0_3.0_1699997767429.zip){:.button.button-orange.button-orange-trans.arr.button-icon}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/bert_qa_distilbert_turkish_q_a_tr_5.2.0_3.0_1699997767429.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|bert_qa_distilbert_turkish_q_a|
+|Compatibility:|Spark NLP 5.2.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|tr|
+|Size:|409.5 MB|
+
+## References
+
+https://huggingface.co/emre/distilbert-turkish-q-a
\ No newline at end of file
diff --git a/docs/_posts/ahmedlone127/2023-11-14-bert_qa_dl4nlp_group11_xtremedistil_l6_h256_uncased_squad_en.md b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_dl4nlp_group11_xtremedistil_l6_h256_uncased_squad_en.md
new file mode 100644
index 00000000000000..3ef2332cde7979
--- /dev/null
+++ b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_dl4nlp_group11_xtremedistil_l6_h256_uncased_squad_en.md
@@ -0,0 +1,95 @@
+---
+layout: model
+title: English BertForQuestionAnswering Uncased model (from DL4NLP-Group11)
+author: John Snow Labs
+name: bert_qa_dl4nlp_group11_xtremedistil_l6_h256_uncased_squad
+date: 2023-11-14
+tags: [en, open_source, bert, question_answering, onnx]
+task: Question Answering
+language: en
+edition: Spark NLP 5.2.0
+spark_version: 3.0
+supported: true
+engine: onnx
+annotator: BertForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained BertForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `xtremedistil-l6-h256-uncased-squad` is a English model originally trained by `DL4NLP-Group11`.
+
+## Predicted Entities
+
+
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/bert_qa_dl4nlp_group11_xtremedistil_l6_h256_uncased_squad_en_5.2.0_3.0_1700004805396.zip){:.button.button-orange.button-orange-trans.arr.button-icon}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/bert_qa_dl4nlp_group11_xtremedistil_l6_h256_uncased_squad_en_5.2.0_3.0_1700004805396.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = BertForQuestionAnswering.pretrained("bert_qa_dl4nlp_group11_xtremedistil_l6_h256_uncased_squad","en")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = BertForQuestionAnswering.pretrained("bert_qa_dl4nlp_group11_xtremedistil_l6_h256_uncased_squad","en")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq("What's my name?","My name is Clara and I live in Berkeley.").toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|bert_qa_dl4nlp_group11_xtremedistil_l6_h256_uncased_squad|
+|Compatibility:|Spark NLP 5.2.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|47.4 MB|
+|Case sensitive:|false|
+|Max sentence length:|512|
+
+## References
+
+References
+
+- https://huggingface.co/DL4NLP-Group11/xtremedistil-l6-h256-uncased-squad
+- https://github.com/mrqa/MRQA-Shared-Task-2019
\ No newline at end of file
diff --git a/docs/_posts/ahmedlone127/2023-11-14-bert_qa_dry_finetuned_squad_en.md b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_dry_finetuned_squad_en.md
new file mode 100644
index 00000000000000..f647e471fb321b
--- /dev/null
+++ b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_dry_finetuned_squad_en.md
@@ -0,0 +1,94 @@
+---
+layout: model
+title: English BertForQuestionAnswering Cased model (from DrY)
+author: John Snow Labs
+name: bert_qa_dry_finetuned_squad
+date: 2023-11-14
+tags: [en, open_source, bert, question_answering, onnx]
+task: Question Answering
+language: en
+edition: Spark NLP 5.2.0
+spark_version: 3.0
+supported: true
+engine: onnx
+annotator: BertForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained BertForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `bert-finetuned-squad` is a English model originally trained by `DrY`.
+
+## Predicted Entities
+
+
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/bert_qa_dry_finetuned_squad_en_5.2.0_3.0_1700006372734.zip){:.button.button-orange.button-orange-trans.arr.button-icon}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/bert_qa_dry_finetuned_squad_en_5.2.0_3.0_1700006372734.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = BertForQuestionAnswering.pretrained("bert_qa_dry_finetuned_squad","en")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = BertForQuestionAnswering.pretrained("bert_qa_dry_finetuned_squad","en")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq("What's my name?","My name is Clara and I live in Berkeley.").toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|bert_qa_dry_finetuned_squad|
+|Compatibility:|Spark NLP 5.2.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|403.6 MB|
+|Case sensitive:|true|
+|Max sentence length:|512|
+
+## References
+
+References
+
+- https://huggingface.co/DrY/bert-finetuned-squad
\ No newline at end of file
diff --git a/docs/_posts/ahmedlone127/2023-11-14-bert_qa_dylan1999_finetuned_squad_en.md b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_dylan1999_finetuned_squad_en.md
new file mode 100644
index 00000000000000..e0203a4be66901
--- /dev/null
+++ b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_dylan1999_finetuned_squad_en.md
@@ -0,0 +1,94 @@
+---
+layout: model
+title: English BertForQuestionAnswering Cased model (from Dylan1999)
+author: John Snow Labs
+name: bert_qa_dylan1999_finetuned_squad
+date: 2023-11-14
+tags: [en, open_source, bert, question_answering, onnx]
+task: Question Answering
+language: en
+edition: Spark NLP 5.2.0
+spark_version: 3.0
+supported: true
+engine: onnx
+annotator: BertForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained BertForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `bert-finetuned-squad` is a English model originally trained by `Dylan1999`.
+
+## Predicted Entities
+
+
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/bert_qa_dylan1999_finetuned_squad_en_5.2.0_3.0_1700005104544.zip){:.button.button-orange.button-orange-trans.arr.button-icon}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/bert_qa_dylan1999_finetuned_squad_en_5.2.0_3.0_1700005104544.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = BertForQuestionAnswering.pretrained("bert_qa_dylan1999_finetuned_squad","en")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = BertForQuestionAnswering.pretrained("bert_qa_dylan1999_finetuned_squad","en")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq("What's my name?","My name is Clara and I live in Berkeley.").toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|bert_qa_dylan1999_finetuned_squad|
+|Compatibility:|Spark NLP 5.2.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|403.6 MB|
+|Case sensitive:|true|
+|Max sentence length:|512|
+
+## References
+
+References
+
+- https://huggingface.co/Dylan1999/bert-finetuned-squad
\ No newline at end of file
diff --git a/docs/_posts/ahmedlone127/2023-11-14-bert_qa_fabianwillner_base_uncased_finetuned_trivia_en.md b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_fabianwillner_base_uncased_finetuned_trivia_en.md
new file mode 100644
index 00000000000000..8ce9f5fea75289
--- /dev/null
+++ b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_fabianwillner_base_uncased_finetuned_trivia_en.md
@@ -0,0 +1,94 @@
+---
+layout: model
+title: English BertForQuestionAnswering Base Uncased model (from FabianWillner)
+author: John Snow Labs
+name: bert_qa_fabianwillner_base_uncased_finetuned_trivia
+date: 2023-11-14
+tags: [en, open_source, bert, question_answering, onnx]
+task: Question Answering
+language: en
+edition: Spark NLP 5.2.0
+spark_version: 3.0
+supported: true
+engine: onnx
+annotator: BertForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained BertForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `bert-base-uncased-finetuned-triviaqa` is a English model originally trained by `FabianWillner`.
+
+## Predicted Entities
+
+
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/bert_qa_fabianwillner_base_uncased_finetuned_trivia_en_5.2.0_3.0_1699998057296.zip){:.button.button-orange.button-orange-trans.arr.button-icon}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/bert_qa_fabianwillner_base_uncased_finetuned_trivia_en_5.2.0_3.0_1699998057296.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = BertForQuestionAnswering.pretrained("bert_qa_fabianwillner_base_uncased_finetuned_trivia","en")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = BertForQuestionAnswering.pretrained("bert_qa_fabianwillner_base_uncased_finetuned_trivia","en")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq("What's my name?","My name is Clara and I live in Berkeley.").toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|bert_qa_fabianwillner_base_uncased_finetuned_trivia|
+|Compatibility:|Spark NLP 5.2.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|407.2 MB|
+|Case sensitive:|false|
+|Max sentence length:|512|
+
+## References
+
+References
+
+- https://huggingface.co/FabianWillner/bert-base-uncased-finetuned-triviaqa
\ No newline at end of file
diff --git a/docs/_posts/ahmedlone127/2023-11-14-bert_qa_faquad_base_portuguese_cased_pt.md b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_faquad_base_portuguese_cased_pt.md
new file mode 100644
index 00000000000000..8f29534636df50
--- /dev/null
+++ b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_faquad_base_portuguese_cased_pt.md
@@ -0,0 +1,95 @@
+---
+layout: model
+title: Portuguese BertForQuestionAnswering Base Cased model (from eraldoluis)
+author: John Snow Labs
+name: bert_qa_faquad_base_portuguese_cased
+date: 2023-11-14
+tags: [pt, open_source, bert, question_answering, onnx]
+task: Question Answering
+language: pt
+edition: Spark NLP 5.2.0
+spark_version: 3.0
+supported: true
+engine: onnx
+annotator: BertForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained BertForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `faquad-bert-base-portuguese-cased` is a Portuguese model originally trained by `eraldoluis`.
+
+## Predicted Entities
+
+
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/bert_qa_faquad_base_portuguese_cased_pt_5.2.0_3.0_1700003519252.zip){:.button.button-orange.button-orange-trans.arr.button-icon}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/bert_qa_faquad_base_portuguese_cased_pt_5.2.0_3.0_1700003519252.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = BertForQuestionAnswering.pretrained("bert_qa_faquad_base_portuguese_cased","pt")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = BertForQuestionAnswering.pretrained("bert_qa_faquad_base_portuguese_cased","pt")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq("What's my name?","My name is Clara and I live in Berkeley.").toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|bert_qa_faquad_base_portuguese_cased|
+|Compatibility:|Spark NLP 5.2.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|pt|
+|Size:|405.9 MB|
+|Case sensitive:|true|
+|Max sentence length:|512|
+
+## References
+
+References
+
+- https://huggingface.co/eraldoluis/faquad-bert-base-portuguese-cased
+- https://paperswithcode.com/sota?task=Extractive+Question-Answering&dataset=FaQuAD
\ No newline at end of file
diff --git a/docs/_posts/ahmedlone127/2023-11-14-bert_qa_fewrel_zero_shot_en.md b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_fewrel_zero_shot_en.md
new file mode 100644
index 00000000000000..19ac5324628bcd
--- /dev/null
+++ b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_fewrel_zero_shot_en.md
@@ -0,0 +1,109 @@
+---
+layout: model
+title: English BertForQuestionAnswering model (from fractalego)
+author: John Snow Labs
+name: bert_qa_fewrel_zero_shot
+date: 2023-11-14
+tags: [en, open_source, question_answering, bert, onnx]
+task: Question Answering
+language: en
+edition: Spark NLP 5.2.0
+spark_version: 3.0
+supported: true
+engine: onnx
+annotator: BertForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained Question Answering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `fewrel-zero-shot` is a English model originally trained by `fractalego`.
+
+## Predicted Entities
+
+
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/bert_qa_fewrel_zero_shot_en_5.2.0_3.0_1700004037037.zip){:.button.button-orange.button-orange-trans.arr.button-icon}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/bert_qa_fewrel_zero_shot_en_5.2.0_3.0_1700004037037.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+document_assembler = MultiDocumentAssembler() \
+.setInputCols(["question", "context"]) \
+.setOutputCols(["document_question", "document_context"])
+
+spanClassifier = BertForQuestionAnswering.pretrained("bert_qa_fewrel_zero_shot","en") \
+.setInputCols(["document_question", "document_context"]) \
+.setOutputCol("answer") \
+.setCaseSensitive(True)
+
+pipeline = Pipeline().setStages([
+document_assembler,
+spanClassifier
+])
+
+example = spark.createDataFrame([["What's my name?", "My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(example).transform(example)
+```
+```scala
+val document = new MultiDocumentAssembler()
+.setInputCols("question", "context")
+.setOutputCols("document_question", "document_context")
+
+val spanClassifier = BertForQuestionAnswering
+.pretrained("bert_qa_fewrel_zero_shot","en")
+.setInputCols(Array("document_question", "document_context"))
+.setOutputCol("answer")
+.setCaseSensitive(true)
+.setMaxSentenceLength(512)
+
+val pipeline = new Pipeline().setStages(Array(document, spanClassifier))
+
+val example = Seq(
+("Where was John Lenon born?", "John Lenon was born in London and lived in Paris. My name is Sarah and I live in London."),
+("What's my name?", "My name is Clara and I live in Berkeley."))
+.toDF("question", "context")
+
+val result = pipeline.fit(example).transform(example)
+```
+
+{:.nlu-block}
+```python
+import nlu
+nlu.load("en.answer_question.bert.zero_shot").predict("""What's my name?|||"My name is Clara and I live in Berkeley.""")
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|bert_qa_fewrel_zero_shot|
+|Compatibility:|Spark NLP 5.2.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|1.2 GB|
+|Case sensitive:|true|
+|Max sentence length:|512|
+
+## References
+
+References
+
+- https://huggingface.co/fractalego/fewrel-zero-shot
+- https://www.aclweb.org/anthology/2020.coling-main.124
\ No newline at end of file
diff --git a/docs/_posts/ahmedlone127/2023-11-14-bert_qa_financial_v2_en.md b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_financial_v2_en.md
new file mode 100644
index 00000000000000..9bc9c2cf36fbd7
--- /dev/null
+++ b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_financial_v2_en.md
@@ -0,0 +1,94 @@
+---
+layout: model
+title: English BertForQuestionAnswering Cased model (from anablasi)
+author: John Snow Labs
+name: bert_qa_financial_v2
+date: 2023-11-14
+tags: [en, open_source, bert, question_answering, onnx]
+task: Question Answering
+language: en
+edition: Spark NLP 5.2.0
+spark_version: 3.0
+supported: true
+engine: onnx
+annotator: BertForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained BertForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `qa_financial_v2` is a English model originally trained by `anablasi`.
+
+## Predicted Entities
+
+
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/bert_qa_financial_v2_en_5.2.0_3.0_1700002846482.zip){:.button.button-orange.button-orange-trans.arr.button-icon}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/bert_qa_financial_v2_en_5.2.0_3.0_1700002846482.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = BertForQuestionAnswering.pretrained("bert_qa_financial_v2","en")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = BertForQuestionAnswering.pretrained("bert_qa_financial_v2","en")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq("What's my name?","My name is Clara and I live in Berkeley.").toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|bert_qa_financial_v2|
+|Compatibility:|Spark NLP 5.2.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|407.1 MB|
+|Case sensitive:|true|
+|Max sentence length:|512|
+
+## References
+
+References
+
+- https://huggingface.co/anablasi/qa_financial_v2
\ No newline at end of file
diff --git a/docs/_posts/ahmedlone127/2023-11-14-bert_qa_fine_tuned_squad_aip_en.md b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_fine_tuned_squad_aip_en.md
new file mode 100644
index 00000000000000..6ed8e11a8d742c
--- /dev/null
+++ b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_fine_tuned_squad_aip_en.md
@@ -0,0 +1,108 @@
+---
+layout: model
+title: English BertForQuestionAnswering model (from Kutay)
+author: John Snow Labs
+name: bert_qa_fine_tuned_squad_aip
+date: 2023-11-14
+tags: [en, open_source, question_answering, bert, onnx]
+task: Question Answering
+language: en
+edition: Spark NLP 5.2.0
+spark_version: 3.0
+supported: true
+engine: onnx
+annotator: BertForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained Question Answering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `fine_tuned_squad_aip` is a English model originally trained by `Kutay`.
+
+## Predicted Entities
+
+
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/bert_qa_fine_tuned_squad_aip_en_5.2.0_3.0_1700004346563.zip){:.button.button-orange.button-orange-trans.arr.button-icon}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/bert_qa_fine_tuned_squad_aip_en_5.2.0_3.0_1700004346563.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+document_assembler = MultiDocumentAssembler() \
+.setInputCols(["question", "context"]) \
+.setOutputCols(["document_question", "document_context"])
+
+spanClassifier = BertForQuestionAnswering.pretrained("bert_qa_fine_tuned_squad_aip","en") \
+.setInputCols(["document_question", "document_context"]) \
+.setOutputCol("answer") \
+.setCaseSensitive(True)
+
+pipeline = Pipeline().setStages([
+document_assembler,
+spanClassifier
+])
+
+example = spark.createDataFrame([["What's my name?", "My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(example).transform(example)
+```
+```scala
+val document = new MultiDocumentAssembler()
+.setInputCols("question", "context")
+.setOutputCols("document_question", "document_context")
+
+val spanClassifier = BertForQuestionAnswering
+.pretrained("bert_qa_fine_tuned_squad_aip","en")
+.setInputCols(Array("document_question", "document_context"))
+.setOutputCol("answer")
+.setCaseSensitive(true)
+.setMaxSentenceLength(512)
+
+val pipeline = new Pipeline().setStages(Array(document, spanClassifier))
+
+val example = Seq(
+("Where was John Lenon born?", "John Lenon was born in London and lived in Paris. My name is Sarah and I live in London."),
+("What's my name?", "My name is Clara and I live in Berkeley."))
+.toDF("question", "context")
+
+val result = pipeline.fit(example).transform(example)
+```
+
+{:.nlu-block}
+```python
+import nlu
+nlu.load("en.answer_question.squad.bert.by_Kutay").predict("""What's my name?|||"My name is Clara and I live in Berkeley.""")
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|bert_qa_fine_tuned_squad_aip|
+|Compatibility:|Spark NLP 5.2.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|407.1 MB|
+|Case sensitive:|true|
+|Max sentence length:|512|
+
+## References
+
+References
+
+- https://huggingface.co/Kutay/fine_tuned_squad_aip
\ No newline at end of file
diff --git a/docs/_posts/ahmedlone127/2023-11-14-bert_qa_fine_tuned_tweetqa_aip_en.md b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_fine_tuned_tweetqa_aip_en.md
new file mode 100644
index 00000000000000..a593f1ec481799
--- /dev/null
+++ b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_fine_tuned_tweetqa_aip_en.md
@@ -0,0 +1,108 @@
+---
+layout: model
+title: English BertForQuestionAnswering model (from Kutay)
+author: John Snow Labs
+name: bert_qa_fine_tuned_tweetqa_aip
+date: 2023-11-14
+tags: [en, open_source, question_answering, bert, onnx]
+task: Question Answering
+language: en
+edition: Spark NLP 5.2.0
+spark_version: 3.0
+supported: true
+engine: onnx
+annotator: BertForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained Question Answering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `fine_tuned_tweetqa_aip` is an English model originally trained by `Kutay`.
+
+## Predicted Entities
+
+
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/bert_qa_fine_tuned_tweetqa_aip_en_5.2.0_3.0_1699998354333.zip){:.button.button-orange.button-orange-trans.arr.button-icon}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/bert_qa_fine_tuned_tweetqa_aip_en_5.2.0_3.0_1699998354333.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+document_assembler = MultiDocumentAssembler() \
+.setInputCols(["question", "context"]) \
+.setOutputCols(["document_question", "document_context"])
+
+spanClassifier = BertForQuestionAnswering.pretrained("bert_qa_fine_tuned_tweetqa_aip","en") \
+.setInputCols(["document_question", "document_context"]) \
+.setOutputCol("answer") \
+.setCaseSensitive(True)
+
+pipeline = Pipeline().setStages([
+document_assembler,
+spanClassifier
+])
+
+example = spark.createDataFrame([["What's my name?", "My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(example).transform(example)
+```
+```scala
+val document = new MultiDocumentAssembler()
+.setInputCols("question", "context")
+.setOutputCols("document_question", "document_context")
+
+val spanClassifier = BertForQuestionAnswering
+.pretrained("bert_qa_fine_tuned_tweetqa_aip","en")
+.setInputCols(Array("document_question", "document_context"))
+.setOutputCol("answer")
+.setCaseSensitive(true)
+.setMaxSentenceLength(512)
+
+val pipeline = new Pipeline().setStages(Array(document, spanClassifier))
+
+val example = Seq(
+("Where was John Lenon born?", "John Lenon was born in London and lived in Paris. My name is Sarah and I live in London."),
+("What's my name?", "My name is Clara and I live in Berkeley."))
+.toDF("question", "context")
+
+val result = pipeline.fit(example).transform(example)
+```
+
+{:.nlu-block}
+```python
+import nlu
+nlu.load("en.answer_question.trivia.bert").predict("""What's my name?|||"My name is Clara and I live in Berkeley.""")
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|bert_qa_fine_tuned_tweetqa_aip|
+|Compatibility:|Spark NLP 5.2.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|407.2 MB|
+|Case sensitive:|true|
+|Max sentence length:|512|
+
+## References
+
+References
+
+- https://huggingface.co/Kutay/fine_tuned_tweetqa_aip
\ No newline at end of file
diff --git a/docs/_posts/ahmedlone127/2023-11-14-bert_qa_finetune_bert_base_v1_en.md b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_finetune_bert_base_v1_en.md
new file mode 100644
index 00000000000000..e88d596accca67
--- /dev/null
+++ b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_finetune_bert_base_v1_en.md
@@ -0,0 +1,108 @@
+---
+layout: model
+title: English BertForQuestionAnswering model (from peggyhuang)
+author: John Snow Labs
+name: bert_qa_finetune_bert_base_v1
+date: 2023-11-14
+tags: [en, open_source, question_answering, bert, onnx]
+task: Question Answering
+language: en
+edition: Spark NLP 5.2.0
+spark_version: 3.0
+supported: true
+engine: onnx
+annotator: BertForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained Question Answering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `finetune-bert-base-v1` is an English model originally trained by `peggyhuang`.
+
+## Predicted Entities
+
+
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/bert_qa_finetune_bert_base_v1_en_5.2.0_3.0_1700003144412.zip){:.button.button-orange.button-orange-trans.arr.button-icon}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/bert_qa_finetune_bert_base_v1_en_5.2.0_3.0_1700003144412.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+document_assembler = MultiDocumentAssembler() \
+.setInputCols(["question", "context"]) \
+.setOutputCols(["document_question", "document_context"])
+
+spanClassifier = BertForQuestionAnswering.pretrained("bert_qa_finetune_bert_base_v1","en") \
+.setInputCols(["document_question", "document_context"]) \
+.setOutputCol("answer") \
+.setCaseSensitive(True)
+
+pipeline = Pipeline().setStages([
+document_assembler,
+spanClassifier
+])
+
+example = spark.createDataFrame([["What's my name?", "My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(example).transform(example)
+```
+```scala
+val document = new MultiDocumentAssembler()
+.setInputCols("question", "context")
+.setOutputCols("document_question", "document_context")
+
+val spanClassifier = BertForQuestionAnswering
+.pretrained("bert_qa_finetune_bert_base_v1","en")
+.setInputCols(Array("document_question", "document_context"))
+.setOutputCol("answer")
+.setCaseSensitive(true)
+.setMaxSentenceLength(512)
+
+val pipeline = new Pipeline().setStages(Array(document, spanClassifier))
+
+val example = Seq(
+("Where was John Lenon born?", "John Lenon was born in London and lived in Paris. My name is Sarah and I live in London."),
+("What's my name?", "My name is Clara and I live in Berkeley."))
+.toDF("question", "context")
+
+val result = pipeline.fit(example).transform(example)
+```
+
+{:.nlu-block}
+```python
+import nlu
+nlu.load("en.answer_question.bert.base.by_peggyhuang").predict("""What's my name?|||"My name is Clara and I live in Berkeley.""")
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|bert_qa_finetune_bert_base_v1|
+|Compatibility:|Spark NLP 5.2.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|407.1 MB|
+|Case sensitive:|true|
+|Max sentence length:|512|
+
+## References
+
+References
+
+- https://huggingface.co/peggyhuang/finetune-bert-base-v1
\ No newline at end of file
diff --git a/docs/_posts/ahmedlone127/2023-11-14-bert_qa_finetune_scibert_v2_en.md b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_finetune_scibert_v2_en.md
new file mode 100644
index 00000000000000..7b048206f09a38
--- /dev/null
+++ b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_finetune_scibert_v2_en.md
@@ -0,0 +1,100 @@
+---
+layout: model
+title: English BertForQuestionAnswering Cased model (from peggyhuang)
+author: John Snow Labs
+name: bert_qa_finetune_scibert_v2
+date: 2023-11-14
+tags: [en, open_source, bert, question_answering, onnx]
+task: Question Answering
+language: en
+edition: Spark NLP 5.2.0
+spark_version: 3.0
+supported: true
+engine: onnx
+annotator: BertForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained Question Answering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `finetune-SciBert-v2` is a English model originally trained by `peggyhuang`.
+
+## Predicted Entities
+
+
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/bert_qa_finetune_scibert_v2_en_5.2.0_3.0_1700003411813.zip){:.button.button-orange.button-orange-trans.arr.button-icon}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/bert_qa_finetune_scibert_v2_en_5.2.0_3.0_1700003411813.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+documentAssembler = MultiDocumentAssembler() \
+ .setInputCols(["question", "context"]) \
+ .setOutputCols(["document_question", "document_context"])
+
+spanClassifier = BertForQuestionAnswering.pretrained("bert_qa_finetune_scibert_v2","en") \
+ .setInputCols(["document_question", "document_context"]) \
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[documentAssembler, spanClassifier])
+
+data = spark.createDataFrame([["What is my name?", "My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val documentAssembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val spanClassifier = BertForQuestionAnswering.pretrained("bert_qa_finetune_scibert_v2","en")
+  .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(documentAssembler, spanClassifier))
+
+val data = Seq(("What is my name?", "My name is Clara and I live in Berkeley.")).toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+{:.nlu-block}
+```python
+import nlu
+nlu.load("en.answer_question.scibert.scibert.v2").predict("""What is my name?|||"My name is Clara and I live in Berkeley.""")
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|bert_qa_finetune_scibert_v2|
+|Compatibility:|Spark NLP 5.2.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|409.9 MB|
+|Case sensitive:|true|
+|Max sentence length:|512|
+
+## References
+
+References
+
+- https://huggingface.co/peggyhuang/finetune-SciBert-v2
\ No newline at end of file
diff --git a/docs/_posts/ahmedlone127/2023-11-14-bert_qa_finetuned_custom_2_en.md b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_finetuned_custom_2_en.md
new file mode 100644
index 00000000000000..2234dd94b7be73
--- /dev/null
+++ b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_finetuned_custom_2_en.md
@@ -0,0 +1,94 @@
+---
+layout: model
+title: English BertForQuestionAnswering Cased model (from VedantS01)
+author: John Snow Labs
+name: bert_qa_finetuned_custom_2
+date: 2023-11-14
+tags: [en, open_source, bert, question_answering, onnx]
+task: Question Answering
+language: en
+edition: Spark NLP 5.2.0
+spark_version: 3.0
+supported: true
+engine: onnx
+annotator: BertForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained BertForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `bert-finetuned-custom-2` is a English model originally trained by `VedantS01`.
+
+## Predicted Entities
+
+
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/bert_qa_finetuned_custom_2_en_5.2.0_3.0_1700003752620.zip){:.button.button-orange.button-orange-trans.arr.button-icon}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/bert_qa_finetuned_custom_2_en_5.2.0_3.0_1700003752620.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = BertForQuestionAnswering.pretrained("bert_qa_finetuned_custom_2","en")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = BertForQuestionAnswering.pretrained("bert_qa_finetuned_custom_2","en")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq(("What's my name?","My name is Clara and I live in Berkeley.")).toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|bert_qa_finetuned_custom_2|
+|Compatibility:|Spark NLP 5.2.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|407.2 MB|
+|Case sensitive:|true|
+|Max sentence length:|512|
+
+## References
+
+References
+
+- https://huggingface.co/VedantS01/bert-finetuned-custom-2
\ No newline at end of file
diff --git a/docs/_posts/ahmedlone127/2023-11-14-bert_qa_finetuned_custom_en.md b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_finetuned_custom_en.md
new file mode 100644
index 00000000000000..9af8271ddf15ed
--- /dev/null
+++ b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_finetuned_custom_en.md
@@ -0,0 +1,94 @@
+---
+layout: model
+title: English BertForQuestionAnswering Cased model (from VedantS01)
+author: John Snow Labs
+name: bert_qa_finetuned_custom
+date: 2023-11-14
+tags: [en, open_source, bert, question_answering, onnx]
+task: Question Answering
+language: en
+edition: Spark NLP 5.2.0
+spark_version: 3.0
+supported: true
+engine: onnx
+annotator: BertForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained BertForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `bert-finetuned-custom` is a English model originally trained by `VedantS01`.
+
+## Predicted Entities
+
+
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/bert_qa_finetuned_custom_en_5.2.0_3.0_1700005469442.zip){:.button.button-orange.button-orange-trans.arr.button-icon}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/bert_qa_finetuned_custom_en_5.2.0_3.0_1700005469442.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = BertForQuestionAnswering.pretrained("bert_qa_finetuned_custom","en")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = BertForQuestionAnswering.pretrained("bert_qa_finetuned_custom","en")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq(("What's my name?","My name is Clara and I live in Berkeley.")).toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|bert_qa_finetuned_custom|
+|Compatibility:|Spark NLP 5.2.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|407.2 MB|
+|Case sensitive:|true|
+|Max sentence length:|512|
+
+## References
+
+References
+
+- https://huggingface.co/VedantS01/bert-finetuned-custom
\ No newline at end of file
diff --git a/docs/_posts/ahmedlone127/2023-11-14-bert_qa_finetuned_squad_transformerfrozen_testtoken_en.md b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_finetuned_squad_transformerfrozen_testtoken_en.md
new file mode 100644
index 00000000000000..fd4b5191cca819
--- /dev/null
+++ b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_finetuned_squad_transformerfrozen_testtoken_en.md
@@ -0,0 +1,100 @@
+---
+layout: model
+title: English BertForQuestionAnswering Cased model (from DaisyMak)
+author: John Snow Labs
+name: bert_qa_finetuned_squad_transformerfrozen_testtoken
+date: 2023-11-14
+tags: [en, open_source, bert, question_answering, onnx]
+task: Question Answering
+language: en
+edition: Spark NLP 5.2.0
+spark_version: 3.0
+supported: true
+engine: onnx
+annotator: BertForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained Question Answering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `bert-finetuned-squad-transformerfrozen-testtoken` is a English model originally trained by `DaisyMak`.
+
+## Predicted Entities
+
+
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/bert_qa_finetuned_squad_transformerfrozen_testtoken_en_5.2.0_3.0_1700005818068.zip){:.button.button-orange.button-orange-trans.arr.button-icon}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/bert_qa_finetuned_squad_transformerfrozen_testtoken_en_5.2.0_3.0_1700005818068.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+documentAssembler = MultiDocumentAssembler() \
+ .setInputCols(["question", "context"]) \
+ .setOutputCols(["document_question", "document_context"])
+
+spanClassifier = BertForQuestionAnswering.pretrained("bert_qa_finetuned_squad_transformerfrozen_testtoken","en") \
+ .setInputCols(["document_question", "document_context"]) \
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[documentAssembler, spanClassifier])
+
+data = spark.createDataFrame([["What is my name?", "My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val documentAssembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val spanClassifier = BertForQuestionAnswering.pretrained("bert_qa_finetuned_squad_transformerfrozen_testtoken","en")
+  .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(documentAssembler, spanClassifier))
+
+val data = Seq(("What is my name?", "My name is Clara and I live in Berkeley.")).toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+{:.nlu-block}
+```python
+import nlu
+nlu.load("en.answer_question.bert.squad.finetuned.by_DaisyMak").predict("""What is my name?|||"My name is Clara and I live in Berkeley.""")
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|bert_qa_finetuned_squad_transformerfrozen_testtoken|
+|Compatibility:|Spark NLP 5.2.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|403.6 MB|
+|Case sensitive:|true|
+|Max sentence length:|512|
+
+## References
+
+References
+
+- https://huggingface.co/DaisyMak/bert-finetuned-squad-transformerfrozen-testtoken
\ No newline at end of file
diff --git a/docs/_posts/ahmedlone127/2023-11-14-bert_qa_finetuned_uia_en.md b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_finetuned_uia_en.md
new file mode 100644
index 00000000000000..20dabb53f72552
--- /dev/null
+++ b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_finetuned_uia_en.md
@@ -0,0 +1,94 @@
+---
+layout: model
+title: English BertForQuestionAnswering Cased model (from eibakke)
+author: John Snow Labs
+name: bert_qa_finetuned_uia
+date: 2023-11-14
+tags: [en, open_source, bert, question_answering, onnx]
+task: Question Answering
+language: en
+edition: Spark NLP 5.2.0
+spark_version: 3.0
+supported: true
+engine: onnx
+annotator: BertForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained BertForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `bert-finetuned-uia` is a English model originally trained by `eibakke`.
+
+## Predicted Entities
+
+
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/bert_qa_finetuned_uia_en_5.2.0_3.0_1700004690933.zip){:.button.button-orange.button-orange-trans.arr.button-icon}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/bert_qa_finetuned_uia_en_5.2.0_3.0_1700004690933.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = BertForQuestionAnswering.pretrained("bert_qa_finetuned_uia","en")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = BertForQuestionAnswering.pretrained("bert_qa_finetuned_uia","en")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq(("What's my name?","My name is Clara and I live in Berkeley.")).toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|bert_qa_finetuned_uia|
+|Compatibility:|Spark NLP 5.2.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|403.7 MB|
+|Case sensitive:|true|
+|Max sentence length:|512|
+
+## References
+
+References
+
+- https://huggingface.co/eibakke/bert-finetuned-uia
\ No newline at end of file
diff --git a/docs/_posts/ahmedlone127/2023-11-14-bert_qa_firmanindolanguagemodel_id.md b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_firmanindolanguagemodel_id.md
new file mode 100644
index 00000000000000..35eb8791908e3d
--- /dev/null
+++ b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_firmanindolanguagemodel_id.md
@@ -0,0 +1,100 @@
+---
+layout: model
+title: Indonesian BertForQuestionAnswering Cased model (from FirmanBr)
+author: John Snow Labs
+name: bert_qa_firmanindolanguagemodel
+date: 2023-11-14
+tags: [id, open_source, bert, question_answering, onnx]
+task: Question Answering
+language: id
+edition: Spark NLP 5.2.0
+spark_version: 3.0
+supported: true
+engine: onnx
+annotator: BertForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained Question Answering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `FirmanIndoLanguageModel` is a Indonesian model originally trained by `FirmanBr`.
+
+## Predicted Entities
+
+
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/bert_qa_firmanindolanguagemodel_id_5.2.0_3.0_1700004939155.zip){:.button.button-orange.button-orange-trans.arr.button-icon}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/bert_qa_firmanindolanguagemodel_id_5.2.0_3.0_1700004939155.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+documentAssembler = MultiDocumentAssembler() \
+ .setInputCols(["question", "context"]) \
+ .setOutputCols(["document_question", "document_context"])
+
+spanClassifier = BertForQuestionAnswering.pretrained("bert_qa_firmanindolanguagemodel","id") \
+ .setInputCols(["document_question", "document_context"]) \
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[documentAssembler, spanClassifier])
+
+data = spark.createDataFrame([["Siapa namaku?", "Nama saya Clara dan saya tinggal di Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val documentAssembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val spanClassifier = BertForQuestionAnswering.pretrained("bert_qa_firmanindolanguagemodel","id")
+  .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(documentAssembler, spanClassifier))
+
+val data = Seq(("Siapa namaku?", "Nama saya Clara dan saya tinggal di Berkeley.")).toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+{:.nlu-block}
+```python
+import nlu
+nlu.load("id.answer_question.bert.lang").predict("""Siapa namaku?|||"Nama saya Clara dan saya tinggal di Berkeley.""")
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|bert_qa_firmanindolanguagemodel|
+|Compatibility:|Spark NLP 5.2.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|id|
+|Size:|412.4 MB|
+|Case sensitive:|true|
+|Max sentence length:|512|
+
+## References
+
+References
+
+- https://huggingface.co/FirmanBr/FirmanIndoLanguageModel
\ No newline at end of file
diff --git a/docs/_posts/ahmedlone127/2023-11-14-bert_qa_fpdm_bert_ft_nepal_bhasa_newsqa_en.md b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_fpdm_bert_ft_nepal_bhasa_newsqa_en.md
new file mode 100644
index 00000000000000..d19ae638daf707
--- /dev/null
+++ b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_fpdm_bert_ft_nepal_bhasa_newsqa_en.md
@@ -0,0 +1,93 @@
+---
+layout: model
+title: English bert_qa_fpdm_bert_ft_nepal_bhasa_newsqa BertForQuestionAnswering from AnonymousSub
+author: John Snow Labs
+name: bert_qa_fpdm_bert_ft_nepal_bhasa_newsqa
+date: 2023-11-14
+tags: [bert, en, open_source, question_answering, onnx]
+task: Question Answering
+language: en
+edition: Spark NLP 5.2.0
+spark_version: 3.0
+supported: true
+engine: onnx
+annotator: BertForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained BertForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP.`bert_qa_fpdm_bert_ft_nepal_bhasa_newsqa` is a English model originally trained by AnonymousSub.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/bert_qa_fpdm_bert_ft_nepal_bhasa_newsqa_en_5.2.0_3.0_1700005142570.zip){:.button.button-orange.button-orange-trans.arr.button-icon}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/bert_qa_fpdm_bert_ft_nepal_bhasa_newsqa_en_5.2.0_3.0_1700005142570.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|bert_qa_fpdm_triplet_bert_ft_newsqa|
+|Compatibility:|Spark NLP 5.2.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[sentence, token]|
+|Output Labels:|[embeddings]|
+|Language:|en|
+|Size:|407.2 MB|
+|Case sensitive:|true|
+|Max sentence length:|512|
+
+## References
+
+https://huggingface.co/AnonymousSub/fpdm_triplet_bert_FT_newsqa
\ No newline at end of file
diff --git a/docs/_posts/ahmedlone127/2023-11-14-bert_qa_hebert_finetuned_hebrew_squad_he.md b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_hebert_finetuned_hebrew_squad_he.md
new file mode 100644
index 00000000000000..d76340e5aca657
--- /dev/null
+++ b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_hebert_finetuned_hebrew_squad_he.md
@@ -0,0 +1,108 @@
+---
+layout: model
+title: Hebrew BertForQuestionAnswering model (from tdklab)
+author: John Snow Labs
+name: bert_qa_hebert_finetuned_hebrew_squad
+date: 2023-11-14
+tags: [he, open_source, question_answering, bert, onnx]
+task: Question Answering
+language: he
+edition: Spark NLP 5.2.0
+spark_version: 3.0
+supported: true
+engine: onnx
+annotator: BertForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained Question Answering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `hebert-finetuned-hebrew-squad` is a Hebrew model originally trained by `tdklab`.
+
+## Predicted Entities
+
+
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/bert_qa_hebert_finetuned_hebrew_squad_he_5.2.0_3.0_1700004422238.zip){:.button.button-orange.button-orange-trans.arr.button-icon}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/bert_qa_hebert_finetuned_hebrew_squad_he_5.2.0_3.0_1700004422238.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+document_assembler = MultiDocumentAssembler() \
+.setInputCols(["question", "context"]) \
+.setOutputCols(["document_question", "document_context"])
+
+spanClassifier = BertForQuestionAnswering.pretrained("bert_qa_hebert_finetuned_hebrew_squad","he") \
+.setInputCols(["document_question", "document_context"]) \
+.setOutputCol("answer") \
+.setCaseSensitive(True)
+
+pipeline = Pipeline().setStages([
+document_assembler,
+spanClassifier
+])
+
+example = spark.createDataFrame([["What's my name?", "My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(example).transform(example)
+```
+```scala
+val document = new MultiDocumentAssembler()
+.setInputCols("question", "context")
+.setOutputCols("document_question", "document_context")
+
+val spanClassifier = BertForQuestionAnswering
+.pretrained("bert_qa_hebert_finetuned_hebrew_squad","he")
+.setInputCols(Array("document_question", "document_context"))
+.setOutputCol("answer")
+.setCaseSensitive(true)
+.setMaxSentenceLength(512)
+
+val pipeline = new Pipeline().setStages(Array(document, spanClassifier))
+
+val example = Seq(
+("Where was John Lenon born?", "John Lenon was born in London and lived in Paris. My name is Sarah and I live in London."),
+("What's my name?", "My name is Clara and I live in Berkeley."))
+.toDF("question", "context")
+
+val result = pipeline.fit(example).transform(example)
+```
+
+{:.nlu-block}
+```python
+import nlu
+nlu.load("he.answer_question.squad.bert").predict("""What's my name?|||"My name is Clara and I live in Berkeley.""")
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|bert_qa_hebert_finetuned_hebrew_squad|
+|Compatibility:|Spark NLP 5.2.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|he|
+|Size:|408.1 MB|
+|Case sensitive:|true|
+|Max sentence length:|512|
+
+## References
+
+References
+
+- https://huggingface.co/tdklab/hebert-finetuned-hebrew-squad
\ No newline at end of file
diff --git a/docs/_posts/ahmedlone127/2023-11-14-bert_qa_hendrixcosta_en.md b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_hendrixcosta_en.md
new file mode 100644
index 00000000000000..d3a51f983ed0fa
--- /dev/null
+++ b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_hendrixcosta_en.md
@@ -0,0 +1,100 @@
+---
+layout: model
+title: English BertForQuestionAnswering Cased model (from hendrixcosta)
+author: John Snow Labs
+name: bert_qa_hendrixcosta
+date: 2023-11-14
+tags: [en, open_source, bert, question_answering, onnx]
+task: Question Answering
+language: en
+edition: Spark NLP 5.2.0
+spark_version: 3.0
+supported: true
+engine: onnx
+annotator: BertForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained Question Answering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `hendrixcosta` is an English model originally trained by `hendrixcosta`.
+
+## Predicted Entities
+
+
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/bert_qa_hendrixcosta_en_5.2.0_3.0_1700005668071.zip){:.button.button-orange.button-orange-trans.arr.button-icon}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/bert_qa_hendrixcosta_en_5.2.0_3.0_1700005668071.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+documentAssembler = MultiDocumentAssembler() \
+ .setInputCols(["question", "context"]) \
+ .setOutputCols(["document_question", "document_context"])
+
+spanClassifier = BertForQuestionAnswering.pretrained("bert_qa_hendrixcosta","en") \
+ .setInputCols(["document_question", "document_context"]) \
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[documentAssembler, spanClassifier])
+
+data = spark.createDataFrame([["What is my name?", "My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val documentAssembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val spanClassifier = BertForQuestionAnswering.pretrained("bert_qa_hendrixcosta","en")
+  .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(documentAssembler, spanClassifier))
+
+val data = Seq(("What is my name?", "My name is Clara and I live in Berkeley.")).toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+{:.nlu-block}
+```python
+import nlu
+nlu.load("en.answer_question.bert.by_hendrixcosta").predict("""What is my name?|||"My name is Clara and I live in Berkeley.""")
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|bert_qa_hendrixcosta|
+|Compatibility:|Spark NLP 5.2.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|404.6 MB|
+|Case sensitive:|true|
+|Max sentence length:|512|
+
+## References
+
+References
+
+- https://huggingface.co/hendrixcosta/hendrixcosta
\ No newline at end of file
diff --git a/docs/_posts/ahmedlone127/2023-11-14-bert_qa_hf_internal_testing_tiny_random_forquestionanswering_ja.md b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_hf_internal_testing_tiny_random_forquestionanswering_ja.md
new file mode 100644
index 00000000000000..523a3ab58c1493
--- /dev/null
+++ b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_hf_internal_testing_tiny_random_forquestionanswering_ja.md
@@ -0,0 +1,94 @@
+---
+layout: model
+title: Japanese BertForQuestionAnswering Tiny Cased model (from hf-internal-testing)
+author: John Snow Labs
+name: bert_qa_hf_internal_testing_tiny_random_forquestionanswering
+date: 2023-11-14
+tags: [ja, open_source, bert, question_answering, onnx]
+task: Question Answering
+language: ja
+edition: Spark NLP 5.2.0
+spark_version: 3.0
+supported: true
+engine: onnx
+annotator: BertForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained BertForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `tiny-random-BertForQuestionAnswering` is a Japanese model originally trained by `hf-internal-testing`.
+
+## Predicted Entities
+
+
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/bert_qa_hf_internal_testing_tiny_random_forquestionanswering_ja_5.2.0_3.0_1700006333001.zip){:.button.button-orange.button-orange-trans.arr.button-icon}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/bert_qa_hf_internal_testing_tiny_random_forquestionanswering_ja_5.2.0_3.0_1700006333001.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = BertForQuestionAnswering.pretrained("bert_qa_hf_internal_testing_tiny_random_forquestionanswering","ja")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = BertForQuestionAnswering.pretrained("bert_qa_hf_internal_testing_tiny_random_forquestionanswering","ja")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq("What's my name?","My name is Clara and I live in Berkeley.").toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|bert_qa_hf_internal_testing_tiny_random_forquestionanswering|
+|Compatibility:|Spark NLP 5.2.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|ja|
+|Size:|346.4 KB|
+|Case sensitive:|true|
+|Max sentence length:|512|
+
+## References
+
+References
+
+- https://huggingface.co/hf-internal-testing/tiny-random-BertForQuestionAnswering
\ No newline at end of file
diff --git a/docs/_posts/ahmedlone127/2023-11-14-bert_qa_hkhkhkhk_finetuned_squad_en.md b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_hkhkhkhk_finetuned_squad_en.md
new file mode 100644
index 00000000000000..507c72cd5b5b98
--- /dev/null
+++ b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_hkhkhkhk_finetuned_squad_en.md
@@ -0,0 +1,94 @@
+---
+layout: model
+title: English BertForQuestionAnswering Cased model (from HKHKHKHK)
+author: John Snow Labs
+name: bert_qa_hkhkhkhk_finetuned_squad
+date: 2023-11-14
+tags: [en, open_source, bert, question_answering, onnx]
+task: Question Answering
+language: en
+edition: Spark NLP 5.2.0
+spark_version: 3.0
+supported: true
+engine: onnx
+annotator: BertForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained BertForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `bert-finetuned-squad` is an English model originally trained by `HKHKHKHK`.
+
+## Predicted Entities
+
+
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/bert_qa_hkhkhkhk_finetuned_squad_en_5.2.0_3.0_1699998618156.zip){:.button.button-orange.button-orange-trans.arr.button-icon}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/bert_qa_hkhkhkhk_finetuned_squad_en_5.2.0_3.0_1699998618156.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = BertForQuestionAnswering.pretrained("bert_qa_hkhkhkhk_finetuned_squad","en")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = BertForQuestionAnswering.pretrained("bert_qa_hkhkhkhk_finetuned_squad","en")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq("What's my name?","My name is Clara and I live in Berkeley.").toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|bert_qa_hkhkhkhk_finetuned_squad|
+|Compatibility:|Spark NLP 5.2.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|403.6 MB|
+|Case sensitive:|true|
+|Max sentence length:|512|
+
+## References
+
+References
+
+- https://huggingface.co/HKHKHKHK/bert-finetuned-squad
\ No newline at end of file
diff --git a/docs/_posts/ahmedlone127/2023-11-14-bert_qa_huawei_noahtiny_general_6l_768_hotpot_en.md b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_huawei_noahtiny_general_6l_768_hotpot_en.md
new file mode 100644
index 00000000000000..4aa9c963426a49
--- /dev/null
+++ b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_huawei_noahtiny_general_6l_768_hotpot_en.md
@@ -0,0 +1,94 @@
+---
+layout: model
+title: English BertForQuestionAnswering Tiny Cased model (from DL4NLP-Group4)
+author: John Snow Labs
+name: bert_qa_huawei_noahtiny_general_6l_768_hotpot
+date: 2023-11-14
+tags: [en, open_source, bert, question_answering, onnx]
+task: Question Answering
+language: en
+edition: Spark NLP 5.2.0
+spark_version: 3.0
+supported: true
+engine: onnx
+annotator: BertForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained BertForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `huawei-noahTinyBERT_General_6L_768_HotpotQA` is an English model originally trained by `DL4NLP-Group4`.
+
+## Predicted Entities
+
+
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/bert_qa_huawei_noahtiny_general_6l_768_hotpot_en_5.2.0_3.0_1699999043426.zip){:.button.button-orange.button-orange-trans.arr.button-icon}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/bert_qa_huawei_noahtiny_general_6l_768_hotpot_en_5.2.0_3.0_1699999043426.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = BertForQuestionAnswering.pretrained("bert_qa_huawei_noahtiny_general_6l_768_hotpot","en")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = BertForQuestionAnswering.pretrained("bert_qa_huawei_noahtiny_general_6l_768_hotpot","en")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq("What's my name?","My name is Clara and I live in Berkeley.").toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|bert_qa_huawei_noahtiny_general_6l_768_hotpot|
+|Compatibility:|Spark NLP 5.2.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|248.9 MB|
+|Case sensitive:|true|
+|Max sentence length:|512|
+
+## References
+
+References
+
+- https://huggingface.co/DL4NLP-Group4/huawei-noahTinyBERT_General_6L_768_HotpotQA
\ No newline at end of file
diff --git a/docs/_posts/ahmedlone127/2023-11-14-bert_qa_huggingface_course_bert_finetuned_squad_accelerate_en.md b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_huggingface_course_bert_finetuned_squad_accelerate_en.md
new file mode 100644
index 00000000000000..3b759547932e8d
--- /dev/null
+++ b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_huggingface_course_bert_finetuned_squad_accelerate_en.md
@@ -0,0 +1,108 @@
+---
+layout: model
+title: English BertForQuestionAnswering model (from huggingface-course)
+author: John Snow Labs
+name: bert_qa_huggingface_course_bert_finetuned_squad_accelerate
+date: 2023-11-14
+tags: [en, open_source, question_answering, bert, onnx]
+task: Question Answering
+language: en
+edition: Spark NLP 5.2.0
+spark_version: 3.0
+supported: true
+engine: onnx
+annotator: BertForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained Question Answering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `bert-finetuned-squad-accelerate` is an English model originally trained by `huggingface-course`.
+
+## Predicted Entities
+
+
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/bert_qa_huggingface_course_bert_finetuned_squad_accelerate_en_5.2.0_3.0_1700006192068.zip){:.button.button-orange.button-orange-trans.arr.button-icon}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/bert_qa_huggingface_course_bert_finetuned_squad_accelerate_en_5.2.0_3.0_1700006192068.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+document_assembler = MultiDocumentAssembler() \
+.setInputCols(["question", "context"]) \
+.setOutputCols(["document_question", "document_context"])
+
+spanClassifier = BertForQuestionAnswering.pretrained("bert_qa_huggingface_course_bert_finetuned_squad_accelerate","en") \
+.setInputCols(["document_question", "document_context"]) \
+.setOutputCol("answer") \
+.setCaseSensitive(True)
+
+pipeline = Pipeline().setStages([
+document_assembler,
+spanClassifier
+])
+
+example = spark.createDataFrame([["What's my name?", "My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(example).transform(example)
+```
+```scala
+val document = new MultiDocumentAssembler()
+.setInputCols("question", "context")
+.setOutputCols("document_question", "document_context")
+
+val spanClassifier = BertForQuestionAnswering
+.pretrained("bert_qa_huggingface_course_bert_finetuned_squad_accelerate","en")
+.setInputCols(Array("document_question", "document_context"))
+.setOutputCol("answer")
+.setCaseSensitive(true)
+.setMaxSentenceLength(512)
+
+val pipeline = new Pipeline().setStages(Array(document, spanClassifier))
+
+val example = Seq(
+("Where was John Lenon born?", "John Lenon was born in London and lived in Paris. My name is Sarah and I live in London."),
+("What's my name?", "My name is Clara and I live in Berkeley."))
+.toDF("question", "context")
+
+val result = pipeline.fit(example).transform(example)
+```
+
+{:.nlu-block}
+```python
+import nlu
+nlu.load("en.answer_question.squad.bert.accelerate.by_huggingface-course").predict("""What's my name?|||"My name is Clara and I live in Berkeley.""")
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|bert_qa_huggingface_course_bert_finetuned_squad_accelerate|
+|Compatibility:|Spark NLP 5.2.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|403.6 MB|
+|Case sensitive:|true|
+|Max sentence length:|512|
+
+## References
+
+References
+
+- https://huggingface.co/huggingface-course/bert-finetuned-squad-accelerate
\ No newline at end of file
diff --git a/docs/_posts/ahmedlone127/2023-11-14-bert_qa_huggingface_course_bert_finetuned_squad_en.md b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_huggingface_course_bert_finetuned_squad_en.md
new file mode 100644
index 00000000000000..8d5d3fbef46d20
--- /dev/null
+++ b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_huggingface_course_bert_finetuned_squad_en.md
@@ -0,0 +1,108 @@
+---
+layout: model
+title: English BertForQuestionAnswering model (from huggingface-course)
+author: John Snow Labs
+name: bert_qa_huggingface_course_bert_finetuned_squad
+date: 2023-11-14
+tags: [en, open_source, question_answering, bert, onnx]
+task: Question Answering
+language: en
+edition: Spark NLP 5.2.0
+spark_version: 3.0
+supported: true
+engine: onnx
+annotator: BertForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained Question Answering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `bert-finetuned-squad` is an English model originally trained by `huggingface-course`.
+
+## Predicted Entities
+
+
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/bert_qa_huggingface_course_bert_finetuned_squad_en_5.2.0_3.0_1700005924998.zip){:.button.button-orange.button-orange-trans.arr.button-icon}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/bert_qa_huggingface_course_bert_finetuned_squad_en_5.2.0_3.0_1700005924998.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+document_assembler = MultiDocumentAssembler() \
+.setInputCols(["question", "context"]) \
+.setOutputCols(["document_question", "document_context"])
+
+spanClassifier = BertForQuestionAnswering.pretrained("bert_qa_huggingface_course_bert_finetuned_squad","en") \
+.setInputCols(["document_question", "document_context"]) \
+.setOutputCol("answer") \
+.setCaseSensitive(True)
+
+pipeline = Pipeline().setStages([
+document_assembler,
+spanClassifier
+])
+
+example = spark.createDataFrame([["What's my name?", "My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(example).transform(example)
+```
+```scala
+val document = new MultiDocumentAssembler()
+.setInputCols("question", "context")
+.setOutputCols("document_question", "document_context")
+
+val spanClassifier = BertForQuestionAnswering
+.pretrained("bert_qa_huggingface_course_bert_finetuned_squad","en")
+.setInputCols(Array("document_question", "document_context"))
+.setOutputCol("answer")
+.setCaseSensitive(true)
+.setMaxSentenceLength(512)
+
+val pipeline = new Pipeline().setStages(Array(document, spanClassifier))
+
+val example = Seq(
+("Where was John Lenon born?", "John Lenon was born in London and lived in Paris. My name is Sarah and I live in London."),
+("What's my name?", "My name is Clara and I live in Berkeley."))
+.toDF("question", "context")
+
+val result = pipeline.fit(example).transform(example)
+```
+
+{:.nlu-block}
+```python
+import nlu
+nlu.load("en.answer_question.squad.bert.by_huggingface-course").predict("""What's my name?|||"My name is Clara and I live in Berkeley.""")
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|bert_qa_huggingface_course_bert_finetuned_squad|
+|Compatibility:|Spark NLP 5.2.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|403.6 MB|
+|Case sensitive:|true|
+|Max sentence length:|512|
+
+## References
+
+References
+
+- https://huggingface.co/huggingface-course/bert-finetuned-squad
\ No newline at end of file
diff --git a/docs/_posts/ahmedlone127/2023-11-14-bert_qa_hungarian_fine_tuned_hungarian_squadv2_hu.md b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_hungarian_fine_tuned_hungarian_squadv2_hu.md
new file mode 100644
index 00000000000000..c18ef79d3c607b
--- /dev/null
+++ b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_hungarian_fine_tuned_hungarian_squadv2_hu.md
@@ -0,0 +1,93 @@
+---
+layout: model
+title: Hungarian bert_qa_hungarian_fine_tuned_hungarian_squadv2 BertForQuestionAnswering from mcsabai
+author: John Snow Labs
+name: bert_qa_hungarian_fine_tuned_hungarian_squadv2
+date: 2023-11-14
+tags: [bert, hu, open_source, question_answering, onnx]
+task: Question Answering
+language: hu
+edition: Spark NLP 5.2.0
+spark_version: 3.0
+supported: true
+engine: onnx
+annotator: BertForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained BertForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP.`bert_qa_hungarian_fine_tuned_hungarian_squadv2` is a Hungarian model originally trained by mcsabai.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/bert_qa_hungarian_fine_tuned_hungarian_squadv2_hu_5.2.0_3.0_1699998824803.zip){:.button.button-orange.button-orange-trans.arr.button-icon}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/bert_qa_hungarian_fine_tuned_hungarian_squadv2_hu_5.2.0_3.0_1699998824803.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|bert_qa_hungarian_fine_tuned_hungarian_squadv2|
+|Compatibility:|Spark NLP 5.2.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|hu|
+|Size:|412.4 MB|
+
+## References
+
+https://huggingface.co/mcsabai/huBert-fine-tuned-hungarian-squadv2
\ No newline at end of file
diff --git a/docs/_posts/ahmedlone127/2023-11-14-bert_qa_ixambert_finetuned_squad_en.md b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_ixambert_finetuned_squad_en.md
new file mode 100644
index 00000000000000..22ce3873434d01
--- /dev/null
+++ b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_ixambert_finetuned_squad_en.md
@@ -0,0 +1,108 @@
+---
+layout: model
+title: English BertForQuestionAnswering model (from MarcBrun)
+author: John Snow Labs
+name: bert_qa_ixambert_finetuned_squad
+date: 2023-11-14
+tags: [en, open_source, question_answering, bert, onnx]
+task: Question Answering
+language: en
+edition: Spark NLP 5.2.0
+spark_version: 3.0
+supported: true
+engine: onnx
+annotator: BertForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained Question Answering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `ixambert-finetuned-squad` is an English model originally trained by `MarcBrun`.
+
+## Predicted Entities
+
+
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/bert_qa_ixambert_finetuned_squad_en_5.2.0_3.0_1700004747005.zip){:.button.button-orange.button-orange-trans.arr.button-icon}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/bert_qa_ixambert_finetuned_squad_en_5.2.0_3.0_1700004747005.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+document_assembler = MultiDocumentAssembler() \
+.setInputCols(["question", "context"]) \
+.setOutputCols(["document_question", "document_context"])
+
+spanClassifier = BertForQuestionAnswering.pretrained("bert_qa_ixambert_finetuned_squad","en") \
+.setInputCols(["document_question", "document_context"]) \
+.setOutputCol("answer") \
+.setCaseSensitive(True)
+
+pipeline = Pipeline().setStages([
+document_assembler,
+spanClassifier
+])
+
+example = spark.createDataFrame([["What's my name?", "My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(example).transform(example)
+```
+```scala
+val document = new MultiDocumentAssembler()
+.setInputCols("question", "context")
+.setOutputCols("document_question", "document_context")
+
+val spanClassifier = BertForQuestionAnswering
+.pretrained("bert_qa_ixambert_finetuned_squad","en")
+.setInputCols(Array("document_question", "document_context"))
+.setOutputCol("answer")
+.setCaseSensitive(true)
+.setMaxSentenceLength(512)
+
+val pipeline = new Pipeline().setStages(Array(document, spanClassifier))
+
+val example = Seq(
+("Where was John Lenon born?", "John Lenon was born in London and lived in Paris. My name is Sarah and I live in London."),
+("What's my name?", "My name is Clara and I live in Berkeley."))
+.toDF("question", "context")
+
+val result = pipeline.fit(example).transform(example)
+```
+
+{:.nlu-block}
+```python
+import nlu
+nlu.load("en.answer_question.squad.ixam_bert.by_MarcBrun").predict("""What's my name?|||"My name is Clara and I live in Berkeley.""")
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|bert_qa_ixambert_finetuned_squad|
+|Compatibility:|Spark NLP 5.2.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|661.1 MB|
+|Case sensitive:|true|
+|Max sentence length:|512|
+
+## References
+
+References
+
+- https://huggingface.co/MarcBrun/ixambert-finetuned-squad
\ No newline at end of file
diff --git a/docs/_posts/ahmedlone127/2023-11-14-bert_qa_jatinshah_bert_finetuned_squad_en.md b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_jatinshah_bert_finetuned_squad_en.md
new file mode 100644
index 00000000000000..c0eb458433f6fc
--- /dev/null
+++ b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_jatinshah_bert_finetuned_squad_en.md
@@ -0,0 +1,108 @@
+---
+layout: model
+title: English BertForQuestionAnswering model (from jatinshah)
+author: John Snow Labs
+name: bert_qa_jatinshah_bert_finetuned_squad
+date: 2023-11-14
+tags: [en, open_source, question_answering, bert, onnx]
+task: Question Answering
+language: en
+edition: Spark NLP 5.2.0
+spark_version: 3.0
+supported: true
+engine: onnx
+annotator: BertForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained Question Answering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `bert-finetuned-squad` is an English model originally trained by `jatinshah`.
+
+## Predicted Entities
+
+
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/bert_qa_jatinshah_bert_finetuned_squad_en_5.2.0_3.0_1700005039392.zip){:.button.button-orange.button-orange-trans.arr.button-icon}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/bert_qa_jatinshah_bert_finetuned_squad_en_5.2.0_3.0_1700005039392.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+document_assembler = MultiDocumentAssembler() \
+.setInputCols(["question", "context"]) \
+.setOutputCols(["document_question", "document_context"])
+
+spanClassifier = BertForQuestionAnswering.pretrained("bert_qa_jatinshah_bert_finetuned_squad","en") \
+.setInputCols(["document_question", "document_context"]) \
+.setOutputCol("answer") \
+.setCaseSensitive(True)
+
+pipeline = Pipeline().setStages([
+document_assembler,
+spanClassifier
+])
+
+example = spark.createDataFrame([["What's my name?", "My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(example).transform(example)
+```
+```scala
+val document = new MultiDocumentAssembler()
+.setInputCols("question", "context")
+.setOutputCols("document_question", "document_context")
+
+val spanClassifier = BertForQuestionAnswering
+.pretrained("bert_qa_jatinshah_bert_finetuned_squad","en")
+.setInputCols(Array("document_question", "document_context"))
+.setOutputCol("answer")
+.setCaseSensitive(true)
+.setMaxSentenceLength(512)
+
+val pipeline = new Pipeline().setStages(Array(document, spanClassifier))
+
+val example = Seq(
+("Where was John Lenon born?", "John Lenon was born in London and lived in Paris. My name is Sarah and I live in London."),
+("What's my name?", "My name is Clara and I live in Berkeley."))
+.toDF("question", "context")
+
+val result = pipeline.fit(example).transform(example)
+```
+
+{:.nlu-block}
+```python
+import nlu
+nlu.load("en.answer_question.squad.bert.by_jatinshah").predict("""What's my name?|||"My name is Clara and I live in Berkeley.""")
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|bert_qa_jatinshah_bert_finetuned_squad|
+|Compatibility:|Spark NLP 5.2.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|403.6 MB|
+|Case sensitive:|true|
+|Max sentence length:|512|
+
+## References
+
+References
+
+- https://huggingface.co/jatinshah/bert-finetuned-squad
\ No newline at end of file
diff --git a/docs/_posts/ahmedlone127/2023-11-14-bert_qa_kd_squad1.1_en.md b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_kd_squad1.1_en.md
new file mode 100644
index 00000000000000..5194f4d09319a9
--- /dev/null
+++ b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_kd_squad1.1_en.md
@@ -0,0 +1,100 @@
+---
+layout: model
+title: English BertForQuestionAnswering Cased model (from maroo93)
+author: John Snow Labs
+name: bert_qa_kd_squad1.1
+date: 2023-11-14
+tags: [en, open_source, bert, question_answering, onnx]
+task: Question Answering
+language: en
+edition: Spark NLP 5.2.0
+spark_version: 3.0
+supported: true
+engine: onnx
+annotator: BertForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained Question Answering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `kd_squad1.1` is an English model originally trained by `maroo93`.
+
+## Predicted Entities
+
+
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/bert_qa_kd_squad1.1_en_5.2.0_3.0_1700005266222.zip){:.button.button-orange.button-orange-trans.arr.button-icon}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/bert_qa_kd_squad1.1_en_5.2.0_3.0_1700005266222.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+documentAssembler = MultiDocumentAssembler() \
+ .setInputCols(["question", "context"]) \
+ .setOutputCols(["document_question", "document_context"])
+
+spanClassifier = BertForQuestionAnswering.pretrained("bert_qa_kd_squad1.1","en") \
+ .setInputCols(["document_question", "document_context"]) \
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[documentAssembler, spanClassifier])
+
+data = spark.createDataFrame([["What is my name?", "My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val documentAssembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val spanClassifier = BertForQuestionAnswering.pretrained("bert_qa_kd_squad1.1","en")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(documentAssembler, spanClassifier))
+
+val data = Seq(("What is my name?", "My name is Clara and I live in Berkeley.")).toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+{:.nlu-block}
+```python
+import nlu
+nlu.load("en.answer_question.bert.squad.").predict("""What is my name?|||"My name is Clara and I live in Berkeley.""")
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|bert_qa_kd_squad1.1|
+|Compatibility:|Spark NLP 5.2.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|249.1 MB|
+|Case sensitive:|true|
+|Max sentence length:|512|
+
+## References
+
+References
+
+- https://huggingface.co/maroo93/kd_squad1.1
\ No newline at end of file
diff --git a/docs/_posts/ahmedlone127/2023-11-14-bert_qa_kflash_finetuned_squad_accelera_en.md b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_kflash_finetuned_squad_accelera_en.md
new file mode 100644
index 00000000000000..3c577cb74e2150
--- /dev/null
+++ b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_kflash_finetuned_squad_accelera_en.md
@@ -0,0 +1,100 @@
+---
+layout: model
+title: English BertForQuestionAnswering Cased model (from KFlash)
+author: John Snow Labs
+name: bert_qa_kflash_finetuned_squad_accelera
+date: 2023-11-14
+tags: [en, open_source, bert, question_answering, onnx]
+task: Question Answering
+language: en
+edition: Spark NLP 5.2.0
+spark_version: 3.0
+supported: true
+engine: onnx
+annotator: BertForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained Question Answering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `bert-finetuned-squad-accelerate` is an English model originally trained by `KFlash`.
+
+## Predicted Entities
+
+
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/bert_qa_kflash_finetuned_squad_accelera_en_5.2.0_3.0_1700005786539.zip){:.button.button-orange.button-orange-trans.arr.button-icon}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/bert_qa_kflash_finetuned_squad_accelera_en_5.2.0_3.0_1700005786539.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+documentAssembler = MultiDocumentAssembler() \
+ .setInputCols(["question", "context"]) \
+ .setOutputCols(["document_question", "document_context"])
+
+spanClassifier = BertForQuestionAnswering.pretrained("bert_qa_kflash_finetuned_squad_accelera","en") \
+ .setInputCols(["document_question", "document_context"]) \
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[documentAssembler, spanClassifier])
+
+data = spark.createDataFrame([["What is my name?", "My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val documentAssembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val spanClassifier = BertForQuestionAnswering.pretrained("bert_qa_kflash_finetuned_squad_accelera","en")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(documentAssembler, spanClassifier))
+
+val data = Seq(("What is my name?", "My name is Clara and I live in Berkeley.")).toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+{:.nlu-block}
+```python
+import nlu
+nlu.load("en.answer_question.bert.squad.finetuned_squad_accelera.by_KFlash").predict("""What is my name?|||"My name is Clara and I live in Berkeley.""")
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|bert_qa_kflash_finetuned_squad_accelera|
+|Compatibility:|Spark NLP 5.2.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|403.6 MB|
+|Case sensitive:|true|
+|Max sentence length:|512|
+
+## References
+
+References
+
+- https://huggingface.co/KFlash/bert-finetuned-squad-accelerate
\ No newline at end of file
diff --git a/docs/_posts/ahmedlone127/2023-11-14-bert_qa_kflash_finetuned_squad_en.md b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_kflash_finetuned_squad_en.md
new file mode 100644
index 00000000000000..11cb601344798e
--- /dev/null
+++ b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_kflash_finetuned_squad_en.md
@@ -0,0 +1,100 @@
+---
+layout: model
+title: English BertForQuestionAnswering Cased model (from KFlash)
+author: John Snow Labs
+name: bert_qa_kflash_finetuned_squad
+date: 2023-11-14
+tags: [en, open_source, bert, question_answering, onnx]
+task: Question Answering
+language: en
+edition: Spark NLP 5.2.0
+spark_version: 3.0
+supported: true
+engine: onnx
+annotator: BertForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained Question Answering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `bert-finetuned-squad` is an English model originally trained by `KFlash`.
+
+## Predicted Entities
+
+
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/bert_qa_kflash_finetuned_squad_en_5.2.0_3.0_1700005489077.zip){:.button.button-orange.button-orange-trans.arr.button-icon}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/bert_qa_kflash_finetuned_squad_en_5.2.0_3.0_1700005489077.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+documentAssembler = MultiDocumentAssembler() \
+ .setInputCols(["question", "context"]) \
+ .setOutputCols(["document_question", "document_context"])
+
+spanClassifier = BertForQuestionAnswering.pretrained("bert_qa_kflash_finetuned_squad","en") \
+ .setInputCols(["document_question", "document_context"]) \
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[documentAssembler, spanClassifier])
+
+data = spark.createDataFrame([["What is my name?", "My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val documentAssembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val spanClassifier = BertForQuestionAnswering.pretrained("bert_qa_kflash_finetuned_squad","en")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(documentAssembler, spanClassifier))
+
+val data = Seq(("What is my name?", "My name is Clara and I live in Berkeley.")).toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+{:.nlu-block}
+```python
+import nlu
+nlu.load("en.answer_question.bert.squad.finetuned_squad.by_KFlash").predict("""What is my name?|||"My name is Clara and I live in Berkeley.""")
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|bert_qa_kflash_finetuned_squad|
+|Compatibility:|Spark NLP 5.2.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|403.6 MB|
+|Case sensitive:|true|
+|Max sentence length:|512|
+
+## References
+
+References
+
+- https://huggingface.co/KFlash/bert-finetuned-squad
\ No newline at end of file
diff --git a/docs/_posts/ahmedlone127/2023-11-14-bert_qa_kobert_finetuned_klue_v2_ko.md b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_kobert_finetuned_klue_v2_ko.md
new file mode 100644
index 00000000000000..7c9ccdfaecb570
--- /dev/null
+++ b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_kobert_finetuned_klue_v2_ko.md
@@ -0,0 +1,94 @@
+---
+layout: model
+title: Korean BertForQuestionAnswering Cased model (from obokkkk)
+author: John Snow Labs
+name: bert_qa_kobert_finetuned_klue_v2
+date: 2023-11-14
+tags: [ko, open_source, bert, question_answering, onnx]
+task: Question Answering
+language: ko
+edition: Spark NLP 5.2.0
+spark_version: 3.0
+supported: true
+engine: onnx
+annotator: BertForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained Question Answering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `kobert-finetuned-klue-v2` is a Korean model originally trained by `obokkkk`.
+
+## Predicted Entities
+
+
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/bert_qa_kobert_finetuned_klue_v2_ko_5.2.0_3.0_1700006185949.zip){:.button.button-orange.button-orange-trans.arr.button-icon}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/bert_qa_kobert_finetuned_klue_v2_ko_5.2.0_3.0_1700006185949.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+documentAssembler = MultiDocumentAssembler() \
+ .setInputCols(["question", "context"]) \
+ .setOutputCols(["document_question", "document_context"])
+
+spanClassifier = BertForQuestionAnswering.pretrained("bert_qa_kobert_finetuned_klue_v2","ko") \
+ .setInputCols(["document_question", "document_context"]) \
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[documentAssembler, spanClassifier])
+
+data = spark.createDataFrame([["내 이름은 무엇입니까?", "제 이름은 클라라이고 저는 버클리에 살고 있습니다."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val documentAssembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val spanClassifier = BertForQuestionAnswering.pretrained("bert_qa_kobert_finetuned_klue_v2","ko")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(documentAssembler, spanClassifier))
+
+val data = Seq(("내 이름은 무엇입니까?", "제 이름은 클라라이고 저는 버클리에 살고 있습니다.")).toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|bert_qa_kobert_finetuned_klue_v2|
+|Compatibility:|Spark NLP 5.2.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|ko|
+|Size:|342.9 MB|
+|Case sensitive:|true|
+|Max sentence length:|512|
+
+## References
+
+References
+
+- https://huggingface.co/obokkkk/kobert-finetuned-klue-v2
\ No newline at end of file
diff --git a/docs/_posts/ahmedlone127/2023-11-14-bert_qa_komrc_train_ko.md b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_komrc_train_ko.md
new file mode 100644
index 00000000000000..730b9d8c587438
--- /dev/null
+++ b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_komrc_train_ko.md
@@ -0,0 +1,94 @@
+---
+layout: model
+title: Korean BertForQuestionAnswering Cased model (from Taekyoon)
+author: John Snow Labs
+name: bert_qa_komrc_train
+date: 2023-11-14
+tags: [ko, open_source, bert, question_answering, onnx]
+task: Question Answering
+language: ko
+edition: Spark NLP 5.2.0
+spark_version: 3.0
+supported: true
+engine: onnx
+annotator: BertForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained Question Answering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `komrc_train` is a Korean model originally trained by `Taekyoon`.
+
+## Predicted Entities
+
+
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/bert_qa_komrc_train_ko_5.2.0_3.0_1699999319111.zip){:.button.button-orange.button-orange-trans.arr.button-icon}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/bert_qa_komrc_train_ko_5.2.0_3.0_1699999319111.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+documentAssembler = MultiDocumentAssembler() \
+ .setInputCols(["question", "context"]) \
+ .setOutputCols(["document_question", "document_context"])
+
+spanClassifier = BertForQuestionAnswering.pretrained("bert_qa_komrc_train","ko") \
+ .setInputCols(["document_question", "document_context"]) \
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[documentAssembler, spanClassifier])
+
+data = spark.createDataFrame([["내 이름은 무엇입니까?", "제 이름은 클라라이고 저는 버클리에 살고 있습니다."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val documentAssembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val spanClassifier = BertForQuestionAnswering.pretrained("bert_qa_komrc_train","ko")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(documentAssembler, spanClassifier))
+
+val data = Seq(("내 이름은 무엇입니까?", "제 이름은 클라라이고 저는 버클리에 살고 있습니다.")).toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|bert_qa_komrc_train|
+|Compatibility:|Spark NLP 5.2.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|ko|
+|Size:|406.2 MB|
+|Case sensitive:|true|
+|Max sentence length:|512|
+
+## References
+
+References
+
+- https://huggingface.co/Taekyoon/komrc_train
\ No newline at end of file
diff --git a/docs/_posts/ahmedlone127/2023-11-14-bert_qa_korean_finetuned_klue_v2_ko.md b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_korean_finetuned_klue_v2_ko.md
new file mode 100644
index 00000000000000..fd1afd3cac74ae
--- /dev/null
+++ b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_korean_finetuned_klue_v2_ko.md
@@ -0,0 +1,93 @@
+---
+layout: model
+title: Korean bert_qa_korean_finetuned_klue_v2 BertForQuestionAnswering from Seongmi
+author: John Snow Labs
+name: bert_qa_korean_finetuned_klue_v2
+date: 2023-11-14
+tags: [bert, ko, open_source, question_answering, onnx]
+task: Question Answering
+language: ko
+edition: Spark NLP 5.2.0
+spark_version: 3.0
+supported: true
+engine: onnx
+annotator: BertForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained BertForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP.`bert_qa_korean_finetuned_klue_v2` is a Korean model originally trained by Seongmi.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/bert_qa_korean_finetuned_klue_v2_ko_5.2.0_3.0_1700005960456.zip){:.button.button-orange.button-orange-trans.arr.button-icon}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/bert_qa_korean_finetuned_klue_v2_ko_5.2.0_3.0_1700005960456.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|bert_qa_korean_finetuned_klue_v2|
+|Compatibility:|Spark NLP 5.2.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|ko|
+|Size:|342.9 MB|
+
+## References
+
+https://huggingface.co/Seongmi/kobert-finetuned-klue-v2
\ No newline at end of file
diff --git a/docs/_posts/ahmedlone127/2023-11-14-bert_qa_large_uncased_finetuned_infovqa_en.md b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_large_uncased_finetuned_infovqa_en.md
new file mode 100644
index 00000000000000..c6cd5cd3b58457
--- /dev/null
+++ b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_large_uncased_finetuned_infovqa_en.md
@@ -0,0 +1,100 @@
+---
+layout: model
+title: English BertForQuestionAnswering Large Uncased model (from tiennvcs)
+author: John Snow Labs
+name: bert_qa_large_uncased_finetuned_infovqa
+date: 2023-11-14
+tags: [en, open_source, bert, question_answering, onnx]
+task: Question Answering
+language: en
+edition: Spark NLP 5.2.0
+spark_version: 3.0
+supported: true
+engine: onnx
+annotator: BertForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained Question Answering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `bert-large-uncased-finetuned-infovqa` is an English model originally trained by `tiennvcs`.
+
+## Predicted Entities
+
+
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/bert_qa_large_uncased_finetuned_infovqa_en_5.2.0_3.0_1699999822368.zip){:.button.button-orange.button-orange-trans.arr.button-icon}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/bert_qa_large_uncased_finetuned_infovqa_en_5.2.0_3.0_1699999822368.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+documentAssembler = MultiDocumentAssembler() \
+ .setInputCols(["question", "context"]) \
+ .setOutputCols(["document_question", "document_context"])
+
+spanClassifier = BertForQuestionAnswering.pretrained("bert_qa_large_uncased_finetuned_infovqa","en") \
+ .setInputCols(["document_question", "document_context"]) \
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[documentAssembler, spanClassifier])
+
+data = spark.createDataFrame([["What is my name?", "My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val documentAssembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val spanClassifier = BertForQuestionAnswering.pretrained("bert_qa_large_uncased_finetuned_infovqa","en")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(documentAssembler, spanClassifier))
+
+val data = Seq(("What is my name?", "My name is Clara and I live in Berkeley.")).toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+{:.nlu-block}
+```python
+import nlu
+nlu.load("en.answer_question.bert.uncased_large_finetuned").predict("""What is my name?|||"My name is Clara and I live in Berkeley.""")
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|bert_qa_large_uncased_finetuned_infovqa|
+|Compatibility:|Spark NLP 5.2.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|1.3 GB|
+|Case sensitive:|false|
+|Max sentence length:|512|
+
+## References
+
+References
+
+- https://huggingface.co/tiennvcs/bert-large-uncased-finetuned-infovqa
\ No newline at end of file
diff --git a/docs/_posts/ahmedlone127/2023-11-14-bert_qa_large_uncased_finetuned_squadv1_en.md b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_large_uncased_finetuned_squadv1_en.md
new file mode 100644
index 00000000000000..d5c84f2e45e280
--- /dev/null
+++ b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_large_uncased_finetuned_squadv1_en.md
@@ -0,0 +1,96 @@
+---
+layout: model
+title: English BertForQuestionAnswering Large Uncased model (from neuralmagic)
+author: John Snow Labs
+name: bert_qa_large_uncased_finetuned_squadv1
+date: 2023-11-14
+tags: [en, open_source, bert, question_answering, onnx]
+task: Question Answering
+language: en
+edition: Spark NLP 5.2.0
+spark_version: 3.0
+supported: true
+engine: onnx
+annotator: BertForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained BertForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `bert-large-uncased-finetuned-squadv1` is an English model originally trained by `neuralmagic`.
+
+## Predicted Entities
+
+
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/bert_qa_large_uncased_finetuned_squadv1_en_5.2.0_3.0_1700000343445.zip){:.button.button-orange.button-orange-trans.arr.button-icon}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/bert_qa_large_uncased_finetuned_squadv1_en_5.2.0_3.0_1700000343445.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = BertForQuestionAnswering.pretrained("bert_qa_large_uncased_finetuned_squadv1","en")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = BertForQuestionAnswering.pretrained("bert_qa_large_uncased_finetuned_squadv1","en")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq(("What's my name?","My name is Clara and I live in Berkeley.")).toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|bert_qa_large_uncased_finetuned_squadv1|
+|Compatibility:|Spark NLP 5.2.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|1.3 GB|
+|Case sensitive:|false|
+|Max sentence length:|512|
+
+## References
+
+References
+
+- https://huggingface.co/neuralmagic/bert-large-uncased-finetuned-squadv1
+- https://arxiv.org/abs/2203.07259
+- https://github.com/neuralmagic/sparseml/tree/main/research/optimal_BERT_surgeon_oBERT
\ No newline at end of file
diff --git a/docs/_posts/ahmedlone127/2023-11-14-bert_qa_large_uncased_finetuned_vietnamese_infovqa_en.md b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_large_uncased_finetuned_vietnamese_infovqa_en.md
new file mode 100644
index 00000000000000..f2358b6349e4ae
--- /dev/null
+++ b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_large_uncased_finetuned_vietnamese_infovqa_en.md
@@ -0,0 +1,93 @@
+---
+layout: model
+title: English bert_qa_large_uncased_finetuned_vietnamese_infovqa BertForQuestionAnswering from tiennvcs
+author: John Snow Labs
+name: bert_qa_large_uncased_finetuned_vietnamese_infovqa
+date: 2023-11-14
+tags: [bert, en, open_source, question_answering, onnx]
+task: Question Answering
+language: en
+edition: Spark NLP 5.2.0
+spark_version: 3.0
+supported: true
+engine: onnx
+annotator: BertForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained BertForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP.`bert_qa_large_uncased_finetuned_vietnamese_infovqa` is an English model originally trained by tiennvcs.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/bert_qa_large_uncased_finetuned_vietnamese_infovqa_en_5.2.0_3.0_1700000677852.zip){:.button.button-orange.button-orange-trans.arr.button-icon}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/bert_qa_large_uncased_finetuned_vietnamese_infovqa_en_5.2.0_3.0_1700000677852.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|bert_qa_large_uncased_finetuned_vietnamese_infovqa|
+|Compatibility:|Spark NLP 5.2.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|1.3 GB|
+
+## References
+
+https://huggingface.co/tiennvcs/bert-large-uncased-finetuned-vi-infovqa
\ No newline at end of file
diff --git a/docs/_posts/ahmedlone127/2023-11-14-bert_qa_lewtun_finetuned_squad_en.md b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_lewtun_finetuned_squad_en.md
new file mode 100644
index 00000000000000..c5d2acf4e4f638
--- /dev/null
+++ b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_lewtun_finetuned_squad_en.md
@@ -0,0 +1,100 @@
+---
+layout: model
+title: English BertForQuestionAnswering Cased model (from lewtun)
+author: John Snow Labs
+name: bert_qa_lewtun_finetuned_squad
+date: 2023-11-14
+tags: [en, open_source, bert, question_answering, onnx]
+task: Question Answering
+language: en
+edition: Spark NLP 5.2.0
+spark_version: 3.0
+supported: true
+engine: onnx
+annotator: BertForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained Question Answering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `bert-finetuned-squad` is an English model originally trained by `lewtun`.
+
+## Predicted Entities
+
+
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/bert_qa_lewtun_finetuned_squad_en_5.2.0_3.0_1700000994244.zip){:.button.button-orange.button-orange-trans.arr.button-icon}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/bert_qa_lewtun_finetuned_squad_en_5.2.0_3.0_1700000994244.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+documentAssembler = MultiDocumentAssembler() \
+ .setInputCols(["question", "context"]) \
+ .setOutputCols(["document_question", "document_context"])
+
+spanClassifier = BertForQuestionAnswering.pretrained("bert_qa_lewtun_finetuned_squad","en") \
+ .setInputCols(["document_question", "document_context"]) \
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[documentAssembler, spanClassifier])
+
+data = spark.createDataFrame([["What is my name?", "My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val documentAssembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val spanClassifier = BertForQuestionAnswering.pretrained("bert_qa_lewtun_finetuned_squad","en")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(documentAssembler, spanClassifier))
+
+val data = Seq(("What is my name?", "My name is Clara and I live in Berkeley.")).toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+{:.nlu-block}
+```python
+import nlu
+nlu.load("en.answer_question.bert.squad.finetuned.by_lewtun").predict("""What is my name?|||"My name is Clara and I live in Berkeley.""")
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|bert_qa_lewtun_finetuned_squad|
+|Compatibility:|Spark NLP 5.2.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|403.6 MB|
+|Case sensitive:|true|
+|Max sentence length:|512|
+
+## References
+
+References
+
+- https://huggingface.co/lewtun/bert-finetuned-squad
\ No newline at end of file
diff --git a/docs/_posts/ahmedlone127/2023-11-14-bert_qa_linkbert_large_finetuned_squad_en.md b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_linkbert_large_finetuned_squad_en.md
new file mode 100644
index 00000000000000..81bd727cfd82c4
--- /dev/null
+++ b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_linkbert_large_finetuned_squad_en.md
@@ -0,0 +1,109 @@
+---
+layout: model
+title: English BertForQuestionAnswering model (from niklaspm)
+author: John Snow Labs
+name: bert_qa_linkbert_large_finetuned_squad
+date: 2023-11-14
+tags: [en, open_source, question_answering, bert, onnx]
+task: Question Answering
+language: en
+edition: Spark NLP 5.2.0
+spark_version: 3.0
+supported: true
+engine: onnx
+annotator: BertForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained Question Answering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `linkbert-large-finetuned-squad` is a English model originally trained by `niklaspm`.
+
+## Predicted Entities
+
+
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/bert_qa_linkbert_large_finetuned_squad_en_5.2.0_3.0_1700001527794.zip){:.button.button-orange.button-orange-trans.arr.button-icon}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/bert_qa_linkbert_large_finetuned_squad_en_5.2.0_3.0_1700001527794.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+document_assembler = MultiDocumentAssembler() \
+.setInputCols(["question", "context"]) \
+.setOutputCols(["document_question", "document_context"])
+
+spanClassifier = BertForQuestionAnswering.pretrained("bert_qa_linkbert_large_finetuned_squad","en") \
+.setInputCols(["document_question", "document_context"]) \
+.setOutputCol("answer") \
+.setCaseSensitive(True)
+
+pipeline = Pipeline().setStages([
+document_assembler,
+spanClassifier
+])
+
+example = spark.createDataFrame([["What's my name?", "My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(example).transform(example)
+```
+```scala
+val document = new MultiDocumentAssembler()
+.setInputCols("question", "context")
+.setOutputCols("document_question", "document_context")
+
+val spanClassifier = BertForQuestionAnswering
+.pretrained("bert_qa_linkbert_large_finetuned_squad","en")
+.setInputCols(Array("document_question", "document_context"))
+.setOutputCol("answer")
+.setCaseSensitive(true)
+.setMaxSentenceLength(512)
+
+val pipeline = new Pipeline().setStages(Array(document, spanClassifier))
+
+val example = Seq(
+("Where was John Lenon born?", "John Lenon was born in London and lived in Paris. My name is Sarah and I live in London."),
+("What's my name?", "My name is Clara and I live in Berkeley."))
+.toDF("question", "context")
+
+val result = pipeline.fit(example).transform(example)
+```
+
+{:.nlu-block}
+```python
+import nlu
+nlu.load("en.answer_question.squad.link_bert.large").predict("""What's my name?|||"My name is Clara and I live in Berkeley.""")
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|bert_qa_linkbert_large_finetuned_squad|
+|Compatibility:|Spark NLP 5.2.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|1.2 GB|
+|Case sensitive:|true|
+|Max sentence length:|512|
+
+## References
+
+References
+
+- https://huggingface.co/niklaspm/linkbert-large-finetuned-squad
+- https://arxiv.org/abs/2203.15827
\ No newline at end of file
diff --git a/docs/_posts/ahmedlone127/2023-11-14-bert_qa_m_xx.md b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_m_xx.md
new file mode 100644
index 00000000000000..0e9a4c54dbc3c6
--- /dev/null
+++ b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_m_xx.md
@@ -0,0 +1,94 @@
+---
+layout: model
+title: Multilingual BertForQuestionAnswering Cased model (from sepiosky)
+author: John Snow Labs
+name: bert_qa_m
+date: 2023-11-14
+tags: [xx, open_source, bert, question_answering, onnx]
+task: Question Answering
+language: xx
+edition: Spark NLP 5.2.0
+spark_version: 3.0
+supported: true
+engine: onnx
+annotator: BertForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained BertForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `MBERT_QA` is a Multilingual model originally trained by `sepiosky`.
+
+## Predicted Entities
+
+
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/bert_qa_m_xx_5.2.0_3.0_1700001968982.zip){:.button.button-orange.button-orange-trans.arr.button-icon}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/bert_qa_m_xx_5.2.0_3.0_1700001968982.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = BertForQuestionAnswering.pretrained("bert_qa_m","xx")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = BertForQuestionAnswering.pretrained("bert_qa_m","xx")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq("What's my name?","My name is Clara and I live in Berkeley.").toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|bert_qa_m|
+|Compatibility:|Spark NLP 5.2.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|xx|
+|Size:|625.5 MB|
+|Case sensitive:|true|
+|Max sentence length:|512|
+
+## References
+
+References
+
+- https://huggingface.co/sepiosky/MBERT_QA
\ No newline at end of file
diff --git a/docs/_posts/ahmedlone127/2023-11-14-bert_qa_macsquad_en.md b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_macsquad_en.md
new file mode 100644
index 00000000000000..02e277f0a5fe20
--- /dev/null
+++ b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_macsquad_en.md
@@ -0,0 +1,100 @@
+---
+layout: model
+title: English BertForQuestionAnswering Cased model (from Nadav)
+author: John Snow Labs
+name: bert_qa_macsquad
+date: 2023-11-14
+tags: [en, open_source, bert, question_answering, onnx]
+task: Question Answering
+language: en
+edition: Spark NLP 5.2.0
+spark_version: 3.0
+supported: true
+engine: onnx
+annotator: BertForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained Question Answering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `MacSQuAD` is a English model originally trained by `Nadav`.
+
+## Predicted Entities
+
+
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/bert_qa_macsquad_en_5.2.0_3.0_1700002467032.zip){:.button.button-orange.button-orange-trans.arr.button-icon}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/bert_qa_macsquad_en_5.2.0_3.0_1700002467032.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+documentAssembler = MultiDocumentAssembler() \
+ .setInputCols(["question", "context"]) \
+ .setOutputCols(["document_question", "document_context"])
+
+spanClassifier = BertForQuestionAnswering.pretrained("bert_qa_macsquad","en") \
+ .setInputCols(["document_question", "document_context"]) \
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[documentAssembler, spanClassifier])
+
+data = spark.createDataFrame([["What is my name?", "My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val documentAssembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val spanClassifier = BertForQuestionAnswering.pretrained("bert_qa_macsquad","en")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(documentAssembler, spanClassifier))
+
+val data = Seq(("What is my name?", "My name is Clara and I live in Berkeley.")).toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+{:.nlu-block}
+```python
+import nlu
+nlu.load("en.answer_question.bert.squad.by_nadav").predict("""What is my name?|||"My name is Clara and I live in Berkeley.""")
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|bert_qa_macsquad|
+|Compatibility:|Spark NLP 5.2.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|406.6 MB|
+|Case sensitive:|true|
+|Max sentence length:|512|
+
+## References
+
+References
+
+- https://huggingface.co/Nadav/MacSQuAD
\ No newline at end of file
diff --git a/docs/_posts/ahmedlone127/2023-11-14-bert_qa_mbert_all_tahitian_sqen_sq20_1_en.md b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_mbert_all_tahitian_sqen_sq20_1_en.md
new file mode 100644
index 00000000000000..b67f0f2ffd893e
--- /dev/null
+++ b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_mbert_all_tahitian_sqen_sq20_1_en.md
@@ -0,0 +1,93 @@
+---
+layout: model
+title: English bert_qa_mbert_all_tahitian_sqen_sq20_1 BertForQuestionAnswering from krinal214
+author: John Snow Labs
+name: bert_qa_mbert_all_tahitian_sqen_sq20_1
+date: 2023-11-14
+tags: [bert, en, open_source, question_answering, onnx]
+task: Question Answering
+language: en
+edition: Spark NLP 5.2.0
+spark_version: 3.0
+supported: true
+engine: onnx
+annotator: BertForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained BertForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP.`bert_qa_mbert_all_tahitian_sqen_sq20_1` is a English model originally trained by krinal214.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/bert_qa_mbert_all_tahitian_sqen_sq20_1_en_5.2.0_3.0_1700002195661.zip){:.button.button-orange.button-orange-trans.arr.button-icon}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/bert_qa_mbert_all_tahitian_sqen_sq20_1_en_5.2.0_3.0_1700002195661.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|bert_qa_mbert_all_tahitian_sqen_sq20_1|
+|Compatibility:|Spark NLP 5.2.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|625.5 MB|
+
+## References
+
+https://huggingface.co/roshnir/mBert-finetuned-mlqa-dev-vi-hi
\ No newline at end of file
diff --git a/docs/_posts/ahmedlone127/2023-11-14-bert_qa_mkkc58_finetuned_squad_en.md b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_mkkc58_finetuned_squad_en.md
new file mode 100644
index 00000000000000..370413c0dba0e1
--- /dev/null
+++ b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_mkkc58_finetuned_squad_en.md
@@ -0,0 +1,100 @@
+---
+layout: model
+title: English BertForQuestionAnswering Cased model (from mkkc58)
+author: John Snow Labs
+name: bert_qa_mkkc58_finetuned_squad
+date: 2023-11-14
+tags: [en, open_source, bert, question_answering, onnx]
+task: Question Answering
+language: en
+edition: Spark NLP 5.2.0
+spark_version: 3.0
+supported: true
+engine: onnx
+annotator: BertForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained Question Answering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `bert-finetuned-squad` is a English model originally trained by `mkkc58`.
+
+## Predicted Entities
+
+
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/bert_qa_mkkc58_finetuned_squad_en_5.2.0_3.0_1700003196849.zip){:.button.button-orange.button-orange-trans.arr.button-icon}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/bert_qa_mkkc58_finetuned_squad_en_5.2.0_3.0_1700003196849.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+documentAssembler = MultiDocumentAssembler() \
+ .setInputCols(["question", "context"]) \
+ .setOutputCols(["document_question", "document_context"])
+
+spanClassifier = BertForQuestionAnswering.pretrained("bert_qa_mkkc58_finetuned_squad","en") \
+ .setInputCols(["document_question", "document_context"]) \
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[documentAssembler, spanClassifier])
+
+data = spark.createDataFrame([["What is my name?", "My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val documentAssembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val spanClassifier = BertForQuestionAnswering.pretrained("bert_qa_mkkc58_finetuned_squad","en")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(documentAssembler, spanClassifier))
+
+val data = Seq(("What is my name?", "My name is Clara and I live in Berkeley.")).toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+{:.nlu-block}
+```python
+import nlu
+nlu.load("en.answer_question.bert.squad.finetuned.by_mkkc58").predict("""What is my name?|||"My name is Clara and I live in Berkeley.""")
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|bert_qa_mkkc58_finetuned_squad|
+|Compatibility:|Spark NLP 5.2.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|403.6 MB|
+|Case sensitive:|true|
+|Max sentence length:|512|
+
+## References
+
+References
+
+- https://huggingface.co/mkkc58/bert-finetuned-squad
\ No newline at end of file
diff --git a/docs/_posts/ahmedlone127/2023-11-14-bert_qa_modelontquad_tr.md b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_modelontquad_tr.md
new file mode 100644
index 00000000000000..56925ddeed7cfa
--- /dev/null
+++ b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_modelontquad_tr.md
@@ -0,0 +1,94 @@
+---
+layout: model
+title: Turkish BertForQuestionAnswering Cased model (from Aybars)
+author: John Snow Labs
+name: bert_qa_modelontquad
+date: 2023-11-14
+tags: [tr, open_source, bert, question_answering, onnx]
+task: Question Answering
+language: tr
+edition: Spark NLP 5.2.0
+spark_version: 3.0
+supported: true
+engine: onnx
+annotator: BertForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained Question Answering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `ModelOnTquad` is a Turkish model originally trained by `Aybars`.
+
+## Predicted Entities
+
+
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/bert_qa_modelontquad_tr_5.2.0_3.0_1700003570293.zip){:.button.button-orange.button-orange-trans.arr.button-icon}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/bert_qa_modelontquad_tr_5.2.0_3.0_1700003570293.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+documentAssembler = MultiDocumentAssembler() \
+ .setInputCols(["question", "context"]) \
+ .setOutputCols(["document_question", "document_context"])
+
+spanClassifier = BertForQuestionAnswering.pretrained("bert_qa_modelontquad","tr") \
+ .setInputCols(["document_question", "document_context"]) \
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[documentAssembler, spanClassifier])
+
+data = spark.createDataFrame([["Benim adım ne?", "Benim adım Clara ve Berkeley'de yaşıyorum."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val documentAssembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val spanClassifier = BertForQuestionAnswering.pretrained("bert_qa_modelontquad","tr")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(documentAssembler, spanClassifier))
+
+val data = Seq(("Benim adım ne?", "Benim adım Clara ve Berkeley'de yaşıyorum.")).toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|bert_qa_modelontquad|
+|Compatibility:|Spark NLP 5.2.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|tr|
+|Size:|688.9 MB|
+|Case sensitive:|true|
+|Max sentence length:|512|
+
+## References
+
+References
+
+- https://huggingface.co/Aybars/ModelOnTquad
\ No newline at end of file
diff --git a/docs/_posts/ahmedlone127/2023-11-14-bert_qa_monakth_base_cased_finetuned_squad_en.md b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_monakth_base_cased_finetuned_squad_en.md
new file mode 100644
index 00000000000000..55dfd71086e92c
--- /dev/null
+++ b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_monakth_base_cased_finetuned_squad_en.md
@@ -0,0 +1,94 @@
+---
+layout: model
+title: English BertForQuestionAnswering Base Cased model (from monakth)
+author: John Snow Labs
+name: bert_qa_monakth_base_cased_finetuned_squad
+date: 2023-11-14
+tags: [en, open_source, bert, question_answering, onnx]
+task: Question Answering
+language: en
+edition: Spark NLP 5.2.0
+spark_version: 3.0
+supported: true
+engine: onnx
+annotator: BertForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained BertForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `bert-base-cased-finetuned-squad` is a English model originally trained by `monakth`.
+
+## Predicted Entities
+
+
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/bert_qa_monakth_base_cased_finetuned_squad_en_5.2.0_3.0_1700003875248.zip){:.button.button-orange.button-orange-trans.arr.button-icon}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/bert_qa_monakth_base_cased_finetuned_squad_en_5.2.0_3.0_1700003875248.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = BertForQuestionAnswering.pretrained("bert_qa_monakth_base_cased_finetuned_squad","en")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = BertForQuestionAnswering.pretrained("bert_qa_monakth_base_cased_finetuned_squad","en")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq("What's my name?","My name is Clara and I live in Berkeley.").toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|bert_qa_monakth_base_cased_finetuned_squad|
+|Compatibility:|Spark NLP 5.2.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|403.6 MB|
+|Case sensitive:|true|
+|Max sentence length:|512|
+
+## References
+
+References
+
+- https://huggingface.co/monakth/bert-base-cased-finetuned-squad
\ No newline at end of file
diff --git a/docs/_posts/ahmedlone127/2023-11-14-bert_qa_mqa_baseline_en.md b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_mqa_baseline_en.md
new file mode 100644
index 00000000000000..0f205df21f2347
--- /dev/null
+++ b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_mqa_baseline_en.md
@@ -0,0 +1,108 @@
+---
+layout: model
+title: English BertForQuestionAnswering model (from xraychen)
+author: John Snow Labs
+name: bert_qa_mqa_baseline
+date: 2023-11-14
+tags: [en, open_source, question_answering, bert, onnx]
+task: Question Answering
+language: en
+edition: Spark NLP 5.2.0
+spark_version: 3.0
+supported: true
+engine: onnx
+annotator: BertForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained Question Answering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `mqa-baseline` is a English model originally trained by `xraychen`.
+
+## Predicted Entities
+
+
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/bert_qa_mqa_baseline_en_5.2.0_3.0_1700004141822.zip){:.button.button-orange.button-orange-trans.arr.button-icon}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/bert_qa_mqa_baseline_en_5.2.0_3.0_1700004141822.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+document_assembler = MultiDocumentAssembler() \
+.setInputCols(["question", "context"]) \
+.setOutputCols(["document_question", "document_context"])
+
+spanClassifier = BertForQuestionAnswering.pretrained("bert_qa_mqa_baseline","en") \
+.setInputCols(["document_question", "document_context"]) \
+.setOutputCol("answer") \
+.setCaseSensitive(True)
+
+pipeline = Pipeline().setStages([
+document_assembler,
+spanClassifier
+])
+
+example = spark.createDataFrame([["What's my name?", "My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(example).transform(example)
+```
+```scala
+val document = new MultiDocumentAssembler()
+.setInputCols("question", "context")
+.setOutputCols("document_question", "document_context")
+
+val spanClassifier = BertForQuestionAnswering
+.pretrained("bert_qa_mqa_baseline","en")
+.setInputCols(Array("document_question", "document_context"))
+.setOutputCol("answer")
+.setCaseSensitive(true)
+.setMaxSentenceLength(512)
+
+val pipeline = new Pipeline().setStages(Array(document, spanClassifier))
+
+val example = Seq(
+("Where was John Lenon born?", "John Lenon was born in London and lived in Paris. My name is Sarah and I live in London."),
+("What's my name?", "My name is Clara and I live in Berkeley."))
+.toDF("question", "context")
+
+val result = pipeline.fit(example).transform(example)
+```
+
+{:.nlu-block}
+```python
+import nlu
+nlu.load("en.answer_question.bert.base.by_xraychen").predict("""What's my name?|||"My name is Clara and I live in Berkeley.""")
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|bert_qa_mqa_baseline|
+|Compatibility:|Spark NLP 5.2.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|407.2 MB|
+|Case sensitive:|true|
+|Max sentence length:|512|
+
+## References
+
+References
+
+- https://huggingface.co/xraychen/mqa-baseline
\ No newline at end of file
diff --git a/docs/_posts/ahmedlone127/2023-11-14-bert_qa_multilingual_bert_base_cased_english_en.md b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_multilingual_bert_base_cased_english_en.md
new file mode 100644
index 00000000000000..f688bf0eb120da
--- /dev/null
+++ b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_multilingual_bert_base_cased_english_en.md
@@ -0,0 +1,108 @@
+---
+layout: model
+title: English BertForQuestionAnswering model (from bhavikardeshna)
+author: John Snow Labs
+name: bert_qa_multilingual_bert_base_cased_english
+date: 2023-11-14
+tags: [en, open_source, question_answering, bert, onnx]
+task: Question Answering
+language: en
+edition: Spark NLP 5.2.0
+spark_version: 3.0
+supported: true
+engine: onnx
+annotator: BertForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained Question Answering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `multilingual-bert-base-cased-english` is a English model originally trained by `bhavikardeshna`.
+
+## Predicted Entities
+
+
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/bert_qa_multilingual_bert_base_cased_english_en_5.2.0_3.0_1700004618287.zip){:.button.button-orange.button-orange-trans.arr.button-icon}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/bert_qa_multilingual_bert_base_cased_english_en_5.2.0_3.0_1700004618287.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+document_assembler = MultiDocumentAssembler() \
+.setInputCols(["question", "context"]) \
+.setOutputCols(["document_question", "document_context"])
+
+spanClassifier = BertForQuestionAnswering.pretrained("bert_qa_multilingual_bert_base_cased_english","en") \
+.setInputCols(["document_question", "document_context"]) \
+.setOutputCol("answer") \
+.setCaseSensitive(True)
+
+pipeline = Pipeline().setStages([
+document_assembler,
+spanClassifier
+])
+
+example = spark.createDataFrame([["What's my name?", "My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(example).transform(example)
+```
+```scala
+val document = new MultiDocumentAssembler()
+.setInputCols("question", "context")
+.setOutputCols("document_question", "document_context")
+
+val spanClassifier = BertForQuestionAnswering
+.pretrained("bert_qa_multilingual_bert_base_cased_english","en")
+.setInputCols(Array("document_question", "document_context"))
+.setOutputCol("answer")
+.setCaseSensitive(true)
+.setMaxSentenceLength(512)
+
+val pipeline = new Pipeline().setStages(Array(document, spanClassifier))
+
+val example = Seq(
+("Where was John Lenon born?", "John Lenon was born in London and lived in Paris. My name is Sarah and I live in London."),
+("What's my name?", "My name is Clara and I live in Berkeley."))
+.toDF("question", "context")
+
+val result = pipeline.fit(example).transform(example)
+```
+
+{:.nlu-block}
+```python
+import nlu
+nlu.load("en.answer_question.bert.multilingual_english_tuned_base_cased.by_bhavikardeshna").predict("""What's my name?|||"My name is Clara and I live in Berkeley.""")
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|bert_qa_multilingual_bert_base_cased_english|
+|Compatibility:|Spark NLP 5.2.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|665.1 MB|
+|Case sensitive:|true|
+|Max sentence length:|512|
+
+## References
+
+References
+
+- https://huggingface.co/bhavikardeshna/multilingual-bert-base-cased-english
\ No newline at end of file
diff --git a/docs/_posts/ahmedlone127/2023-11-14-bert_qa_muril_large_cased_hita_qa_hi.md b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_muril_large_cased_hita_qa_hi.md
new file mode 100644
index 00000000000000..36e468e02be96c
--- /dev/null
+++ b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_muril_large_cased_hita_qa_hi.md
@@ -0,0 +1,108 @@
+---
+layout: model
+title: Hindi BertForQuestionAnswering model (from Yuchen)
+author: John Snow Labs
+name: bert_qa_muril_large_cased_hita_qa
+date: 2023-11-14
+tags: [open_source, question_answering, bert, hi, onnx]
+task: Question Answering
+language: hi
+edition: Spark NLP 5.2.0
+spark_version: 3.0
+supported: true
+engine: onnx
+annotator: BertForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained Question Answering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `muril-large-cased-hita-qa` is a Hindi model originally trained by `Yuchen`.
+
+## Predicted Entities
+
+
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/bert_qa_muril_large_cased_hita_qa_hi_5.2.0_3.0_1700005221724.zip){:.button.button-orange.button-orange-trans.arr.button-icon}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/bert_qa_muril_large_cased_hita_qa_hi_5.2.0_3.0_1700005221724.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+document_assembler = MultiDocumentAssembler() \
+.setInputCols(["question", "context"]) \
+.setOutputCols(["document_question", "document_context"])
+
+spanClassifier = BertForQuestionAnswering.pretrained("bert_qa_muril_large_cased_hita_qa","hi") \
+.setInputCols(["document_question", "document_context"]) \
+.setOutputCol("answer") \
+.setCaseSensitive(True)
+
+pipeline = Pipeline().setStages([
+document_assembler,
+spanClassifier
+])
+
+example = spark.createDataFrame([["What's my name?", "My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(example).transform(example)
+```
+```scala
+val document = new MultiDocumentAssembler()
+.setInputCols("question", "context")
+.setOutputCols("document_question", "document_context")
+
+val spanClassifier = BertForQuestionAnswering
+.pretrained("bert_qa_muril_large_cased_hita_qa","hi")
+.setInputCols(Array("document_question", "document_context"))
+.setOutputCol("answer")
+.setCaseSensitive(true)
+.setMaxSentenceLength(512)
+
+val pipeline = new Pipeline().setStages(Array(document, spanClassifier))
+
+val example = Seq(
+("Where was John Lenon born?", "John Lenon was born in London and lived in Paris. My name is Sarah and I live in London."),
+("What's my name?", "My name is Clara and I live in Berkeley."))
+.toDF("question", "context")
+
+val result = pipeline.fit(example).transform(example)
+```
+
+{:.nlu-block}
+```python
+import nlu
+nlu.load("hi.answer_question.bert.large_cased").predict("""What's my name?|||"My name is Clara and I live in Berkeley.""")
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|bert_qa_muril_large_cased_hita_qa|
+|Compatibility:|Spark NLP 5.2.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|hi|
+|Size:|1.9 GB|
+|Case sensitive:|true|
+|Max sentence length:|512|
+
+## References
+
+References
+
+- https://huggingface.co/Yuchen/muril-large-cased-hita-qa
\ No newline at end of file
diff --git a/docs/_posts/ahmedlone127/2023-11-14-bert_qa_nausheen_finetuned_squad_accelera_en.md b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_nausheen_finetuned_squad_accelera_en.md
new file mode 100644
index 00000000000000..5c6defa8678812
--- /dev/null
+++ b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_nausheen_finetuned_squad_accelera_en.md
@@ -0,0 +1,100 @@
+---
+layout: model
+title: English BertForQuestionAnswering Cased model (from Nausheen)
+author: John Snow Labs
+name: bert_qa_nausheen_finetuned_squad_accelera
+date: 2023-11-14
+tags: [en, open_source, bert, question_answering, onnx]
+task: Question Answering
+language: en
+edition: Spark NLP 5.2.0
+spark_version: 3.0
+supported: true
+engine: onnx
+annotator: BertForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained Question Answering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `bert-finetuned-squad-accelerate` is a English model originally trained by `Nausheen`.
+
+## Predicted Entities
+
+
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/bert_qa_nausheen_finetuned_squad_accelera_en_5.2.0_3.0_1700005481407.zip){:.button.button-orange.button-orange-trans.arr.button-icon}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/bert_qa_nausheen_finetuned_squad_accelera_en_5.2.0_3.0_1700005481407.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+documentAssembler = MultiDocumentAssembler() \
+ .setInputCols(["question", "context"]) \
+ .setOutputCols(["document_question", "document_context"])
+
+spanClassifier = BertForQuestionAnswering.pretrained("bert_qa_nausheen_finetuned_squad_accelera","en") \
+ .setInputCols(["document_question", "document_context"]) \
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[documentAssembler, spanClassifier])
+
+data = spark.createDataFrame([["What is my name?", "My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val documentAssembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val spanClassifier = BertForQuestionAnswering.pretrained("bert_qa_nausheen_finetuned_squad_accelera","en")
+  .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(documentAssembler, spanClassifier))
+
+val data = Seq(("What is my name?", "My name is Clara and I live in Berkeley.")).toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+{:.nlu-block}
+```python
+import nlu
+nlu.load("en.answer_question.bert.squad.finetuned.by_Nausheen").predict("""What is my name?|||"My name is Clara and I live in Berkeley.""")
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|bert_qa_nausheen_finetuned_squad_accelera|
+|Compatibility:|Spark NLP 5.2.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|403.6 MB|
+|Case sensitive:|true|
+|Max sentence length:|512|
+
+## References
+
+References
+
+- https://huggingface.co/Nausheen/bert-finetuned-squad-accelerate
\ No newline at end of file
diff --git a/docs/_posts/ahmedlone127/2023-11-14-bert_qa_neuralmind_base_portuguese_squad_pt.md b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_neuralmind_base_portuguese_squad_pt.md
new file mode 100644
index 00000000000000..0a4a9bd179db6f
--- /dev/null
+++ b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_neuralmind_base_portuguese_squad_pt.md
@@ -0,0 +1,94 @@
+---
+layout: model
+title: Portuguese BertForQuestionAnswering Base Cased model (from p2o)
+author: John Snow Labs
+name: bert_qa_neuralmind_base_portuguese_squad
+date: 2023-11-14
+tags: [pt, open_source, bert, question_answering, onnx]
+task: Question Answering
+language: pt
+edition: Spark NLP 5.2.0
+spark_version: 3.0
+supported: true
+engine: onnx
+annotator: BertForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained BertForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `neuralmind-bert-base-portuguese-squad` is a Portuguese model originally trained by `p2o`.
+
+## Predicted Entities
+
+
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/bert_qa_neuralmind_base_portuguese_squad_pt_5.2.0_3.0_1700005781942.zip){:.button.button-orange.button-orange-trans.arr.button-icon}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/bert_qa_neuralmind_base_portuguese_squad_pt_5.2.0_3.0_1700005781942.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = BertForQuestionAnswering.pretrained("bert_qa_neuralmind_base_portuguese_squad","pt")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = BertForQuestionAnswering.pretrained("bert_qa_neuralmind_base_portuguese_squad","pt")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq(("What's my name?","My name is Clara and I live in Berkeley.")).toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|bert_qa_neuralmind_base_portuguese_squad|
+|Compatibility:|Spark NLP 5.2.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|pt|
+|Size:|405.9 MB|
+|Case sensitive:|true|
+|Max sentence length:|512|
+
+## References
+
+References
+
+- https://huggingface.co/p2o/neuralmind-bert-base-portuguese-squad
\ No newline at end of file
diff --git a/docs/_posts/ahmedlone127/2023-11-14-bert_qa_ofirzaf_bert_large_uncased_squad_en.md b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_ofirzaf_bert_large_uncased_squad_en.md
new file mode 100644
index 00000000000000..7b87797cf00e24
--- /dev/null
+++ b/docs/_posts/ahmedlone127/2023-11-14-bert_qa_ofirzaf_bert_large_uncased_squad_en.md
@@ -0,0 +1,108 @@
+---
+layout: model
+title: English BertForQuestionAnswering model (from ofirzaf)
+author: John Snow Labs
+name: bert_qa_ofirzaf_bert_large_uncased_squad
+date: 2023-11-14
+tags: [en, open_source, question_answering, bert, onnx]
+task: Question Answering
+language: en
+edition: Spark NLP 5.2.0
+spark_version: 3.0
+supported: true
+engine: onnx
+annotator: BertForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained Question Answering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `bert-large-uncased-squad` is a English model originally trained by `ofirzaf`.
+
+## Predicted Entities
+
+
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/bert_qa_ofirzaf_bert_large_uncased_squad_en_5.2.0_3.0_1700006266704.zip){:.button.button-orange.button-orange-trans.arr.button-icon}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/bert_qa_ofirzaf_bert_large_uncased_squad_en_5.2.0_3.0_1700006266704.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+document_assembler = MultiDocumentAssembler() \
+.setInputCols(["question", "context"]) \
+.setOutputCols(["document_question", "document_context"])
+
+spanClassifier = BertForQuestionAnswering.pretrained("bert_qa_ofirzaf_bert_large_uncased_squad","en") \
+.setInputCols(["document_question", "document_context"]) \
+.setOutputCol("answer") \
+.setCaseSensitive(True)
+
+pipeline = Pipeline().setStages([
+document_assembler,
+spanClassifier
+])
+
+example = spark.createDataFrame([["What's my name?", "My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(example).transform(example)
+```
+```scala
+val document = new MultiDocumentAssembler()
+.setInputCols("question", "context")
+.setOutputCols("document_question", "document_context")
+
+val spanClassifier = BertForQuestionAnswering
+.pretrained("bert_qa_ofirzaf_bert_large_uncased_squad","en")
+.setInputCols(Array("document_question", "document_context"))
+.setOutputCol("answer")
+.setCaseSensitive(true)
+.setMaxSentenceLength(512)
+
+val pipeline = new Pipeline().setStages(Array(document, spanClassifier))
+
+val example = Seq(
+("Where was John Lenon born?", "John Lenon was born in London and lived in Paris. My name is Sarah and I live in London."),
+("What's my name?", "My name is Clara and I live in Berkeley."))
+.toDF("question", "context")
+
+val result = pipeline.fit(example).transform(example)
+```
+
+{:.nlu-block}
+```python
+import nlu
+nlu.load("en.answer_question.squad.bert.large_uncased.by_ofirzaf").predict("""What's my name?|||"My name is Clara and I live in Berkeley.""")
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|bert_qa_ofirzaf_bert_large_uncased_squad|
+|Compatibility:|Spark NLP 5.2.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|1.3 GB|
+|Case sensitive:|false|
+|Max sentence length:|512|
+
+## References
+
+References
+
+- https://huggingface.co/ofirzaf/bert-large-uncased-squad
\ No newline at end of file
diff --git a/docs/_posts/ahmedlone127/2023-11-15-bert_qa_burmese_model_en.md b/docs/_posts/ahmedlone127/2023-11-15-bert_qa_burmese_model_en.md
new file mode 100644
index 00000000000000..f745156e9025c5
--- /dev/null
+++ b/docs/_posts/ahmedlone127/2023-11-15-bert_qa_burmese_model_en.md
@@ -0,0 +1,93 @@
+---
+layout: model
+title: English bert_qa_burmese_model BertForQuestionAnswering from Shredder
+author: John Snow Labs
+name: bert_qa_burmese_model
+date: 2023-11-15
+tags: [bert, en, open_source, question_answering, onnx]
+task: Question Answering
+language: en
+edition: Spark NLP 5.2.0
+spark_version: 3.0
+supported: true
+engine: onnx
+annotator: BertForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained BertForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP.`bert_qa_burmese_model` is a English model originally trained by Shredder.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/bert_qa_burmese_model_en_5.2.0_3.0_1700008721426.zip){:.button.button-orange.button-orange-trans.arr.button-icon}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/bert_qa_burmese_model_en_5.2.0_3.0_1700008721426.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|bert_qa_burmese_model|
+|Compatibility:|Spark NLP 5.2.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|407.2 MB|
+
+## References
+
+https://huggingface.co/Shredder/My_model
\ No newline at end of file
diff --git a/docs/_posts/ahmedlone127/2023-11-15-bert_qa_dylan1999_finetuned_squad_accelerate_en.md b/docs/_posts/ahmedlone127/2023-11-15-bert_qa_dylan1999_finetuned_squad_accelerate_en.md
new file mode 100644
index 00000000000000..1a10af8ecf7772
--- /dev/null
+++ b/docs/_posts/ahmedlone127/2023-11-15-bert_qa_dylan1999_finetuned_squad_accelerate_en.md
@@ -0,0 +1,94 @@
+---
+layout: model
+title: English BertForQuestionAnswering Cased model (from Dylan1999)
+author: John Snow Labs
+name: bert_qa_dylan1999_finetuned_squad_accelerate
+date: 2023-11-15
+tags: [en, open_source, bert, question_answering, onnx]
+task: Question Answering
+language: en
+edition: Spark NLP 5.2.0
+spark_version: 3.0
+supported: true
+engine: onnx
+annotator: BertForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained BertForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `bert-finetuned-squad-accelerate` is a English model originally trained by `Dylan1999`.
+
+## Predicted Entities
+
+
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/bert_qa_dylan1999_finetuned_squad_accelerate_en_5.2.0_3.0_1700006698837.zip){:.button.button-orange.button-orange-trans.arr.button-icon}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/bert_qa_dylan1999_finetuned_squad_accelerate_en_5.2.0_3.0_1700006698837.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = BertForQuestionAnswering.pretrained("bert_qa_dylan1999_finetuned_squad_accelerate","en")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = BertForQuestionAnswering.pretrained("bert_qa_dylan1999_finetuned_squad_accelerate","en")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq(("What's my name?","My name is Clara and I live in Berkeley.")).toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|bert_qa_dylan1999_finetuned_squad_accelerate|
+|Compatibility:|Spark NLP 5.2.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|403.6 MB|
+|Case sensitive:|true|
+|Max sentence length:|512|
+
+## References
+
+References
+
+- https://huggingface.co/Dylan1999/bert-finetuned-squad-accelerate
\ No newline at end of file
diff --git a/docs/_posts/ahmedlone127/2023-11-15-bert_qa_fabianwillner_base_uncased_finetuned_squad_en.md b/docs/_posts/ahmedlone127/2023-11-15-bert_qa_fabianwillner_base_uncased_finetuned_squad_en.md
new file mode 100644
index 00000000000000..d9393080c547a1
--- /dev/null
+++ b/docs/_posts/ahmedlone127/2023-11-15-bert_qa_fabianwillner_base_uncased_finetuned_squad_en.md
@@ -0,0 +1,94 @@
+---
+layout: model
+title: English BertForQuestionAnswering Base Uncased model (from FabianWillner)
+author: John Snow Labs
+name: bert_qa_fabianwillner_base_uncased_finetuned_squad
+date: 2023-11-15
+tags: [en, open_source, bert, question_answering, onnx]
+task: Question Answering
+language: en
+edition: Spark NLP 5.2.0
+spark_version: 3.0
+supported: true
+engine: onnx
+annotator: BertForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained BertForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `bert-base-uncased-finetuned-squad` is a English model originally trained by `FabianWillner`.
+
+## Predicted Entities
+
+
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/bert_qa_fabianwillner_base_uncased_finetuned_squad_en_5.2.0_3.0_1700007113810.zip){:.button.button-orange.button-orange-trans.arr.button-icon}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/bert_qa_fabianwillner_base_uncased_finetuned_squad_en_5.2.0_3.0_1700007113810.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = BertForQuestionAnswering.pretrained("bert_qa_fabianwillner_base_uncased_finetuned_squad","en")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = BertForQuestionAnswering.pretrained("bert_qa_fabianwillner_base_uncased_finetuned_squad","en")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq(("What's my name?","My name is Clara and I live in Berkeley.")).toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|bert_qa_fabianwillner_base_uncased_finetuned_squad|
+|Compatibility:|Spark NLP 5.2.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|407.2 MB|
+|Case sensitive:|false|
+|Max sentence length:|512|
+
+## References
+
+References
+
+- https://huggingface.co/FabianWillner/bert-base-uncased-finetuned-squad
\ No newline at end of file
diff --git a/docs/_posts/ahmedlone127/2023-11-15-bert_qa_finetune_bert_base_v2_en.md b/docs/_posts/ahmedlone127/2023-11-15-bert_qa_finetune_bert_base_v2_en.md
new file mode 100644
index 00000000000000..f2217b19f81563
--- /dev/null
+++ b/docs/_posts/ahmedlone127/2023-11-15-bert_qa_finetune_bert_base_v2_en.md
@@ -0,0 +1,108 @@
+---
+layout: model
+title: English BertForQuestionAnswering model (from peggyhuang)
+author: John Snow Labs
+name: bert_qa_finetune_bert_base_v2
+date: 2023-11-15
+tags: [en, open_source, question_answering, bert, onnx]
+task: Question Answering
+language: en
+edition: Spark NLP 5.2.0
+spark_version: 3.0
+supported: true
+engine: onnx
+annotator: BertForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained Question Answering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `finetune-bert-base-v2` is a English model originally trained by `peggyhuang`.
+
+## Predicted Entities
+
+
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/bert_qa_finetune_bert_base_v2_en_5.2.0_3.0_1700007361101.zip){:.button.button-orange.button-orange-trans.arr.button-icon}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/bert_qa_finetune_bert_base_v2_en_5.2.0_3.0_1700007361101.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+document_assembler = MultiDocumentAssembler() \
+.setInputCols(["question", "context"]) \
+.setOutputCols(["document_question", "document_context"])
+
+spanClassifier = BertForQuestionAnswering.pretrained("bert_qa_finetune_bert_base_v2","en") \
+.setInputCols(["document_question", "document_context"]) \
+.setOutputCol("answer") \
+.setCaseSensitive(True)
+
+pipeline = Pipeline().setStages([
+document_assembler,
+spanClassifier
+])
+
+example = spark.createDataFrame([["What's my name?", "My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(example).transform(example)
+```
+```scala
+val document = new MultiDocumentAssembler()
+.setInputCols("question", "context")
+.setOutputCols("document_question", "document_context")
+
+val spanClassifier = BertForQuestionAnswering
+.pretrained("bert_qa_finetune_bert_base_v2","en")
+.setInputCols(Array("document_question", "document_context"))
+.setOutputCol("answer")
+.setCaseSensitive(true)
+.setMaxSentenceLength(512)
+
+val pipeline = new Pipeline().setStages(Array(document, spanClassifier))
+
+val example = Seq(
+("Where was John Lenon born?", "John Lenon was born in London and lived in Paris. My name is Sarah and I live in London."),
+("What's my name?", "My name is Clara and I live in Berkeley."))
+.toDF("question", "context")
+
+val result = pipeline.fit(example).transform(example)
+```
+
+{:.nlu-block}
+```python
+import nlu
+nlu.load("en.answer_question.bert.base_v2").predict("""What's my name?|||"My name is Clara and I live in Berkeley.""")
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|bert_qa_finetune_bert_base_v2|
+|Compatibility:|Spark NLP 5.2.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|407.1 MB|
+|Case sensitive:|true|
+|Max sentence length:|512|
+
+## References
+
+References
+
+- https://huggingface.co/peggyhuang/finetune-bert-base-v2
\ No newline at end of file
diff --git a/docs/_posts/ahmedlone127/2023-11-15-bert_qa_finetune_bert_base_v3_en.md b/docs/_posts/ahmedlone127/2023-11-15-bert_qa_finetune_bert_base_v3_en.md
new file mode 100644
index 00000000000000..a51295841b71a7
--- /dev/null
+++ b/docs/_posts/ahmedlone127/2023-11-15-bert_qa_finetune_bert_base_v3_en.md
@@ -0,0 +1,108 @@
+---
+layout: model
+title: English BertForQuestionAnswering model (from peggyhuang)
+author: John Snow Labs
+name: bert_qa_finetune_bert_base_v3
+date: 2023-11-15
+tags: [en, open_source, question_answering, bert, onnx]
+task: Question Answering
+language: en
+edition: Spark NLP 5.2.0
+spark_version: 3.0
+supported: true
+engine: onnx
+annotator: BertForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained Question Answering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `finetune-bert-base-v3` is a English model originally trained by `peggyhuang`.
+
+## Predicted Entities
+
+
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/bert_qa_finetune_bert_base_v3_en_5.2.0_3.0_1700007635634.zip){:.button.button-orange.button-orange-trans.arr.button-icon}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/bert_qa_finetune_bert_base_v3_en_5.2.0_3.0_1700007635634.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+document_assembler = MultiDocumentAssembler() \
+.setInputCols(["question", "context"]) \
+.setOutputCols(["document_question", "document_context"])
+
+spanClassifier = BertForQuestionAnswering.pretrained("bert_qa_finetune_bert_base_v3","en") \
+.setInputCols(["document_question", "document_context"]) \
+.setOutputCol("answer") \
+.setCaseSensitive(True)
+
+pipeline = Pipeline().setStages([
+document_assembler,
+spanClassifier
+])
+
+example = spark.createDataFrame([["What's my name?", "My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(example).transform(example)
+```
+```scala
+val document = new MultiDocumentAssembler()
+.setInputCols("question", "context")
+.setOutputCols("document_question", "document_context")
+
+val spanClassifier = BertForQuestionAnswering
+.pretrained("bert_qa_finetune_bert_base_v3","en")
+.setInputCols(Array("document_question", "document_context"))
+.setOutputCol("answer")
+.setCaseSensitive(true)
+.setMaxSentenceLength(512)
+
+val pipeline = new Pipeline().setStages(Array(document, spanClassifier))
+
+val example = Seq(
+("Where was John Lenon born?", "John Lenon was born in London and lived in Paris. My name is Sarah and I live in London."),
+("What's my name?", "My name is Clara and I live in Berkeley."))
+.toDF("question", "context")
+
+val result = pipeline.fit(example).transform(example)
+```
+
+{:.nlu-block}
+```python
+import nlu
+nlu.load("en.answer_question.bert.base_v3.by_peggyhuang").predict("""What's my name?|||"My name is Clara and I live in Berkeley.""")
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|bert_qa_finetune_bert_base_v3|
+|Compatibility:|Spark NLP 5.2.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|407.1 MB|
+|Case sensitive:|true|
+|Max sentence length:|512|
+
+## References
+
+References
+
+- https://huggingface.co/peggyhuang/finetune-bert-base-v3
\ No newline at end of file
diff --git a/docs/_posts/ahmedlone127/2023-11-15-bert_qa_finetuned_custom_1_en.md b/docs/_posts/ahmedlone127/2023-11-15-bert_qa_finetuned_custom_1_en.md
new file mode 100644
index 00000000000000..5396a5841b4e51
--- /dev/null
+++ b/docs/_posts/ahmedlone127/2023-11-15-bert_qa_finetuned_custom_1_en.md
@@ -0,0 +1,94 @@
+---
+layout: model
+title: English BertForQuestionAnswering Cased model (from VedantS01)
+author: John Snow Labs
+name: bert_qa_finetuned_custom_1
+date: 2023-11-15
+tags: [en, open_source, bert, question_answering, onnx]
+task: Question Answering
+language: en
+edition: Spark NLP 5.2.0
+spark_version: 3.0
+supported: true
+engine: onnx
+annotator: BertForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained BertForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `bert-finetuned-custom-1` is a English model originally trained by `VedantS01`.
+
+## Predicted Entities
+
+
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/bert_qa_finetuned_custom_1_en_5.2.0_3.0_1700007920448.zip){:.button.button-orange.button-orange-trans.arr.button-icon}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/bert_qa_finetuned_custom_1_en_5.2.0_3.0_1700007920448.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = BertForQuestionAnswering.pretrained("bert_qa_finetuned_custom_1","en")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = BertForQuestionAnswering.pretrained("bert_qa_finetuned_custom_1","en")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq(("What's my name?","My name is Clara and I live in Berkeley.")).toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|bert_qa_finetuned_custom_1|
+|Compatibility:|Spark NLP 5.2.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|407.2 MB|
+|Case sensitive:|true|
+|Max sentence length:|512|
+
+## References
+
+References
+
+- https://huggingface.co/VedantS01/bert-finetuned-custom-1
\ No newline at end of file
diff --git a/docs/_posts/ahmedlone127/2023-11-15-bert_qa_fpdm_bert_ft_newsqa_en.md b/docs/_posts/ahmedlone127/2023-11-15-bert_qa_fpdm_bert_ft_newsqa_en.md
new file mode 100644
index 00000000000000..391f61aa216e17
--- /dev/null
+++ b/docs/_posts/ahmedlone127/2023-11-15-bert_qa_fpdm_bert_ft_newsqa_en.md
@@ -0,0 +1,95 @@
+---
+layout: model
+title: English bert_qa_fpdm_bert_ft_newsqa BertForQuestionAnswering from AnonymousSub
+author: John Snow Labs
+name: bert_qa_fpdm_bert_ft_newsqa
+date: 2023-11-15
+tags: [bert, en, open_source, question_answering, onnx]
+task: Question Answering
+language: en
+edition: Spark NLP 5.2.0
+spark_version: 3.0
+supported: true
+engine: onnx
+annotator: BertForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained BertForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP.`bert_qa_fpdm_bert_ft_newsqa` is a English model originally trained by AnonymousSub.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/bert_qa_fpdm_bert_ft_newsqa_en_5.2.0_3.0_1700008214320.zip){:.button.button-orange.button-orange-trans.arr.button-icon}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/bert_qa_fpdm_bert_ft_newsqa_en_5.2.0_3.0_1700008214320.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|bert_qa_fpdm_bert_ft_newsqa|
+|Compatibility:|Spark NLP 5.2.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|407.2 MB|
+|Case sensitive:|true|
+|Max sentence length:|512|
+
+## References
+
+https://huggingface.co/AnonymousSub/fpdm_bert_FT_newsqa
\ No newline at end of file
diff --git a/docs/_posts/ahmedlone127/2023-11-15-bert_qa_fpdm_pert_sent_0.01_squad2.0_en.md b/docs/_posts/ahmedlone127/2023-11-15-bert_qa_fpdm_pert_sent_0.01_squad2.0_en.md
new file mode 100644
index 00000000000000..f315005649dbda
--- /dev/null
+++ b/docs/_posts/ahmedlone127/2023-11-15-bert_qa_fpdm_pert_sent_0.01_squad2.0_en.md
@@ -0,0 +1,94 @@
+---
+layout: model
+title: English BertForQuestionAnswering Cased model (from AnonymousSub)
+author: John Snow Labs
+name: bert_qa_fpdm_pert_sent_0.01_squad2.0
+date: 2023-11-15
+tags: [en, open_source, bert, question_answering, onnx]
+task: Question Answering
+language: en
+edition: Spark NLP 5.2.0
+spark_version: 3.0
+supported: true
+engine: onnx
+annotator: BertForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained BertForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `fpdm_bert_pert_sent_0.01_squad2.0` is an English model originally trained by `AnonymousSub`.
+
+## Predicted Entities
+
+
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/bert_qa_fpdm_pert_sent_0.01_squad2.0_en_5.2.0_3.0_1700008478770.zip){:.button.button-orange.button-orange-trans.arr.button-icon}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/bert_qa_fpdm_pert_sent_0.01_squad2.0_en_5.2.0_3.0_1700008478770.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = BertForQuestionAnswering.pretrained("bert_qa_fpdm_pert_sent_0.01_squad2.0","en")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = BertForQuestionAnswering.pretrained("bert_qa_fpdm_pert_sent_0.01_squad2.0","en")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq("What's my name?","My name is Clara and I live in Berkeley.").toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|bert_qa_fpdm_pert_sent_0.01_squad2.0|
+|Compatibility:|Spark NLP 5.2.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|407.2 MB|
+|Case sensitive:|true|
+|Max sentence length:|512|
+
+## References
+
+References
+
+- https://huggingface.co/AnonymousSub/fpdm_bert_pert_sent_0.01_squad2.0
\ No newline at end of file
diff --git a/docs/_posts/ahmedlone127/2023-11-15-bert_qa_howey_bert_large_uncased_squad_en.md b/docs/_posts/ahmedlone127/2023-11-15-bert_qa_howey_bert_large_uncased_squad_en.md
new file mode 100644
index 00000000000000..3454e69377b0f4
--- /dev/null
+++ b/docs/_posts/ahmedlone127/2023-11-15-bert_qa_howey_bert_large_uncased_squad_en.md
@@ -0,0 +1,108 @@
+---
+layout: model
+title: English BertForQuestionAnswering model (from howey)
+author: John Snow Labs
+name: bert_qa_howey_bert_large_uncased_squad
+date: 2023-11-15
+tags: [en, open_source, question_answering, bert, onnx]
+task: Question Answering
+language: en
+edition: Spark NLP 5.2.0
+spark_version: 3.0
+supported: true
+engine: onnx
+annotator: BertForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained Question Answering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `bert-large-uncased-squad` is an English model originally trained by `howey`.
+
+## Predicted Entities
+
+
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/bert_qa_howey_bert_large_uncased_squad_en_5.2.0_3.0_1700006901799.zip){:.button.button-orange.button-orange-trans.arr.button-icon}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/bert_qa_howey_bert_large_uncased_squad_en_5.2.0_3.0_1700006901799.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+document_assembler = MultiDocumentAssembler() \
+.setInputCols(["question", "context"]) \
+.setOutputCols(["document_question", "document_context"])
+
+spanClassifier = BertForQuestionAnswering.pretrained("bert_qa_howey_bert_large_uncased_squad","en") \
+.setInputCols(["document_question", "document_context"]) \
+.setOutputCol("answer") \
+.setCaseSensitive(True)
+
+pipeline = Pipeline().setStages([
+document_assembler,
+spanClassifier
+])
+
+example = spark.createDataFrame([["What's my name?", "My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(example).transform(example)
+```
+```scala
+val document = new MultiDocumentAssembler()
+.setInputCols("question", "context")
+.setOutputCols("document_question", "document_context")
+
+val spanClassifier = BertForQuestionAnswering
+.pretrained("bert_qa_howey_bert_large_uncased_squad","en")
+.setInputCols(Array("document_question", "document_context"))
+.setOutputCol("answer")
+.setCaseSensitive(true)
+.setMaxSentenceLength(512)
+
+val pipeline = new Pipeline().setStages(Array(document, spanClassifier))
+
+val example = Seq(
+("Where was John Lenon born?", "John Lenon was born in London and lived in Paris. My name is Sarah and I live in London."),
+("What's my name?", "My name is Clara and I live in Berkeley."))
+.toDF("question", "context")
+
+val result = pipeline.fit(example).transform(example)
+```
+
+{:.nlu-block}
+```python
+import nlu
+nlu.load("en.answer_question.squad.bert.large_uncased.by_howey").predict("""What's my name?|||"My name is Clara and I live in Berkeley.""")
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|bert_qa_howey_bert_large_uncased_squad|
+|Compatibility:|Spark NLP 5.2.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[sentence, token]|
+|Output Labels:|[embeddings]|
+|Language:|en|
+|Size:|1.3 GB|
+|Case sensitive:|false|
+|Max sentence length:|512|
+
+## References
+
+References
+
+- https://huggingface.co/howey/bert-large-uncased-squad
\ No newline at end of file
diff --git a/docs/_posts/ahmedlone127/2023-11-15-bert_qa_hubert_fine_tuned_hungarian_squadv1_hu.md b/docs/_posts/ahmedlone127/2023-11-15-bert_qa_hubert_fine_tuned_hungarian_squadv1_hu.md
new file mode 100644
index 00000000000000..5bc11238850ac5
--- /dev/null
+++ b/docs/_posts/ahmedlone127/2023-11-15-bert_qa_hubert_fine_tuned_hungarian_squadv1_hu.md
@@ -0,0 +1,95 @@
+---
+layout: model
+title: Hungarian bert_qa_hubert_fine_tuned_hungarian_squadv1 BertForQuestionAnswering from mcsabai
+author: John Snow Labs
+name: bert_qa_hubert_fine_tuned_hungarian_squadv1
+date: 2023-11-15
+tags: [bert, hu, open_source, question_answering, onnx]
+task: Question Answering
+language: hu
+edition: Spark NLP 5.2.0
+spark_version: 3.0
+supported: true
+engine: onnx
+annotator: BertForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained BertForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `bert_qa_hubert_fine_tuned_hungarian_squadv1` is a Hungarian model originally trained by mcsabai.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/bert_qa_hubert_fine_tuned_hungarian_squadv1_hu_5.2.0_3.0_1700007175058.zip){:.button.button-orange.button-orange-trans.arr.button-icon}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/bert_qa_hubert_fine_tuned_hungarian_squadv1_hu_5.2.0_3.0_1700007175058.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|bert_qa_hubert_fine_tuned_hungarian_squadv1|
+|Compatibility:|Spark NLP 5.2.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|hu|
+|Size:|464.2 MB|
+
+## References
+
+https://huggingface.co/mcsabai/huBert-fine-tuned-hungarian-squadv1
\ No newline at end of file
diff --git a/docs/_posts/ahmedlone127/2023-11-15-bert_qa_indo_base_uncased_finetuned_tydi_indo_in.md b/docs/_posts/ahmedlone127/2023-11-15-bert_qa_indo_base_uncased_finetuned_tydi_indo_in.md
new file mode 100644
index 00000000000000..a8a56c8209cd4a
--- /dev/null
+++ b/docs/_posts/ahmedlone127/2023-11-15-bert_qa_indo_base_uncased_finetuned_tydi_indo_in.md
@@ -0,0 +1,94 @@
+---
+layout: model
+title: Indonesian BertForQuestionAnswering Base Uncased model (from jakartaresearch)
+author: John Snow Labs
+name: bert_qa_indo_base_uncased_finetuned_tydi_indo
+date: 2023-11-15
+tags: [in, open_source, bert, question_answering, onnx]
+task: Question Answering
+language: in
+edition: Spark NLP 5.2.0
+spark_version: 3.0
+supported: true
+engine: onnx
+annotator: BertForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained BertForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `indobert-base-uncased-finetuned-tydiqa-indoqa` is an Indonesian model originally trained by `jakartaresearch`.
+
+## Predicted Entities
+
+
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/bert_qa_indo_base_uncased_finetuned_tydi_indo_in_5.2.0_3.0_1700007414125.zip){:.button.button-orange.button-orange-trans.arr.button-icon}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/bert_qa_indo_base_uncased_finetuned_tydi_indo_in_5.2.0_3.0_1700007414125.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = BertForQuestionAnswering.pretrained("bert_qa_indo_base_uncased_finetuned_tydi_indo","in")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = BertForQuestionAnswering.pretrained("bert_qa_indo_base_uncased_finetuned_tydi_indo","in")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq("What's my name?","My name is Clara and I live in Berkeley.").toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|bert_qa_indo_base_uncased_finetuned_tydi_indo|
+|Compatibility:|Spark NLP 5.2.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|in|
+|Size:|411.7 MB|
+|Case sensitive:|false|
+|Max sentence length:|512|
+
+## References
+
+References
+
+- https://huggingface.co/jakartaresearch/indobert-base-uncased-finetuned-tydiqa-indoqa
\ No newline at end of file
diff --git a/docs/_posts/ahmedlone127/2023-11-15-bert_qa_indo_finetune_tydi_transfer_indo_in.md b/docs/_posts/ahmedlone127/2023-11-15-bert_qa_indo_finetune_tydi_transfer_indo_in.md
new file mode 100644
index 00000000000000..9ba70beb602236
--- /dev/null
+++ b/docs/_posts/ahmedlone127/2023-11-15-bert_qa_indo_finetune_tydi_transfer_indo_in.md
@@ -0,0 +1,94 @@
+---
+layout: model
+title: Indonesian BertForQuestionAnswering Cased model (from andreaschandra)
+author: John Snow Labs
+name: bert_qa_indo_finetune_tydi_transfer_indo
+date: 2023-11-15
+tags: [in, open_source, bert, question_answering, onnx]
+task: Question Answering
+language: in
+edition: Spark NLP 5.2.0
+spark_version: 3.0
+supported: true
+engine: onnx
+annotator: BertForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained BertForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `indobert-finetune-tydiqa-transfer-indoqa` is an Indonesian model originally trained by `andreaschandra`.
+
+## Predicted Entities
+
+
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/bert_qa_indo_finetune_tydi_transfer_indo_in_5.2.0_3.0_1700006631538.zip){:.button.button-orange.button-orange-trans.arr.button-icon}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/bert_qa_indo_finetune_tydi_transfer_indo_in_5.2.0_3.0_1700006631538.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = BertForQuestionAnswering.pretrained("bert_qa_indo_finetune_tydi_transfer_indo","in")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = BertForQuestionAnswering.pretrained("bert_qa_indo_finetune_tydi_transfer_indo","in")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq("What's my name?","My name is Clara and I live in Berkeley.").toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|bert_qa_indo_finetune_tydi_transfer_indo|
+|Compatibility:|Spark NLP 5.2.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|in|
+|Size:|411.7 MB|
+|Case sensitive:|true|
+|Max sentence length:|512|
+
+## References
+
+References
+
+- https://huggingface.co/andreaschandra/indobert-finetune-tydiqa-transfer-indoqa
\ No newline at end of file
diff --git a/docs/_posts/ahmedlone127/2023-11-15-bert_qa_indo_finetuned_squad_id.md b/docs/_posts/ahmedlone127/2023-11-15-bert_qa_indo_finetuned_squad_id.md
new file mode 100644
index 00000000000000..a942b535a2aa85
--- /dev/null
+++ b/docs/_posts/ahmedlone127/2023-11-15-bert_qa_indo_finetuned_squad_id.md
@@ -0,0 +1,94 @@
+---
+layout: model
+title: Indonesian BertForQuestionAnswering Cased model (from botika)
+author: John Snow Labs
+name: bert_qa_indo_finetuned_squad
+date: 2023-11-15
+tags: [id, open_source, bert, question_answering, onnx]
+task: Question Answering
+language: id
+edition: Spark NLP 5.2.0
+spark_version: 3.0
+supported: true
+engine: onnx
+annotator: BertForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained BertForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `Indobert-QA-finetuned-squad` is an Indonesian model originally trained by `botika`.
+
+## Predicted Entities
+
+
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/bert_qa_indo_finetuned_squad_id_5.2.0_3.0_1700008964605.zip){:.button.button-orange.button-orange-trans.arr.button-icon}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/bert_qa_indo_finetuned_squad_id_5.2.0_3.0_1700008964605.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = BertForQuestionAnswering.pretrained("bert_qa_indo_finetuned_squad","id")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = BertForQuestionAnswering.pretrained("bert_qa_indo_finetuned_squad","id")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq("What's my name?","My name is Clara and I live in Berkeley.").toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|bert_qa_indo_finetuned_squad|
+|Compatibility:|Spark NLP 5.2.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|id|
+|Size:|411.7 MB|
+|Case sensitive:|true|
+|Max sentence length:|512|
+
+## References
+
+References
+
+- https://huggingface.co/botika/Indobert-QA-finetuned-squad
\ No newline at end of file
diff --git a/docs/_posts/ahmedlone127/2023-11-15-bert_qa_internetoftim_bert_large_uncased_squad_en.md b/docs/_posts/ahmedlone127/2023-11-15-bert_qa_internetoftim_bert_large_uncased_squad_en.md
new file mode 100644
index 00000000000000..9bf97784216c15
--- /dev/null
+++ b/docs/_posts/ahmedlone127/2023-11-15-bert_qa_internetoftim_bert_large_uncased_squad_en.md
@@ -0,0 +1,108 @@
+---
+layout: model
+title: English BertForQuestionAnswering model (from internetoftim)
+author: John Snow Labs
+name: bert_qa_internetoftim_bert_large_uncased_squad
+date: 2023-11-15
+tags: [en, open_source, question_answering, bert, onnx]
+task: Question Answering
+language: en
+edition: Spark NLP 5.2.0
+spark_version: 3.0
+supported: true
+engine: onnx
+annotator: BertForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained Question Answering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `bert-large-uncased-squad` is an English model originally trained by `internetoftim`.
+
+## Predicted Entities
+
+
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/bert_qa_internetoftim_bert_large_uncased_squad_en_5.2.0_3.0_1700007331979.zip){:.button.button-orange.button-orange-trans.arr.button-icon}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/bert_qa_internetoftim_bert_large_uncased_squad_en_5.2.0_3.0_1700007331979.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+document_assembler = MultiDocumentAssembler() \
+.setInputCols(["question", "context"]) \
+.setOutputCols(["document_question", "document_context"])
+
+spanClassifier = BertForQuestionAnswering.pretrained("bert_qa_internetoftim_bert_large_uncased_squad","en") \
+.setInputCols(["document_question", "document_context"]) \
+.setOutputCol("answer") \
+.setCaseSensitive(True)
+
+pipeline = Pipeline().setStages([
+document_assembler,
+spanClassifier
+])
+
+example = spark.createDataFrame([["What's my name?", "My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(example).transform(example)
+```
+```scala
+val document = new MultiDocumentAssembler()
+.setInputCols("question", "context")
+.setOutputCols("document_question", "document_context")
+
+val spanClassifier = BertForQuestionAnswering
+.pretrained("bert_qa_internetoftim_bert_large_uncased_squad","en")
+.setInputCols(Array("document_question", "document_context"))
+.setOutputCol("answer")
+.setCaseSensitive(true)
+.setMaxSentenceLength(512)
+
+val pipeline = new Pipeline().setStages(Array(document, spanClassifier))
+
+val example = Seq(
+("Where was John Lenon born?", "John Lenon was born in London and lived in Paris. My name is Sarah and I live in London."),
+("What's my name?", "My name is Clara and I live in Berkeley."))
+.toDF("question", "context")
+
+val result = pipeline.fit(example).transform(example)
+```
+
+{:.nlu-block}
+```python
+import nlu
+nlu.load("en.answer_question.squad.bert.large_uncased.by_internetoftim").predict("""What's my name?|||"My name is Clara and I live in Berkeley.""")
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|bert_qa_internetoftim_bert_large_uncased_squad|
+|Compatibility:|Spark NLP 5.2.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[sentence, token]|
+|Output Labels:|[embeddings]|
+|Language:|en|
+|Size:|797.4 MB|
+|Case sensitive:|false|
+|Max sentence length:|512|
+
+## References
+
+References
+
+- https://huggingface.co/internetoftim/bert-large-uncased-squad
\ No newline at end of file
diff --git a/docs/_posts/ahmedlone127/2023-11-15-bert_qa_irenelizihui_finetuned_squad_en.md b/docs/_posts/ahmedlone127/2023-11-15-bert_qa_irenelizihui_finetuned_squad_en.md
new file mode 100644
index 00000000000000..bee046bbcff5a6
--- /dev/null
+++ b/docs/_posts/ahmedlone127/2023-11-15-bert_qa_irenelizihui_finetuned_squad_en.md
@@ -0,0 +1,100 @@
+---
+layout: model
+title: English BertForQuestionAnswering Cased model (from irenelizihui)
+author: John Snow Labs
+name: bert_qa_irenelizihui_finetuned_squad
+date: 2023-11-15
+tags: [en, open_source, bert, question_answering, onnx]
+task: Question Answering
+language: en
+edition: Spark NLP 5.2.0
+spark_version: 3.0
+supported: true
+engine: onnx
+annotator: BertForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained Question Answering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `bert-finetuned-squad` is an English model originally trained by `irenelizihui`.
+
+## Predicted Entities
+
+
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/bert_qa_irenelizihui_finetuned_squad_en_5.2.0_3.0_1700009279150.zip){:.button.button-orange.button-orange-trans.arr.button-icon}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/bert_qa_irenelizihui_finetuned_squad_en_5.2.0_3.0_1700009279150.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+documentAssembler = MultiDocumentAssembler() \
+ .setInputCols(["question", "context"]) \
+ .setOutputCols(["document_question", "document_context"])
+
+spanClassifier = BertForQuestionAnswering.pretrained("bert_qa_irenelizihui_finetuned_squad","en") \
+ .setInputCols(["document_question", "document_context"]) \
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[documentAssembler, spanClassifier])
+
+data = spark.createDataFrame([["What is my name?", "My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val documentAssembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val spanClassifer = BertForQuestionAnswering.pretrained("bert_qa_irenelizihui_finetuned_squad","en")
+  .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(documentAssembler, spanClassifier))
+
+val data = Seq(("What is my name?", "My name is Clara and I live in Berkeley.")).toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+{:.nlu-block}
+```python
+import nlu
+nlu.load("en.answer_question.bert.squad.finetuned.by_irenelizihui").predict("""What is my name?|||"My name is Clara and I live in Berkeley.""")
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|bert_qa_irenelizihui_finetuned_squad|
+|Compatibility:|Spark NLP 5.2.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|403.6 MB|
+|Case sensitive:|true|
+|Max sentence length:|512|
+
+## References
+
+References
+
+- https://huggingface.co/irenelizihui/bert-finetuned-squad
\ No newline at end of file
diff --git a/docs/_posts/ahmedlone127/2023-11-15-bert_qa_ixambert_finetuned_squad_basque_marcbrun_eu.md b/docs/_posts/ahmedlone127/2023-11-15-bert_qa_ixambert_finetuned_squad_basque_marcbrun_eu.md
new file mode 100644
index 00000000000000..5dd42be70cbf79
--- /dev/null
+++ b/docs/_posts/ahmedlone127/2023-11-15-bert_qa_ixambert_finetuned_squad_basque_marcbrun_eu.md
@@ -0,0 +1,93 @@
+---
+layout: model
+title: Basque bert_qa_ixambert_finetuned_squad_basque_marcbrun BertForQuestionAnswering from MarcBrun
+author: John Snow Labs
+name: bert_qa_ixambert_finetuned_squad_basque_marcbrun
+date: 2023-11-15
+tags: [bert, eu, open_source, question_answering, onnx]
+task: Question Answering
+language: eu
+edition: Spark NLP 5.2.0
+spark_version: 3.0
+supported: true
+engine: onnx
+annotator: BertForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained BertForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `bert_qa_ixambert_finetuned_squad_basque_marcbrun` is a Basque model originally trained by MarcBrun.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/bert_qa_ixambert_finetuned_squad_basque_marcbrun_eu_5.2.0_3.0_1700009508595.zip){:.button.button-orange.button-orange-trans.arr.button-icon}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/bert_qa_ixambert_finetuned_squad_basque_marcbrun_eu_5.2.0_3.0_1700009508595.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|bert_qa_ixambert_finetuned_squad_basque_marcbrun|
+|Compatibility:|Spark NLP 5.2.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|eu|
+|Size:|661.1 MB|
+
+## References
+
+https://huggingface.co/MarcBrun/ixambert-finetuned-squad-eu
\ No newline at end of file
diff --git a/docs/_posts/ahmedlone127/2023-11-15-bert_qa_jimypbr_bert_base_uncased_squad_en.md b/docs/_posts/ahmedlone127/2023-11-15-bert_qa_jimypbr_bert_base_uncased_squad_en.md
new file mode 100644
index 00000000000000..3c4e0d94b3d5e4
--- /dev/null
+++ b/docs/_posts/ahmedlone127/2023-11-15-bert_qa_jimypbr_bert_base_uncased_squad_en.md
@@ -0,0 +1,108 @@
+---
+layout: model
+title: English BertForQuestionAnswering model (from jimypbr)
+author: John Snow Labs
+name: bert_qa_jimypbr_bert_base_uncased_squad
+date: 2023-11-15
+tags: [en, open_source, question_answering, bert, onnx]
+task: Question Answering
+language: en
+edition: Spark NLP 5.2.0
+spark_version: 3.0
+supported: true
+engine: onnx
+annotator: BertForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained Question Answering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `bert-base-uncased-squad` is an English model originally trained by `jimypbr`.
+
+## Predicted Entities
+
+
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/bert_qa_jimypbr_bert_base_uncased_squad_en_5.2.0_3.0_1700007658852.zip){:.button.button-orange.button-orange-trans.arr.button-icon}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/bert_qa_jimypbr_bert_base_uncased_squad_en_5.2.0_3.0_1700007658852.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+document_assembler = MultiDocumentAssembler() \
+.setInputCols(["question", "context"]) \
+.setOutputCols(["document_question", "document_context"])
+
+spanClassifier = BertForQuestionAnswering.pretrained("bert_qa_jimypbr_bert_base_uncased_squad","en") \
+.setInputCols(["document_question", "document_context"]) \
+.setOutputCol("answer") \
+.setCaseSensitive(True)
+
+pipeline = Pipeline().setStages([
+document_assembler,
+spanClassifier
+])
+
+example = spark.createDataFrame([["What's my name?", "My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(example).transform(example)
+```
+```scala
+val document = new MultiDocumentAssembler()
+.setInputCols("question", "context")
+.setOutputCols("document_question", "document_context")
+
+val spanClassifier = BertForQuestionAnswering
+.pretrained("bert_qa_jimypbr_bert_base_uncased_squad","en")
+.setInputCols(Array("document_question", "document_context"))
+.setOutputCol("answer")
+.setCaseSensitive(true)
+.setMaxSentenceLength(512)
+
+val pipeline = new Pipeline().setStages(Array(document, spanClassifier))
+
+val example = Seq(
+("Where was John Lenon born?", "John Lenon was born in London and lived in Paris. My name is Sarah and I live in London."),
+("What's my name?", "My name is Clara and I live in Berkeley."))
+.toDF("question", "context")
+
+val result = pipeline.fit(example).transform(example)
+```
+
+{:.nlu-block}
+```python
+import nlu
+nlu.load("en.answer_question.squad.bert.base_uncased.by_jimypbr").predict("""What's my name?|||"My name is Clara and I live in Berkeley.""")
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|bert_qa_jimypbr_bert_base_uncased_squad|
+|Compatibility:|Spark NLP 5.2.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[sentence, token]|
+|Output Labels:|[embeddings]|
+|Language:|en|
+|Size:|258.5 MB|
+|Case sensitive:|false|
+|Max sentence length:|512|
+
+## References
+
+References
+
+- https://huggingface.co/jimypbr/bert-base-uncased-squad
\ No newline at end of file
diff --git a/docs/_posts/ahmedlone127/2023-11-15-bert_qa_kamilali_distilbert_base_uncased_finetuned_squad_en.md b/docs/_posts/ahmedlone127/2023-11-15-bert_qa_kamilali_distilbert_base_uncased_finetuned_squad_en.md
new file mode 100644
index 00000000000000..c99949192f7d69
--- /dev/null
+++ b/docs/_posts/ahmedlone127/2023-11-15-bert_qa_kamilali_distilbert_base_uncased_finetuned_squad_en.md
@@ -0,0 +1,108 @@
+---
+layout: model
+title: English BertForQuestionAnswering model (from kamilali)
+author: John Snow Labs
+name: bert_qa_kamilali_distilbert_base_uncased_finetuned_squad
+date: 2023-11-15
+tags: [en, open_source, question_answering, bert, onnx]
+task: Question Answering
+language: en
+edition: Spark NLP 5.2.0
+spark_version: 3.0
+supported: true
+engine: onnx
+annotator: BertForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained Question Answering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `distilbert-base-uncased-finetuned-squad` is an English model originally trained by `kamilali`.
+
+## Predicted Entities
+
+
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/bert_qa_kamilali_distilbert_base_uncased_finetuned_squad_en_5.2.0_3.0_1700007960980.zip){:.button.button-orange.button-orange-trans.arr.button-icon}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/bert_qa_kamilali_distilbert_base_uncased_finetuned_squad_en_5.2.0_3.0_1700007960980.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+document_assembler = MultiDocumentAssembler() \
+.setInputCols(["question", "context"]) \
+.setOutputCols(["document_question", "document_context"])
+
+spanClassifier = BertForQuestionAnswering.pretrained("bert_qa_kamilali_distilbert_base_uncased_finetuned_squad","en") \
+.setInputCols(["document_question", "document_context"]) \
+.setOutputCol("answer") \
+.setCaseSensitive(True)
+
+pipeline = Pipeline().setStages([
+document_assembler,
+spanClassifier
+])
+
+example = spark.createDataFrame([["What's my name?", "My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(example).transform(example)
+```
+```scala
+val document = new MultiDocumentAssembler()
+.setInputCols("question", "context")
+.setOutputCols("document_question", "document_context")
+
+val spanClassifier = BertForQuestionAnswering
+.pretrained("bert_qa_kamilali_distilbert_base_uncased_finetuned_squad","en")
+.setInputCols(Array("document_question", "document_context"))
+.setOutputCol("answer")
+.setCaseSensitive(true)
+.setMaxSentenceLength(512)
+
+val pipeline = new Pipeline().setStages(Array(document, spanClassifier))
+
+val example = Seq(
+("Where was John Lenon born?", "John Lenon was born in London and lived in Paris. My name is Sarah and I live in London."),
+("What's my name?", "My name is Clara and I live in Berkeley."))
+.toDF("question", "context")
+
+val result = pipeline.fit(example).transform(example)
+```
+
+{:.nlu-block}
+```python
+import nlu
+nlu.load("en.answer_question.squad.bert.distilled_base_uncased").predict("""What's my name?|||My name is Clara and I live in Berkeley.""")
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|bert_qa_kamilali_distilbert_base_uncased_finetuned_squad|
+|Compatibility:|Spark NLP 5.2.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|1.3 GB|
+|Case sensitive:|false|
+|Max sentence length:|512|
+
+## References
+
+References
+
+- https://huggingface.co/kamilali/distilbert-base-uncased-finetuned-squad
\ No newline at end of file
diff --git a/docs/_posts/ahmedlone127/2023-11-15-bert_qa_kaporter_bert_base_uncased_finetuned_squad_en.md b/docs/_posts/ahmedlone127/2023-11-15-bert_qa_kaporter_bert_base_uncased_finetuned_squad_en.md
new file mode 100644
index 00000000000000..71b7489a394c8c
--- /dev/null
+++ b/docs/_posts/ahmedlone127/2023-11-15-bert_qa_kaporter_bert_base_uncased_finetuned_squad_en.md
@@ -0,0 +1,108 @@
+---
+layout: model
+title: English BertForQuestionAnswering model (from kaporter)
+author: John Snow Labs
+name: bert_qa_kaporter_bert_base_uncased_finetuned_squad
+date: 2023-11-15
+tags: [en, open_source, question_answering, bert, onnx]
+task: Question Answering
+language: en
+edition: Spark NLP 5.2.0
+spark_version: 3.0
+supported: true
+engine: onnx
+annotator: BertForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained Question Answering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `bert-base-uncased-finetuned-squad` is an English model originally trained by `kaporter`.
+
+## Predicted Entities
+
+
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/bert_qa_kaporter_bert_base_uncased_finetuned_squad_en_5.2.0_3.0_1700008245981.zip){:.button.button-orange.button-orange-trans.arr.button-icon}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/bert_qa_kaporter_bert_base_uncased_finetuned_squad_en_5.2.0_3.0_1700008245981.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+document_assembler = MultiDocumentAssembler() \
+.setInputCols(["question", "context"]) \
+.setOutputCols(["document_question", "document_context"])
+
+spanClassifier = BertForQuestionAnswering.pretrained("bert_qa_kaporter_bert_base_uncased_finetuned_squad","en") \
+.setInputCols(["document_question", "document_context"]) \
+.setOutputCol("answer") \
+.setCaseSensitive(True)
+
+pipeline = Pipeline().setStages([
+document_assembler,
+spanClassifier
+])
+
+example = spark.createDataFrame([["What's my name?", "My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(example).transform(example)
+```
+```scala
+val document = new MultiDocumentAssembler()
+.setInputCols("question", "context")
+.setOutputCols("document_question", "document_context")
+
+val spanClassifier = BertForQuestionAnswering
+.pretrained("bert_qa_kaporter_bert_base_uncased_finetuned_squad","en")
+.setInputCols(Array("document_question", "document_context"))
+.setOutputCol("answer")
+.setCaseSensitive(true)
+.setMaxSentenceLength(512)
+
+val pipeline = new Pipeline().setStages(Array(document, spanClassifier))
+
+val example = Seq(
+("Where was John Lenon born?", "John Lenon was born in London and lived in Paris. My name is Sarah and I live in London."),
+("What's my name?", "My name is Clara and I live in Berkeley."))
+.toDF("question", "context")
+
+val result = pipeline.fit(example).transform(example)
+```
+
+{:.nlu-block}
+```python
+import nlu
+nlu.load("en.answer_question.squad.bert.base_uncased.by_kaporter").predict("""What's my name?|||My name is Clara and I live in Berkeley.""")
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|bert_qa_kaporter_bert_base_uncased_finetuned_squad|
+|Compatibility:|Spark NLP 5.2.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|407.2 MB|
+|Case sensitive:|false|
+|Max sentence length:|512|
+
+## References
+
+References
+
+- https://huggingface.co/kaporter/bert-base-uncased-finetuned-squad
\ No newline at end of file
diff --git a/docs/_posts/ahmedlone127/2023-11-15-bert_qa_kcbert_base_finetuned_squad_ko.md b/docs/_posts/ahmedlone127/2023-11-15-bert_qa_kcbert_base_finetuned_squad_ko.md
new file mode 100644
index 00000000000000..45bc9ce7f75fcc
--- /dev/null
+++ b/docs/_posts/ahmedlone127/2023-11-15-bert_qa_kcbert_base_finetuned_squad_ko.md
@@ -0,0 +1,94 @@
+---
+layout: model
+title: Korean BertForQuestionAnswering Base Cased model (from tucan9389)
+author: John Snow Labs
+name: bert_qa_kcbert_base_finetuned_squad
+date: 2023-11-15
+tags: [ko, open_source, bert, question_answering, onnx]
+task: Question Answering
+language: ko
+edition: Spark NLP 5.2.0
+spark_version: 3.0
+supported: true
+engine: onnx
+annotator: BertForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained Question Answering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `kcbert-base-finetuned-squad` is a Korean model originally trained by `tucan9389`.
+
+## Predicted Entities
+
+
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/bert_qa_kcbert_base_finetuned_squad_ko_5.2.0_3.0_1700008506310.zip){:.button.button-orange.button-orange-trans.arr.button-icon}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/bert_qa_kcbert_base_finetuned_squad_ko_5.2.0_3.0_1700008506310.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+documentAssembler = MultiDocumentAssembler() \
+ .setInputCols(["question", "context"]) \
+ .setOutputCols(["document_question", "document_context"])
+
+spanClassifier = BertForQuestionAnswering.pretrained("bert_qa_kcbert_base_finetuned_squad","ko") \
+ .setInputCols(["document_question", "document_context"]) \
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[documentAssembler, spanClassifier])
+
+data = spark.createDataFrame([["내 이름은 무엇입니까?", "제 이름은 클라라이고 저는 버클리에 살고 있습니다."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val documentAssembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val spanClassifier = BertForQuestionAnswering.pretrained("bert_qa_kcbert_base_finetuned_squad","ko")
+  .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(documentAssembler, spanClassifier))
+
+val data = Seq(("내 이름은 무엇입니까?", "제 이름은 클라라이고 저는 버클리에 살고 있습니다.")).toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|bert_qa_kcbert_base_finetuned_squad|
+|Compatibility:|Spark NLP 5.2.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|ko|
+|Size:|406.2 MB|
+|Case sensitive:|true|
+|Max sentence length:|512|
+
+## References
+
+References
+
+- https://huggingface.co/tucan9389/kcbert-base-finetuned-squad
\ No newline at end of file
diff --git a/docs/_posts/ahmedlone127/2023-11-15-bert_qa_keepitreal_finetuned_squad_en.md b/docs/_posts/ahmedlone127/2023-11-15-bert_qa_keepitreal_finetuned_squad_en.md
new file mode 100644
index 00000000000000..5ffb340213ba83
--- /dev/null
+++ b/docs/_posts/ahmedlone127/2023-11-15-bert_qa_keepitreal_finetuned_squad_en.md
@@ -0,0 +1,94 @@
+---
+layout: model
+title: English BertForQuestionAnswering Cased model (from keepitreal)
+author: John Snow Labs
+name: bert_qa_keepitreal_finetuned_squad
+date: 2023-11-15
+tags: [en, open_source, bert, question_answering, onnx]
+task: Question Answering
+language: en
+edition: Spark NLP 5.2.0
+spark_version: 3.0
+supported: true
+engine: onnx
+annotator: BertForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained BertForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `bert-finetuned-squad` is a English model originally trained by `keepitreal`.
+
+## Predicted Entities
+
+
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/bert_qa_keepitreal_finetuned_squad_en_5.2.0_3.0_1700008749370.zip){:.button.button-orange.button-orange-trans.arr.button-icon}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/bert_qa_keepitreal_finetuned_squad_en_5.2.0_3.0_1700008749370.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = BertForQuestionAnswering.pretrained("bert_qa_keepitreal_finetuned_squad","en")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = BertForQuestionAnswering.pretrained("bert_qa_keepitreal_finetuned_squad","en")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq(("What's my name?","My name is Clara and I live in Berkeley.")).toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|bert_qa_keepitreal_finetuned_squad|
+|Compatibility:|Spark NLP 5.2.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|403.6 MB|
+|Case sensitive:|true|
+|Max sentence length:|512|
+
+## References
+
+References
+
+- https://huggingface.co/keepitreal/bert-finetuned-squad
\ No newline at end of file
diff --git a/docs/_posts/ahmedlone127/2023-11-15-bert_qa_khanh_base_multilingual_cased_finetuned_squad_en.md b/docs/_posts/ahmedlone127/2023-11-15-bert_qa_khanh_base_multilingual_cased_finetuned_squad_en.md
new file mode 100644
index 00000000000000..473270e6716d6a
--- /dev/null
+++ b/docs/_posts/ahmedlone127/2023-11-15-bert_qa_khanh_base_multilingual_cased_finetuned_squad_en.md
@@ -0,0 +1,100 @@
+---
+layout: model
+title: English BertForQuestionAnswering Base Cased model (from Khanh)
+author: John Snow Labs
+name: bert_qa_khanh_base_multilingual_cased_finetuned_squad
+date: 2023-11-15
+tags: [en, open_source, bert, question_answering, onnx]
+task: Question Answering
+language: en
+edition: Spark NLP 5.2.0
+spark_version: 3.0
+supported: true
+engine: onnx
+annotator: BertForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained Question Answering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `bert-base-multilingual-cased-finetuned-squad` is a English model originally trained by `Khanh`.
+
+## Predicted Entities
+
+
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/bert_qa_khanh_base_multilingual_cased_finetuned_squad_en_5.2.0_3.0_1700009076961.zip){:.button.button-orange.button-orange-trans.arr.button-icon}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/bert_qa_khanh_base_multilingual_cased_finetuned_squad_en_5.2.0_3.0_1700009076961.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+documentAssembler = MultiDocumentAssembler() \
+ .setInputCols(["question", "context"]) \
+ .setOutputCols(["document_question", "document_context"])
+
+spanClassifier = BertForQuestionAnswering.pretrained("bert_qa_khanh_base_multilingual_cased_finetuned_squad","en") \
+ .setInputCols(["document_question", "document_context"]) \
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[documentAssembler, spanClassifier])
+
+data = spark.createDataFrame([["What is my name?", "My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val documentAssembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val spanClassifier = BertForQuestionAnswering.pretrained("bert_qa_khanh_base_multilingual_cased_finetuned_squad","en")
+  .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(documentAssembler, spanClassifier))
+
+val data = Seq(("What is my name?", "My name is Clara and I live in Berkeley.")).toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+{:.nlu-block}
+```python
+import nlu
+nlu.load("en.answer_question.bert.squad.cased_multilingual_base_finetuned").predict("""What is my name?|||My name is Clara and I live in Berkeley.""")
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|bert_qa_khanh_base_multilingual_cased_finetuned_squad|
+|Compatibility:|Spark NLP 5.2.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|665.0 MB|
+|Case sensitive:|true|
+|Max sentence length:|512|
+
+## References
+
+References
+
+- https://huggingface.co/Khanh/bert-base-multilingual-cased-finetuned-squad
\ No newline at end of file
diff --git a/docs/_posts/ahmedlone127/2023-11-15-bert_qa_klue_bert_base_aihub_mrc_ko.md b/docs/_posts/ahmedlone127/2023-11-15-bert_qa_klue_bert_base_aihub_mrc_ko.md
new file mode 100644
index 00000000000000..444ff065fc824e
--- /dev/null
+++ b/docs/_posts/ahmedlone127/2023-11-15-bert_qa_klue_bert_base_aihub_mrc_ko.md
@@ -0,0 +1,111 @@
+---
+layout: model
+title: Korean BertForQuestionAnswering model (from bespin-global)
+author: John Snow Labs
+name: bert_qa_klue_bert_base_aihub_mrc
+date: 2023-11-15
+tags: [ko, open_source, question_answering, bert, onnx]
+task: Question Answering
+language: ko
+edition: Spark NLP 5.2.0
+spark_version: 3.0
+supported: true
+engine: onnx
+annotator: BertForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained Question Answering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `klue-bert-base-aihub-mrc` is a Korean model originally trained by `bespin-global`.
+
+## Predicted Entities
+
+
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/bert_qa_klue_bert_base_aihub_mrc_ko_5.2.0_3.0_1700009374093.zip){:.button.button-orange.button-orange-trans.arr.button-icon}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/bert_qa_klue_bert_base_aihub_mrc_ko_5.2.0_3.0_1700009374093.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+document_assembler = MultiDocumentAssembler() \
+.setInputCols(["question", "context"]) \
+.setOutputCols(["document_question", "document_context"])
+
+spanClassifier = BertForQuestionAnswering.pretrained("bert_qa_klue_bert_base_aihub_mrc","ko") \
+.setInputCols(["document_question", "document_context"]) \
+.setOutputCol("answer") \
+.setCaseSensitive(True)
+
+pipeline = Pipeline().setStages([
+document_assembler,
+spanClassifier
+])
+
+example = spark.createDataFrame([["What's my name?", "My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(example).transform(example)
+```
+```scala
+val document = new MultiDocumentAssembler()
+.setInputCols("question", "context")
+.setOutputCols("document_question", "document_context")
+
+val spanClassifier = BertForQuestionAnswering
+.pretrained("bert_qa_klue_bert_base_aihub_mrc","ko")
+.setInputCols(Array("document_question", "document_context"))
+.setOutputCol("answer")
+.setCaseSensitive(true)
+.setMaxSentenceLength(512)
+
+val pipeline = new Pipeline().setStages(Array(document, spanClassifier))
+
+val example = Seq(
+("Where was John Lenon born?", "John Lenon was born in London and lived in Paris. My name is Sarah and I live in London."),
+("What's my name?", "My name is Clara and I live in Berkeley."))
+.toDF("question", "context")
+
+val result = pipeline.fit(example).transform(example)
+```
+
+{:.nlu-block}
+```python
+import nlu
+nlu.load("ko.answer_question.klue.bert.base_aihub.by_bespin-global").predict("""What's my name?|||My name is Clara and I live in Berkeley.""")
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|bert_qa_klue_bert_base_aihub_mrc|
+|Compatibility:|Spark NLP 5.2.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|ko|
+|Size:|412.4 MB|
+|Case sensitive:|true|
+|Max sentence length:|512|
+
+## References
+
+References
+
+- https://huggingface.co/bespin-global/klue-bert-base-aihub-mrc
+- https://github.com/KLUE-benchmark/KLUE
+- https://www.bespinglobal.com/
+- https://aihub.or.kr/aidata/86
\ No newline at end of file
diff --git a/docs/_posts/ahmedlone127/2023-11-15-bert_qa_kobert_finetuned_squad_kor_v1_ko.md b/docs/_posts/ahmedlone127/2023-11-15-bert_qa_kobert_finetuned_squad_kor_v1_ko.md
new file mode 100644
index 00000000000000..97fac200dca544
--- /dev/null
+++ b/docs/_posts/ahmedlone127/2023-11-15-bert_qa_kobert_finetuned_squad_kor_v1_ko.md
@@ -0,0 +1,94 @@
+---
+layout: model
+title: Korean BertForQuestionAnswering Cased model (from arogyaGurkha)
+author: John Snow Labs
+name: bert_qa_kobert_finetuned_squad_kor_v1
+date: 2023-11-15
+tags: [ko, open_source, bert, question_answering, onnx]
+task: Question Answering
+language: ko
+edition: Spark NLP 5.2.0
+spark_version: 3.0
+supported: true
+engine: onnx
+annotator: BertForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained Question Answering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `kobert-finetuned-squad_kor_v1` is a Korean model originally trained by `arogyaGurkha`.
+
+## Predicted Entities
+
+
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/bert_qa_kobert_finetuned_squad_kor_v1_ko_5.2.0_3.0_1700009633696.zip){:.button.button-orange.button-orange-trans.arr.button-icon}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/bert_qa_kobert_finetuned_squad_kor_v1_ko_5.2.0_3.0_1700009633696.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+documentAssembler = MultiDocumentAssembler() \
+ .setInputCols(["question", "context"]) \
+ .setOutputCols(["document_question", "document_context"])
+
+spanClassifier = BertForQuestionAnswering.pretrained("bert_qa_kobert_finetuned_squad_kor_v1","ko") \
+ .setInputCols(["document_question", "document_context"]) \
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[documentAssembler, spanClassifier])
+
+data = spark.createDataFrame([["내 이름은 무엇입니까?", "제 이름은 클라라이고 저는 버클리에 살고 있습니다."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val documentAssembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val spanClassifier = BertForQuestionAnswering.pretrained("bert_qa_kobert_finetuned_squad_kor_v1","ko")
+  .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(documentAssembler, spanClassifier))
+
+val data = Seq(("내 이름은 무엇입니까?", "제 이름은 클라라이고 저는 버클리에 살고 있습니다.")).toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|bert_qa_kobert_finetuned_squad_kor_v1|
+|Compatibility:|Spark NLP 5.2.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|ko|
+|Size:|342.9 MB|
+|Case sensitive:|true|
+|Max sentence length:|512|
+
+## References
+
+References
+
+- https://huggingface.co/arogyaGurkha/kobert-finetuned-squad_kor_v1
\ No newline at end of file
diff --git a/docs/_posts/ahmedlone127/2023-11-15-bert_qa_korean_lm_finetuned_klue_v2_ko.md b/docs/_posts/ahmedlone127/2023-11-15-bert_qa_korean_lm_finetuned_klue_v2_ko.md
new file mode 100644
index 00000000000000..095d97ae6ef463
--- /dev/null
+++ b/docs/_posts/ahmedlone127/2023-11-15-bert_qa_korean_lm_finetuned_klue_v2_ko.md
@@ -0,0 +1,93 @@
+---
+layout: model
+title: Korean bert_qa_korean_lm_finetuned_klue_v2 BertForQuestionAnswering from 2tina
+author: John Snow Labs
+name: bert_qa_korean_lm_finetuned_klue_v2
+date: 2023-11-15
+tags: [bert, ko, open_source, question_answering, onnx]
+task: Question Answering
+language: ko
+edition: Spark NLP 5.2.0
+spark_version: 3.0
+supported: true
+engine: onnx
+annotator: BertForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained BertForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP.`bert_qa_korean_lm_finetuned_klue_v2` is a Korean model originally trained by 2tina.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/bert_qa_korean_lm_finetuned_klue_v2_ko_5.2.0_3.0_1700009647081.zip){:.button.button-orange.button-orange-trans.arr.button-icon}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/bert_qa_korean_lm_finetuned_klue_v2_ko_5.2.0_3.0_1700009647081.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|bert_qa_korean_lm_finetuned_klue_v2|
+|Compatibility:|Spark NLP 5.2.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|ko|
+|Size:|342.7 MB|
+
+## References
+
+https://huggingface.co/2tina/kobert-lm-finetuned-klue-v2
\ No newline at end of file
diff --git a/docs/_posts/ahmedlone127/2023-11-15-bert_qa_large_en.md b/docs/_posts/ahmedlone127/2023-11-15-bert_qa_large_en.md
new file mode 100644
index 00000000000000..92099edc8acce7
--- /dev/null
+++ b/docs/_posts/ahmedlone127/2023-11-15-bert_qa_large_en.md
@@ -0,0 +1,94 @@
+---
+layout: model
+title: English BertForQuestionAnswering Large Cased model (from srcocotero)
+author: John Snow Labs
+name: bert_qa_large
+date: 2023-11-15
+tags: [en, open_source, bert, question_answering, onnx]
+task: Question Answering
+language: en
+edition: Spark NLP 5.2.0
+spark_version: 3.0
+supported: true
+engine: onnx
+annotator: BertForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained BertForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `bert-large-qa` is a English model originally trained by `srcocotero`.
+
+## Predicted Entities
+
+
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/bert_qa_large_en_5.2.0_3.0_1700010204357.zip){:.button.button-orange.button-orange-trans.arr.button-icon}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/bert_qa_large_en_5.2.0_3.0_1700010204357.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = BertForQuestionAnswering.pretrained("bert_qa_large","en")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = BertForQuestionAnswering.pretrained("bert_qa_large","en")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq(("What's my name?","My name is Clara and I live in Berkeley.")).toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|bert_qa_large|
+|Compatibility:|Spark NLP 5.2.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|1.3 GB|
+|Case sensitive:|true|
+|Max sentence length:|512|
+
+## References
+
+References
+
+- https://huggingface.co/srcocotero/bert-large-qa
\ No newline at end of file
diff --git a/docs/_posts/ahmedlone127/2023-11-15-bert_qa_large_japanese_wikipedia_ud_head_ja.md b/docs/_posts/ahmedlone127/2023-11-15-bert_qa_large_japanese_wikipedia_ud_head_ja.md
new file mode 100644
index 00000000000000..105944d750edeb
--- /dev/null
+++ b/docs/_posts/ahmedlone127/2023-11-15-bert_qa_large_japanese_wikipedia_ud_head_ja.md
@@ -0,0 +1,101 @@
+---
+layout: model
+title: Japanese BertForQuestionAnswering Large model (from KoichiYasuoka)
+author: John Snow Labs
+name: bert_qa_large_japanese_wikipedia_ud_head
+date: 2023-11-15
+tags: [ja, open_source, bert, question_answering, onnx]
+task: Question Answering
+language: ja
+edition: Spark NLP 5.2.0
+spark_version: 3.0
+supported: true
+engine: onnx
+annotator: BertForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained Question Answering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `bert-large-japanese-wikipedia-ud-head` is a Japanese model originally trained by `KoichiYasuoka`.
+
+## Predicted Entities
+
+
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/bert_qa_large_japanese_wikipedia_ud_head_ja_5.2.0_3.0_1700006723855.zip){:.button.button-orange.button-orange-trans.arr.button-icon}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/bert_qa_large_japanese_wikipedia_ud_head_ja_5.2.0_3.0_1700006723855.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|bert_qa_large_japanese_wikipedia_ud_head|
+|Compatibility:|Spark NLP 5.2.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|ja|
+|Size:|1.2 GB|
+|Case sensitive:|true|
+|Max sentence length:|512|
+
+## References
+
+References
+
+- https://huggingface.co/KoichiYasuoka/bert-large-japanese-wikipedia-ud-head
+- https://github.com/UniversalDependencies/UD_Japanese-GSDLUW
\ No newline at end of file
diff --git a/docs/_posts/ahmedlone127/2023-11-15-bert_qa_large_squad_en.md b/docs/_posts/ahmedlone127/2023-11-15-bert_qa_large_squad_en.md
new file mode 100644
index 00000000000000..b1a80652182fc9
--- /dev/null
+++ b/docs/_posts/ahmedlone127/2023-11-15-bert_qa_large_squad_en.md
@@ -0,0 +1,94 @@
+---
+layout: model
+title: English BertForQuestionAnswering Large Cased model (from jaimin)
+author: John Snow Labs
+name: bert_qa_large_squad
+date: 2023-11-15
+tags: [en, open_source, bert, question_answering, onnx]
+task: Question Answering
+language: en
+edition: Spark NLP 5.2.0
+spark_version: 3.0
+supported: true
+engine: onnx
+annotator: BertForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained BertForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `bert-large-squad` is a English model originally trained by `jaimin`.
+
+## Predicted Entities
+
+
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/bert_qa_large_squad_en_5.2.0_3.0_1700008165848.zip){:.button.button-orange.button-orange-trans.arr.button-icon}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/bert_qa_large_squad_en_5.2.0_3.0_1700008165848.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = BertForQuestionAnswering.pretrained("bert_qa_large_squad","en")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = BertForQuestionAnswering.pretrained("bert_qa_large_squad","en")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq("What's my name?","My name is Clara and I live in Berkeley.").toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|bert_qa_large_squad|
+|Compatibility:|Spark NLP 5.2.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|1.3 GB|
+|Case sensitive:|true|
+|Max sentence length:|512|
+
+## References
+
+References
+
+- https://huggingface.co/jaimin/bert-large-squad
\ No newline at end of file
diff --git a/docs/_posts/ahmedlone127/2023-11-15-bert_qa_large_uncased_spanish_sign_language_en.md b/docs/_posts/ahmedlone127/2023-11-15-bert_qa_large_uncased_spanish_sign_language_en.md
new file mode 100644
index 00000000000000..7c7e2d92c88337
--- /dev/null
+++ b/docs/_posts/ahmedlone127/2023-11-15-bert_qa_large_uncased_spanish_sign_language_en.md
@@ -0,0 +1,93 @@
+---
+layout: model
+title: English bert_qa_large_uncased_spanish_sign_language BertForQuestionAnswering from michaelrglass
+author: John Snow Labs
+name: bert_qa_large_uncased_spanish_sign_language
+date: 2023-11-15
+tags: [bert, en, open_source, question_answering, onnx]
+task: Question Answering
+language: en
+edition: Spark NLP 5.2.0
+spark_version: 3.0
+supported: true
+engine: onnx
+annotator: BertForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained BertForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP.`bert_qa_large_uncased_spanish_sign_language` is a English model originally trained by michaelrglass.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/bert_qa_large_uncased_spanish_sign_language_en_5.2.0_3.0_1700010812943.zip){:.button.button-orange.button-orange-trans.arr.button-icon}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/bert_qa_large_uncased_spanish_sign_language_en_5.2.0_3.0_1700010812943.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|bert_qa_large_uncased_spanish_sign_language|
+|Compatibility:|Spark NLP 5.2.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|795.1 MB|
+
+## References
+
+https://huggingface.co/michaelrglass/bert-large-uncased-sspt
\ No newline at end of file
diff --git a/docs/_posts/ahmedlone127/2023-11-15-bert_qa_linkbert_base_finetuned_squad_en.md b/docs/_posts/ahmedlone127/2023-11-15-bert_qa_linkbert_base_finetuned_squad_en.md
new file mode 100644
index 00000000000000..865fdd6d5234ea
--- /dev/null
+++ b/docs/_posts/ahmedlone127/2023-11-15-bert_qa_linkbert_base_finetuned_squad_en.md
@@ -0,0 +1,101 @@
+---
+layout: model
+title: English BertForQuestionAnswering Base Cased model (from niklaspm)
+author: John Snow Labs
+name: bert_qa_linkbert_base_finetuned_squad
+date: 2023-11-15
+tags: [en, open_source, bert, question_answering, onnx]
+task: Question Answering
+language: en
+edition: Spark NLP 5.2.0
+spark_version: 3.0
+supported: true
+engine: onnx
+annotator: BertForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained Question Answering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `linkbert-base-finetuned-squad` is a English model originally trained by `niklaspm`.
+
+## Predicted Entities
+
+
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/bert_qa_linkbert_base_finetuned_squad_en_5.2.0_3.0_1700008477891.zip){:.button.button-orange.button-orange-trans.arr.button-icon}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/bert_qa_linkbert_base_finetuned_squad_en_5.2.0_3.0_1700008477891.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+documentAssembler = MultiDocumentAssembler() \
+ .setInputCols(["question", "context"]) \
+ .setOutputCols(["document_question", "document_context"])
+
+spanClassifier = BertForQuestionAnswering.pretrained("bert_qa_linkbert_base_finetuned_squad","en") \
+ .setInputCols(["document_question", "document_context"]) \
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[documentAssembler, spanClassifier])
+
+data = spark.createDataFrame([["What is my name?", "My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val documentAssembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val spanClassifier = BertForQuestionAnswering.pretrained("bert_qa_linkbert_base_finetuned_squad","en")
+  .setInputCols(Array("document_question", "document_context"))
+  .setOutputCol("answer")
+  .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(documentAssembler, spanClassifier))
+
+val data = Seq("What is my name?", "My name is Clara and I live in Berkeley.").toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+{:.nlu-block}
+```python
+import nlu
+nlu.load("en.answer_question.link_bert.squad.base_finetuned").predict("""What is my name?|||"My name is Clara and I live in Berkeley.""")
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|bert_qa_linkbert_base_finetuned_squad|
+|Compatibility:|Spark NLP 5.2.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|403.4 MB|
+|Case sensitive:|true|
+|Max sentence length:|512|
+
+## References
+
+References
+
+- https://huggingface.co/niklaspm/linkbert-base-finetuned-squad
+- https://arxiv.org/abs/2203.15827
\ No newline at end of file
diff --git a/docs/_posts/ahmedlone127/2023-11-15-bert_qa_logo_qna_model_tr.md b/docs/_posts/ahmedlone127/2023-11-15-bert_qa_logo_qna_model_tr.md
new file mode 100644
index 00000000000000..632d80dc16f36c
--- /dev/null
+++ b/docs/_posts/ahmedlone127/2023-11-15-bert_qa_logo_qna_model_tr.md
@@ -0,0 +1,108 @@
+---
+layout: model
+title: Turkish BertForQuestionAnswering model (from yunusemreemik)
+author: John Snow Labs
+name: bert_qa_logo_qna_model
+date: 2023-11-15
+tags: [tr, open_source, question_answering, bert, onnx]
+task: Question Answering
+language: tr
+edition: Spark NLP 5.2.0
+spark_version: 3.0
+supported: true
+engine: onnx
+annotator: BertForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained Question Answering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `logo-qna-model` is a Turkish model originally trained by `yunusemreemik`.
+
+## Predicted Entities
+
+
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/bert_qa_logo_qna_model_tr_5.2.0_3.0_1700011067624.zip){:.button.button-orange.button-orange-trans.arr.button-icon}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/bert_qa_logo_qna_model_tr_5.2.0_3.0_1700011067624.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+document_assembler = MultiDocumentAssembler() \
+.setInputCols(["question", "context"]) \
+.setOutputCols(["document_question", "document_context"])
+
+spanClassifier = BertForQuestionAnswering.pretrained("bert_qa_logo_qna_model","tr") \
+.setInputCols(["document_question", "document_context"]) \
+.setOutputCol("answer") \
+.setCaseSensitive(True)
+
+pipeline = Pipeline().setStages([
+document_assembler,
+spanClassifier
+])
+
+example = spark.createDataFrame([["What's my name?", "My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(example).transform(example)
+```
+```scala
+val document = new MultiDocumentAssembler()
+.setInputCols("question", "context")
+.setOutputCols("document_question", "document_context")
+
+val spanClassifier = BertForQuestionAnswering
+.pretrained("bert_qa_logo_qna_model","tr")
+.setInputCols(Array("document_question", "document_context"))
+.setOutputCol("answer")
+.setCaseSensitive(true)
+.setMaxSentenceLength(512)
+
+val pipeline = new Pipeline().setStages(Array(document, spanClassifier))
+
+val example = Seq(
+("Where was John Lenon born?", "John Lenon was born in London and lived in Paris. My name is Sarah and I live in London."),
+("What's my name?", "My name is Clara and I live in Berkeley."))
+.toDF("question", "context")
+
+val result = pipeline.fit(example).transform(example)
+```
+
+{:.nlu-block}
+```python
+import nlu
+nlu.load("tr.answer_question.bert.by_yunusemreemik").predict("""What's my name?|||"My name is Clara and I live in Berkeley.""")
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|bert_qa_logo_qna_model|
+|Compatibility:|Spark NLP 5.2.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|tr|
+|Size:|412.3 MB|
+|Case sensitive:|true|
+|Max sentence length:|512|
+
+## References
+
+References
+
+- https://huggingface.co/yunusemreemik/logo-qna-model
\ No newline at end of file
diff --git a/docs/_posts/ahmedlone127/2023-11-15-bert_qa_loodos_bert_base_uncased_qa_fine_tuned_tr.md b/docs/_posts/ahmedlone127/2023-11-15-bert_qa_loodos_bert_base_uncased_qa_fine_tuned_tr.md
new file mode 100644
index 00000000000000..83a98c69997e30
--- /dev/null
+++ b/docs/_posts/ahmedlone127/2023-11-15-bert_qa_loodos_bert_base_uncased_qa_fine_tuned_tr.md
@@ -0,0 +1,95 @@
+---
+layout: model
+title: Turkish bert_qa_loodos_bert_base_uncased_qa_fine_tuned BertForQuestionAnswering from oguzhanolm
+author: John Snow Labs
+name: bert_qa_loodos_bert_base_uncased_qa_fine_tuned
+date: 2023-11-15
+tags: [bert, tr, open_source, question_answering, onnx]
+task: Question Answering
+language: tr
+edition: Spark NLP 5.2.0
+spark_version: 3.0
+supported: true
+engine: onnx
+annotator: BertForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained BertForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP.`bert_qa_loodos_bert_base_uncased_qa_fine_tuned` is a Turkish model originally trained by oguzhanolm.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/bert_qa_loodos_bert_base_uncased_qa_fine_tuned_tr_5.2.0_3.0_1700008767611.zip){:.button.button-orange.button-orange-trans.arr.button-icon}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/bert_qa_loodos_bert_base_uncased_qa_fine_tuned_tr_5.2.0_3.0_1700008767611.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|bert_qa_loodos_bert_base_uncased_qa_fine_tuned|
+|Compatibility:|Spark NLP 5.2.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|tr|
+|Size:|412.0 MB|
+|Case sensitive:|false|
+|Max sentence length:|512|
+
+## References
+
+https://huggingface.co/oguzhanolm/loodos-bert-base-uncased-QA-fine-tuned
\ No newline at end of file
diff --git a/docs/_posts/ahmedlone127/2023-11-15-bert_qa_mbert_bengali_tydiqa_qa_bn.md b/docs/_posts/ahmedlone127/2023-11-15-bert_qa_mbert_bengali_tydiqa_qa_bn.md
new file mode 100644
index 00000000000000..783a28a8ade881
--- /dev/null
+++ b/docs/_posts/ahmedlone127/2023-11-15-bert_qa_mbert_bengali_tydiqa_qa_bn.md
@@ -0,0 +1,113 @@
+---
+layout: model
+title: Bangla BertForQuestionAnswering model (from sagorsarker)
+author: John Snow Labs
+name: bert_qa_mbert_bengali_tydiqa_qa
+date: 2023-11-15
+tags: [bn, open_source, question_answering, bert, onnx]
+task: Question Answering
+language: bn
+edition: Spark NLP 5.2.0
+spark_version: 3.0
+supported: true
+engine: onnx
+annotator: BertForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained Question Answering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `mbert-bengali-tydiqa-qa` is a Bangla model originally trained by `sagorsarker`.
+
+## Predicted Entities
+
+
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/bert_qa_mbert_bengali_tydiqa_qa_bn_5.2.0_3.0_1700011657711.zip){:.button.button-orange.button-orange-trans.arr.button-icon}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/bert_qa_mbert_bengali_tydiqa_qa_bn_5.2.0_3.0_1700011657711.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+document_assembler = MultiDocumentAssembler() \
+.setInputCols(["question", "context"]) \
+.setOutputCols(["document_question", "document_context"])
+
+spanClassifier = BertForQuestionAnswering.pretrained("bert_qa_mbert_bengali_tydiqa_qa","bn") \
+.setInputCols(["document_question", "document_context"]) \
+.setOutputCol("answer") \
+.setCaseSensitive(True)
+
+pipeline = Pipeline().setStages([
+document_assembler,
+spanClassifier
+])
+
+example = spark.createDataFrame([["What's my name?", "My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(example).transform(example)
+```
+```scala
+val document = new MultiDocumentAssembler()
+.setInputCols("question", "context")
+.setOutputCols("document_question", "document_context")
+
+val spanClassifier = BertForQuestionAnswering
+.pretrained("bert_qa_mbert_bengali_tydiqa_qa","bn")
+.setInputCols(Array("document_question", "document_context"))
+.setOutputCol("answer")
+.setCaseSensitive(true)
+.setMaxSentenceLength(512)
+
+val pipeline = new Pipeline().setStages(Array(document, spanClassifier))
+
+val example = Seq(
+("Where was John Lenon born?", "John Lenon was born in London and lived in Paris. My name is Sarah and I live in London."),
+("What's my name?", "My name is Clara and I live in Berkeley."))
+.toDF("question", "context")
+
+val result = pipeline.fit(example).transform(example)
+```
+
+{:.nlu-block}
+```python
+import nlu
+nlu.load("bn.answer_question.tydiqa.multi_lingual_bert").predict("""What's my name?|||"My name is Clara and I live in Berkeley.""")
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|bert_qa_mbert_bengali_tydiqa_qa|
+|Compatibility:|Spark NLP 5.2.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|bn|
+|Size:|625.5 MB|
+|Case sensitive:|true|
+|Max sentence length:|512|
+
+## References
+
+References
+
+- https://huggingface.co/sagorsarker/mbert-bengali-tydiqa-qa
+- https://github.com/sagorbrur
+- https://github.com/sagorbrur/bntransformer
+- https://github.com/google-research-datasets/tydiqa
+- https://www.linkedin.com/in/sagor-sarker/
+- https://www.kaggle.com/
\ No newline at end of file
diff --git a/docs/_posts/ahmedlone127/2023-11-15-bert_qa_mbert_finetuned_mlqa_arabic_hindi_dev_xx.md b/docs/_posts/ahmedlone127/2023-11-15-bert_qa_mbert_finetuned_mlqa_arabic_hindi_dev_xx.md
new file mode 100644
index 00000000000000..264ff2b0e64221
--- /dev/null
+++ b/docs/_posts/ahmedlone127/2023-11-15-bert_qa_mbert_finetuned_mlqa_arabic_hindi_dev_xx.md
@@ -0,0 +1,93 @@
+---
+layout: model
+title: Multilingual bert_qa_mbert_finetuned_mlqa_arabic_hindi_dev BertForQuestionAnswering from roshnir
+author: John Snow Labs
+name: bert_qa_mbert_finetuned_mlqa_arabic_hindi_dev
+date: 2023-11-15
+tags: [bert, xx, open_source, question_answering, onnx]
+task: Question Answering
+language: xx
+edition: Spark NLP 5.2.0
+spark_version: 3.0
+supported: true
+engine: onnx
+annotator: BertForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained BertForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP.`bert_qa_mbert_finetuned_mlqa_arabic_hindi_dev` is a Multilingual model originally trained by roshnir.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/bert_qa_mbert_finetuned_mlqa_arabic_hindi_dev_xx_5.2.0_3.0_1700006955082.zip){:.button.button-orange.button-orange-trans.arr.button-icon}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/bert_qa_mbert_finetuned_mlqa_arabic_hindi_dev_xx_5.2.0_3.0_1700006955082.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|bert_qa_mbert_finetuned_mlqa_arabic_hindi_dev|
+|Compatibility:|Spark NLP 5.2.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|xx|
+|Size:|625.5 MB|
+
+## References
+
+https://huggingface.co/roshnir/mBert-finetuned-mlqa-dev-zh-hi
\ No newline at end of file
diff --git a/docs/_posts/ahmedlone127/2023-11-15-bert_qa_mbert_finetuned_mlqa_dev_en.md b/docs/_posts/ahmedlone127/2023-11-15-bert_qa_mbert_finetuned_mlqa_dev_en.md
new file mode 100644
index 00000000000000..2f463d194d8e4a
--- /dev/null
+++ b/docs/_posts/ahmedlone127/2023-11-15-bert_qa_mbert_finetuned_mlqa_dev_en.md
@@ -0,0 +1,100 @@
+---
+layout: model
+title: English BertForQuestionAnswering Cased model (from roshnir)
+author: John Snow Labs
+name: bert_qa_mbert_finetuned_mlqa_dev
+date: 2023-11-15
+tags: [en, open_source, bert, question_answering, onnx]
+task: Question Answering
+language: en
+edition: Spark NLP 5.2.0
+spark_version: 3.0
+supported: true
+engine: onnx
+annotator: BertForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained Question Answering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `mBert-finetuned-mlqa-dev-en` is a English model originally trained by `roshnir`.
+
+## Predicted Entities
+
+
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/bert_qa_mbert_finetuned_mlqa_dev_en_5.2.0_3.0_1700009322395.zip){:.button.button-orange.button-orange-trans.arr.button-icon}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/bert_qa_mbert_finetuned_mlqa_dev_en_5.2.0_3.0_1700009322395.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+documentAssembler = MultiDocumentAssembler() \
+ .setInputCols(["question", "context"]) \
+ .setOutputCols(["document_question", "document_context"])
+
+spanClassifier = BertForQuestionAnswering.pretrained("bert_qa_mbert_finetuned_mlqa_dev","en") \
+ .setInputCols(["document_question", "document_context"]) \
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[documentAssembler, spanClassifier])
+
+data = spark.createDataFrame([["What is my name?", "My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val documentAssembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val spanClassifier = BertForQuestionAnswering.pretrained("bert_qa_mbert_finetuned_mlqa_dev","en")
+  .setInputCols(Array("document_question", "document_context"))
+  .setOutputCol("answer")
+  .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(documentAssembler, spanClassifier))
+
+val data = Seq("What is my name?", "My name is Clara and I live in Berkeley.").toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+{:.nlu-block}
+```python
+import nlu
+nlu.load("en.answer_question.bert.mlqa.finetuned").predict("""What is my name?|||"My name is Clara and I live in Berkeley.""")
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|bert_qa_mbert_finetuned_mlqa_dev|
+|Compatibility:|Spark NLP 5.2.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|625.5 MB|
+|Case sensitive:|true|
+|Max sentence length:|512|
+
+## References
+
+References
+
+- https://huggingface.co/roshnir/mBert-finetuned-mlqa-dev-en
\ No newline at end of file
diff --git a/docs/_posts/ahmedlone127/2023-11-15-bert_qa_mbert_finetuned_mlqa_english_chinese_hindi_dev_xx.md b/docs/_posts/ahmedlone127/2023-11-15-bert_qa_mbert_finetuned_mlqa_english_chinese_hindi_dev_xx.md
new file mode 100644
index 00000000000000..21d722fa2ecd27
--- /dev/null
+++ b/docs/_posts/ahmedlone127/2023-11-15-bert_qa_mbert_finetuned_mlqa_english_chinese_hindi_dev_xx.md
@@ -0,0 +1,93 @@
+---
+layout: model
+title: Multilingual bert_qa_mbert_finetuned_mlqa_english_chinese_hindi_dev BertForQuestionAnswering from roshnir
+author: John Snow Labs
+name: bert_qa_mbert_finetuned_mlqa_english_chinese_hindi_dev
+date: 2023-11-15
+tags: [bert, xx, open_source, question_answering, onnx]
+task: Question Answering
+language: xx
+edition: Spark NLP 5.2.0
+spark_version: 3.0
+supported: true
+engine: onnx
+annotator: BertForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained BertForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP.`bert_qa_mbert_finetuned_mlqa_english_chinese_hindi_dev` is a Multilingual model originally trained by roshnir.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/bert_qa_mbert_finetuned_mlqa_english_chinese_hindi_dev_xx_5.2.0_3.0_1700009561613.zip){:.button.button-orange.button-orange-trans.arr.button-icon}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/bert_qa_mbert_finetuned_mlqa_english_chinese_hindi_dev_xx_5.2.0_3.0_1700009561613.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|bert_qa_mbert_finetuned_mlqa_english_chinese_hindi_dev|
+|Compatibility:|Spark NLP 5.2.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|xx|
+|Size:|625.5 MB|
+
+## References
+
+https://huggingface.co/roshnir/mBert-finetuned-mlqa-dev-es-hi
\ No newline at end of file
diff --git a/docs/_posts/ahmedlone127/2023-11-15-bert_qa_mini_en.md b/docs/_posts/ahmedlone127/2023-11-15-bert_qa_mini_en.md
new file mode 100644
index 00000000000000..b7f20c49a8bdba
--- /dev/null
+++ b/docs/_posts/ahmedlone127/2023-11-15-bert_qa_mini_en.md
@@ -0,0 +1,94 @@
+---
+layout: model
+title: English BertForQuestionAnswering Mini Cased model (from srcocotero)
+author: John Snow Labs
+name: bert_qa_mini
+date: 2023-11-15
+tags: [en, open_source, bert, question_answering, onnx]
+task: Question Answering
+language: en
+edition: Spark NLP 5.2.0
+spark_version: 3.0
+supported: true
+engine: onnx
+annotator: BertForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained BertForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `mini-bert-qa` is a English model originally trained by `srcocotero`.
+
+## Predicted Entities
+
+
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/bert_qa_mini_en_5.2.0_3.0_1700012043479.zip){:.button.button-orange.button-orange-trans.arr.button-icon}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/bert_qa_mini_en_5.2.0_3.0_1700012043479.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = BertForQuestionAnswering.pretrained("bert_qa_mini","en")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = BertForQuestionAnswering.pretrained("bert_qa_mini","en")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq("What's my name?","My name is Clara and I live in Berkeley.").toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|bert_qa_mini|
+|Compatibility:|Spark NLP 5.2.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|41.8 MB|
+|Case sensitive:|true|
+|Max sentence length:|512|
+
+## References
+
+References
+
+- https://huggingface.co/srcocotero/mini-bert-qa
\ No newline at end of file
diff --git a/docs/_posts/ahmedlone127/2023-11-15-bert_qa_mini_finetuned_squad_en.md b/docs/_posts/ahmedlone127/2023-11-15-bert_qa_mini_finetuned_squad_en.md
new file mode 100644
index 00000000000000..713265f9883fd2
--- /dev/null
+++ b/docs/_posts/ahmedlone127/2023-11-15-bert_qa_mini_finetuned_squad_en.md
@@ -0,0 +1,100 @@
+---
+layout: model
+title: English BertForQuestionAnswering Mini Cased model (from anas-awadalla)
+author: John Snow Labs
+name: bert_qa_mini_finetuned_squad
+date: 2023-11-15
+tags: [en, open_source, bert, question_answering, onnx]
+task: Question Answering
+language: en
+edition: Spark NLP 5.2.0
+spark_version: 3.0
+supported: true
+engine: onnx
+annotator: BertForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained Question Answering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `bert-mini-finetuned-squad` is a English model originally trained by `anas-awadalla`.
+
+## Predicted Entities
+
+
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/bert_qa_mini_finetuned_squad_en_5.2.0_3.0_1700007275952.zip){:.button.button-orange.button-orange-trans.arr.button-icon}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/bert_qa_mini_finetuned_squad_en_5.2.0_3.0_1700007275952.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+documentAssembler = MultiDocumentAssembler() \
+ .setInputCols(["question", "context"]) \
+ .setOutputCols(["document_question", "document_context"])
+
+spanClassifier = BertForQuestionAnswering.pretrained("bert_qa_mini_finetuned_squad","en") \
+ .setInputCols(["document_question", "document_context"]) \
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[documentAssembler, spanClassifier])
+
+data = spark.createDataFrame([["What is my name?", "My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val documentAssembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val spanClassifier = BertForQuestionAnswering.pretrained("bert_qa_mini_finetuned_squad","en")
+  .setInputCols(Array("document_question", "document_context"))
+  .setOutputCol("answer")
+  .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(documentAssembler, spanClassifier))
+
+val data = Seq("What is my name?", "My name is Clara and I live in Berkeley.").toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+{:.nlu-block}
+```python
+import nlu
+nlu.load("en.answer_question.bert.squad.mini_finetuned").predict("""What is my name?|||"My name is Clara and I live in Berkeley.""")
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|bert_qa_mini_finetuned_squad|
+|Compatibility:|Spark NLP 5.2.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|41.8 MB|
+|Case sensitive:|true|
+|Max sentence length:|512|
+
+## References
+
+References
+
+- https://huggingface.co/anas-awadalla/bert-mini-finetuned-squad
\ No newline at end of file
diff --git a/docs/_posts/ahmedlone127/2023-11-15-bert_qa_minilm_l12_h384_uncased_squad_en.md b/docs/_posts/ahmedlone127/2023-11-15-bert_qa_minilm_l12_h384_uncased_squad_en.md
new file mode 100644
index 00000000000000..fdf35476072e13
--- /dev/null
+++ b/docs/_posts/ahmedlone127/2023-11-15-bert_qa_minilm_l12_h384_uncased_squad_en.md
@@ -0,0 +1,100 @@
+---
+layout: model
+title: English BertForQuestionAnswering Mini Uncased model (from haritzpuerto)
+author: John Snow Labs
+name: bert_qa_minilm_l12_h384_uncased_squad
+date: 2023-11-15
+tags: [en, open_source, bert, question_answering, onnx]
+task: Question Answering
+language: en
+edition: Spark NLP 5.2.0
+spark_version: 3.0
+supported: true
+engine: onnx
+annotator: BertForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained Question Answering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `MiniLM-L12-H384-uncased-squad` is a English model originally trained by `haritzpuerto`.
+
+## Predicted Entities
+
+
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/bert_qa_minilm_l12_h384_uncased_squad_en_5.2.0_3.0_1700012168987.zip){:.button.button-orange.button-orange-trans.arr.button-icon}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/bert_qa_minilm_l12_h384_uncased_squad_en_5.2.0_3.0_1700012168987.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+documentAssembler = MultiDocumentAssembler() \
+ .setInputCols(["question", "context"]) \
+ .setOutputCols(["document_question", "document_context"])
+
+spanClassifier = BertForQuestionAnswering.pretrained("bert_qa_minilm_l12_h384_uncased_squad","en") \
+ .setInputCols(["document_question", "document_context"]) \
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[documentAssembler, spanClassifier])
+
+data = spark.createDataFrame([["What is my name?", "My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val documentAssembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val spanClassifier = BertForQuestionAnswering.pretrained("bert_qa_minilm_l12_h384_uncased_squad","en")
+  .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(documentAssembler, spanClassifier))
+
+val data = Seq(("What is my name?", "My name is Clara and I live in Berkeley.")).toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+{:.nlu-block}
+```python
+import nlu
+nlu.load("en.answer_question.bert.squad.uncased_mini_lm_mini").predict("""What is my name?|||My name is Clara and I live in Berkeley.""")
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|bert_qa_minilm_l12_h384_uncased_squad|
+|Compatibility:|Spark NLP 5.2.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|123.8 MB|
+|Case sensitive:|false|
+|Max sentence length:|512|
+
+## References
+
+References
+
+- https://huggingface.co/haritzpuerto/MiniLM-L12-H384-uncased-squad
\ No newline at end of file
diff --git a/docs/_posts/ahmedlone127/2023-11-15-bert_qa_minilm_uncased_squad2_en.md b/docs/_posts/ahmedlone127/2023-11-15-bert_qa_minilm_uncased_squad2_en.md
new file mode 100644
index 00000000000000..17a50e215e43bc
--- /dev/null
+++ b/docs/_posts/ahmedlone127/2023-11-15-bert_qa_minilm_uncased_squad2_en.md
@@ -0,0 +1,120 @@
+---
+layout: model
+title: English BertForQuestionAnswering model (from deepset)
+author: John Snow Labs
+name: bert_qa_minilm_uncased_squad2
+date: 2023-11-15
+tags: [en, open_source, question_answering, bert, onnx]
+task: Question Answering
+language: en
+edition: Spark NLP 5.2.0
+spark_version: 3.0
+supported: true
+engine: onnx
+annotator: BertForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained Question Answering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `minilm-uncased-squad2` is a English model originally trained by `deepset`.
+
+## Predicted Entities
+
+
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/bert_qa_minilm_uncased_squad2_en_5.2.0_3.0_1700009958896.zip){:.button.button-orange.button-orange-trans.arr.button-icon}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/bert_qa_minilm_uncased_squad2_en_5.2.0_3.0_1700009958896.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+document_assembler = MultiDocumentAssembler() \
+.setInputCols(["question", "context"]) \
+.setOutputCols(["document_question", "document_context"])
+
+spanClassifier = BertForQuestionAnswering.pretrained("bert_qa_minilm_uncased_squad2","en") \
+.setInputCols(["document_question", "document_context"]) \
+.setOutputCol("answer") \
+.setCaseSensitive(True)
+
+pipeline = Pipeline().setStages([
+document_assembler,
+spanClassifier
+])
+
+example = spark.createDataFrame([["What's my name?", "My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(example).transform(example)
+```
+```scala
+val document = new MultiDocumentAssembler()
+.setInputCols("question", "context")
+.setOutputCols("document_question", "document_context")
+
+val spanClassifier = BertForQuestionAnswering
+.pretrained("bert_qa_minilm_uncased_squad2","en")
+.setInputCols(Array("document_question", "document_context"))
+.setOutputCol("answer")
+.setCaseSensitive(true)
+.setMaxSentenceLength(512)
+
+val pipeline = new Pipeline().setStages(Array(document, spanClassifier))
+
+val example = Seq(
+("Where was John Lenon born?", "John Lenon was born in London and lived in Paris. My name is Sarah and I live in London."),
+("What's my name?", "My name is Clara and I live in Berkeley."))
+.toDF("question", "context")
+
+val result = pipeline.fit(example).transform(example)
+```
+
+{:.nlu-block}
+```python
+import nlu
+nlu.load("en.answer_question.squadv2.bert.mini_lm_base_uncased").predict("""What's my name?|||My name is Clara and I live in Berkeley.""")
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|bert_qa_minilm_uncased_squad2|
+|Compatibility:|Spark NLP 5.2.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|123.2 MB|
+|Case sensitive:|false|
+|Max sentence length:|512|
+
+## References
+
+References
+
+- https://huggingface.co/deepset/minilm-uncased-squad2
+- https://github.com/deepset-ai/haystack/discussions
+- https://deepset.ai
+- https://github.com/deepset-ai/FARM/blob/master/examples/question_answering.py
+- https://twitter.com/deepset_ai
+- http://www.deepset.ai/jobs
+- https://worksheets.codalab.org/rest/bundles/0x6b567e1cf2e041ec80d7098f031c5c9e/contents/blob/
+- https://haystack.deepset.ai/community/join
+- https://github.com/deepset-ai/haystack/
+- https://deepset.ai/german-bert
+- https://www.linkedin.com/company/deepset-ai/
+- https://github.com/deepset-ai/FARM
+- https://deepset.ai/germanquad
\ No newline at end of file
diff --git a/docs/_posts/ahmedlone127/2023-11-15-bert_qa_mod_7_squad_en.md b/docs/_posts/ahmedlone127/2023-11-15-bert_qa_mod_7_squad_en.md
new file mode 100644
index 00000000000000..825aa3a9ab16c5
--- /dev/null
+++ b/docs/_posts/ahmedlone127/2023-11-15-bert_qa_mod_7_squad_en.md
@@ -0,0 +1,94 @@
+---
+layout: model
+title: English BertForQuestionAnswering Cased model (from Go2Heart)
+author: John Snow Labs
+name: bert_qa_mod_7_squad
+date: 2023-11-15
+tags: [en, open_source, bert, question_answering, onnx]
+task: Question Answering
+language: en
+edition: Spark NLP 5.2.0
+spark_version: 3.0
+supported: true
+engine: onnx
+annotator: BertForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained BertForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `BERT_Mod_7_Squad` is a English model originally trained by `Go2Heart`.
+
+## Predicted Entities
+
+
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/bert_qa_mod_7_squad_en_5.2.0_3.0_1700012402588.zip){:.button.button-orange.button-orange-trans.arr.button-icon}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/bert_qa_mod_7_squad_en_5.2.0_3.0_1700012402588.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = BertForQuestionAnswering.pretrained("bert_qa_mod_7_squad","en")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = BertForQuestionAnswering.pretrained("bert_qa_mod_7_squad","en")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq(("What's my name?","My name is Clara and I live in Berkeley.")).toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|bert_qa_mod_7_squad|
+|Compatibility:|Spark NLP 5.2.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|406.7 MB|
+|Case sensitive:|true|
+|Max sentence length:|512|
+
+## References
+
+References
+
+- https://huggingface.co/Go2Heart/BERT_Mod_7_Squad
\ No newline at end of file
diff --git a/docs/_posts/ahmedlone127/2023-11-15-bert_qa_model_output_en.md b/docs/_posts/ahmedlone127/2023-11-15-bert_qa_model_output_en.md
new file mode 100644
index 00000000000000..261e7cce9f4c5f
--- /dev/null
+++ b/docs/_posts/ahmedlone127/2023-11-15-bert_qa_model_output_en.md
@@ -0,0 +1,108 @@
+---
+layout: model
+title: English BertForQuestionAnswering model (from SanayCo)
+author: John Snow Labs
+name: bert_qa_model_output
+date: 2023-11-15
+tags: [en, open_source, question_answering, bert, onnx]
+task: Question Answering
+language: en
+edition: Spark NLP 5.2.0
+spark_version: 3.0
+supported: true
+engine: onnx
+annotator: BertForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained Question Answering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `model_output` is a English model originally trained by `SanayCo`.
+
+## Predicted Entities
+
+
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/bert_qa_model_output_en_5.2.0_3.0_1700012692609.zip){:.button.button-orange.button-orange-trans.arr.button-icon}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/bert_qa_model_output_en_5.2.0_3.0_1700012692609.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+document_assembler = MultiDocumentAssembler() \
+.setInputCols(["question", "context"]) \
+.setOutputCols(["document_question", "document_context"])
+
+spanClassifier = BertForQuestionAnswering.pretrained("bert_qa_model_output","en") \
+.setInputCols(["document_question", "document_context"]) \
+.setOutputCol("answer") \
+.setCaseSensitive(True)
+
+pipeline = Pipeline().setStages([
+document_assembler,
+spanClassifier
+])
+
+example = spark.createDataFrame([["What's my name?", "My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(example).transform(example)
+```
+```scala
+val document = new MultiDocumentAssembler()
+.setInputCols("question", "context")
+.setOutputCols("document_question", "document_context")
+
+val spanClassifier = BertForQuestionAnswering
+.pretrained("bert_qa_model_output","en")
+.setInputCols(Array("document_question", "document_context"))
+.setOutputCol("answer")
+.setCaseSensitive(true)
+.setMaxSentenceLength(512)
+
+val pipeline = new Pipeline().setStages(Array(document, spanClassifier))
+
+val example = Seq(
+("Where was John Lenon born?", "John Lenon was born in London and lived in Paris. My name is Sarah and I live in London."),
+("What's my name?", "My name is Clara and I live in Berkeley."))
+.toDF("question", "context")
+
+val result = pipeline.fit(example).transform(example)
+```
+
+{:.nlu-block}
+```python
+import nlu
+nlu.load("en.answer_question.bert.by_SanayCo").predict("""What's my name?|||My name is Clara and I live in Berkeley.""")
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|bert_qa_model_output|
+|Compatibility:|Spark NLP 5.2.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|407.2 MB|
+|Case sensitive:|true|
+|Max sentence length:|512|
+
+## References
+
+References
+
+- https://huggingface.co/SanayCo/model_output
\ No newline at end of file
diff --git a/docs/_posts/ahmedlone127/2023-11-15-bert_qa_modelbin_en.md b/docs/_posts/ahmedlone127/2023-11-15-bert_qa_modelbin_en.md
new file mode 100644
index 00000000000000..0b6aeef4a55a6a
--- /dev/null
+++ b/docs/_posts/ahmedlone127/2023-11-15-bert_qa_modelbin_en.md
@@ -0,0 +1,94 @@
+---
+layout: model
+title: English BertForQuestionAnswering Cased model (from JAlexis)
+author: John Snow Labs
+name: bert_qa_modelbin
+date: 2023-11-15
+tags: [en, open_source, bert, question_answering, onnx]
+task: Question Answering
+language: en
+edition: Spark NLP 5.2.0
+spark_version: 3.0
+supported: true
+engine: onnx
+annotator: BertForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained BertForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `modelbin` is a English model originally trained by `JAlexis`.
+
+## Predicted Entities
+
+
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/bert_qa_modelbin_en_5.2.0_3.0_1700010098432.zip){:.button.button-orange.button-orange-trans.arr.button-icon}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/bert_qa_modelbin_en_5.2.0_3.0_1700010098432.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = BertForQuestionAnswering.pretrained("bert_qa_modelbin","en")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = BertForQuestionAnswering.pretrained("bert_qa_modelbin","en")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq(("What's my name?","My name is Clara and I live in Berkeley.")).toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|bert_qa_modelbin|
+|Compatibility:|Spark NLP 5.2.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|403.7 MB|
+|Case sensitive:|true|
+|Max sentence length:|512|
+
+## References
+
+References
+
+- https://huggingface.co/JAlexis/modelbin
\ No newline at end of file
diff --git a/docs/_posts/ahmedlone127/2023-11-15-bert_qa_modelf_01_en.md b/docs/_posts/ahmedlone127/2023-11-15-bert_qa_modelf_01_en.md
new file mode 100644
index 00000000000000..28e536bda3695f
--- /dev/null
+++ b/docs/_posts/ahmedlone127/2023-11-15-bert_qa_modelf_01_en.md
@@ -0,0 +1,94 @@
+---
+layout: model
+title: English BertForQuestionAnswering Cased model (from JAlexis)
+author: John Snow Labs
+name: bert_qa_modelf_01
+date: 2023-11-15
+tags: [en, open_source, bert, question_answering, onnx]
+task: Question Answering
+language: en
+edition: Spark NLP 5.2.0
+spark_version: 3.0
+supported: true
+engine: onnx
+annotator: BertForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained BertForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `modelF_01` is a English model originally trained by `JAlexis`.
+
+## Predicted Entities
+
+
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/bert_qa_modelf_01_en_5.2.0_3.0_1700012948741.zip){:.button.button-orange.button-orange-trans.arr.button-icon}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/bert_qa_modelf_01_en_5.2.0_3.0_1700012948741.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = BertForQuestionAnswering.pretrained("bert_qa_modelf_01","en")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = BertForQuestionAnswering.pretrained("bert_qa_modelf_01","en")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq(("What's my name?","My name is Clara and I live in Berkeley.")).toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|bert_qa_modelf_01|
+|Compatibility:|Spark NLP 5.2.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|403.7 MB|
+|Case sensitive:|true|
+|Max sentence length:|512|
+
+## References
+
+References
+
+- https://huggingface.co/JAlexis/modelF_01
\ No newline at end of file
diff --git a/docs/_posts/ahmedlone127/2023-11-15-bert_qa_modelonwhol_tr.md b/docs/_posts/ahmedlone127/2023-11-15-bert_qa_modelonwhol_tr.md
new file mode 100644
index 00000000000000..82730aaad887b9
--- /dev/null
+++ b/docs/_posts/ahmedlone127/2023-11-15-bert_qa_modelonwhol_tr.md
@@ -0,0 +1,94 @@
+---
+layout: model
+title: Turkish BertForQuestionAnswering Cased model (from Aybars)
+author: John Snow Labs
+name: bert_qa_modelonwhol
+date: 2023-11-15
+tags: [tr, open_source, bert, question_answering, onnx]
+task: Question Answering
+language: tr
+edition: Spark NLP 5.2.0
+spark_version: 3.0
+supported: true
+engine: onnx
+annotator: BertForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained Question Answering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `ModelOnWhole` is a Turkish model originally trained by `Aybars`.
+
+## Predicted Entities
+
+
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/bert_qa_modelonwhol_tr_5.2.0_3.0_1700013291003.zip){:.button.button-orange.button-orange-trans.arr.button-icon}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/bert_qa_modelonwhol_tr_5.2.0_3.0_1700013291003.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+documentAssembler = MultiDocumentAssembler() \
+ .setInputCols(["question", "context"]) \
+ .setOutputCols(["document_question", "document_context"])
+
+spanClassifier = BertForQuestionAnswering.pretrained("bert_qa_modelonwhol","tr") \
+ .setInputCols(["document_question", "document_context"]) \
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[documentAssembler, spanClassifier])
+
+data = spark.createDataFrame([["Benim adım ne?", "Benim adım Clara ve Berkeley'de yaşıyorum."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val documentAssembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val spanClassifier = BertForQuestionAnswering.pretrained("bert_qa_modelonwhol","tr")
+  .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(documentAssembler, spanClassifier))
+
+val data = Seq(("Benim adım ne?", "Benim adım Clara ve Berkeley'de yaşıyorum.")).toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|bert_qa_modelonwhol|
+|Compatibility:|Spark NLP 5.2.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|tr|
+|Size:|688.9 MB|
+|Case sensitive:|true|
+|Max sentence length:|512|
+
+## References
+
+References
+
+- https://huggingface.co/Aybars/ModelOnWhole
\ No newline at end of file
diff --git a/docs/_posts/ahmedlone127/2023-11-15-bert_qa_modelv2_en.md b/docs/_posts/ahmedlone127/2023-11-15-bert_qa_modelv2_en.md
new file mode 100644
index 00000000000000..4843431a8df0db
--- /dev/null
+++ b/docs/_posts/ahmedlone127/2023-11-15-bert_qa_modelv2_en.md
@@ -0,0 +1,94 @@
+---
+layout: model
+title: English BertForQuestionAnswering Cased model (from JAlexis)
+author: John Snow Labs
+name: bert_qa_modelv2
+date: 2023-11-15
+tags: [en, open_source, bert, question_answering, onnx]
+task: Question Answering
+language: en
+edition: Spark NLP 5.2.0
+spark_version: 3.0
+supported: true
+engine: onnx
+annotator: BertForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained BertForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `modelv2` is a English model originally trained by `JAlexis`.
+
+## Predicted Entities
+
+
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/bert_qa_modelv2_en_5.2.0_3.0_1700013581942.zip){:.button.button-orange.button-orange-trans.arr.button-icon}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/bert_qa_modelv2_en_5.2.0_3.0_1700013581942.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = BertForQuestionAnswering.pretrained("bert_qa_modelv2","en")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = BertForQuestionAnswering.pretrained("bert_qa_modelv2","en")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq(("What's my name?","My name is Clara and I live in Berkeley.")).toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|bert_qa_modelv2|
+|Compatibility:|Spark NLP 5.2.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|403.6 MB|
+|Case sensitive:|true|
+|Max sentence length:|512|
+
+## References
+
+References
+
+- https://huggingface.co/JAlexis/modelv2
\ No newline at end of file
diff --git a/docs/_posts/ahmedlone127/2023-11-15-bert_qa_monakth_base_multilingual_cased_finetuned_squad_xx.md b/docs/_posts/ahmedlone127/2023-11-15-bert_qa_monakth_base_multilingual_cased_finetuned_squad_xx.md
new file mode 100644
index 00000000000000..b1287133a77095
--- /dev/null
+++ b/docs/_posts/ahmedlone127/2023-11-15-bert_qa_monakth_base_multilingual_cased_finetuned_squad_xx.md
@@ -0,0 +1,94 @@
+---
+layout: model
+title: Multilingual BertForQuestionAnswering Base Cased model (from monakth)
+author: John Snow Labs
+name: bert_qa_monakth_base_multilingual_cased_finetuned_squad
+date: 2023-11-15
+tags: [xx, open_source, bert, question_answering, onnx]
+task: Question Answering
+language: xx
+edition: Spark NLP 5.2.0
+spark_version: 3.0
+supported: true
+engine: onnx
+annotator: BertForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained BertForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `bert-base-multilingual-cased-finetuned-squad` is a Multilingual model originally trained by `monakth`.
+
+## Predicted Entities
+
+
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/bert_qa_monakth_base_multilingual_cased_finetuned_squad_xx_5.2.0_3.0_1700015431790.zip){:.button.button-orange.button-orange-trans.arr.button-icon}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/bert_qa_monakth_base_multilingual_cased_finetuned_squad_xx_5.2.0_3.0_1700015431790.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = BertForQuestionAnswering.pretrained("bert_qa_monakth_base_multilingual_cased_finetuned_squad","xx")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = BertForQuestionAnswering.pretrained("bert_qa_monakth_base_multilingual_cased_finetuned_squad","xx")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq(("What's my name?","My name is Clara and I live in Berkeley.")).toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|bert_qa_monakth_base_multilingual_cased_finetuned_squad|
+|Compatibility:|Spark NLP 5.2.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|xx|
+|Size:|665.0 MB|
+|Case sensitive:|true|
+|Max sentence length:|512|
+
+## References
+
+References
+
+- https://huggingface.co/monakth/bert-base-multilingual-cased-finetuned-squad
\ No newline at end of file
diff --git a/docs/_posts/ahmedlone127/2023-11-15-bert_qa_monakth_base_uncased_finetuned_squad_en.md b/docs/_posts/ahmedlone127/2023-11-15-bert_qa_monakth_base_uncased_finetuned_squad_en.md
new file mode 100644
index 00000000000000..f1d9be7b909a1d
--- /dev/null
+++ b/docs/_posts/ahmedlone127/2023-11-15-bert_qa_monakth_base_uncased_finetuned_squad_en.md
@@ -0,0 +1,94 @@
+---
+layout: model
+title: English BertForQuestionAnswering Base Uncased model (from monakth)
+author: John Snow Labs
+name: bert_qa_monakth_base_uncased_finetuned_squad
+date: 2023-11-15
+tags: [en, open_source, bert, question_answering, onnx]
+task: Question Answering
+language: en
+edition: Spark NLP 5.2.0
+spark_version: 3.0
+supported: true
+engine: onnx
+annotator: BertForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained BertForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `bert-base-uncased-finetuned-squad` is an English model originally trained by `monakth`.
+
+## Predicted Entities
+
+
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/bert_qa_monakth_base_uncased_finetuned_squad_en_5.2.0_3.0_1700007526590.zip){:.button.button-orange.button-orange-trans.arr.button-icon}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/bert_qa_monakth_base_uncased_finetuned_squad_en_5.2.0_3.0_1700007526590.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = BertForQuestionAnswering.pretrained("bert_qa_monakth_base_uncased_finetuned_squad","en")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(False)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = BertForQuestionAnswering.pretrained("bert_qa_monakth_base_uncased_finetuned_squad","en")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(false)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq("What's my name?","My name is Clara and I live in Berkeley.").toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|bert_qa_monakth_base_uncased_finetuned_squad|
+|Compatibility:|Spark NLP 5.2.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|407.2 MB|
+|Case sensitive:|false|
+|Max sentence length:|512|
+
+## References
+
+References
+
+- https://huggingface.co/monakth/bert-base-uncased-finetuned-squad
\ No newline at end of file
diff --git a/docs/_posts/ahmedlone127/2023-11-15-bert_qa_mqa_cls_en.md b/docs/_posts/ahmedlone127/2023-11-15-bert_qa_mqa_cls_en.md
new file mode 100644
index 00000000000000..3237dc65acbea3
--- /dev/null
+++ b/docs/_posts/ahmedlone127/2023-11-15-bert_qa_mqa_cls_en.md
@@ -0,0 +1,108 @@
+---
+layout: model
+title: English BertForQuestionAnswering model (from xraychen)
+author: John Snow Labs
+name: bert_qa_mqa_cls
+date: 2023-11-15
+tags: [en, open_source, question_answering, bert, onnx]
+task: Question Answering
+language: en
+edition: Spark NLP 5.2.0
+spark_version: 3.0
+supported: true
+engine: onnx
+annotator: BertForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained Question Answering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `mqa-cls` is an English model originally trained by `xraychen`.
+
+## Predicted Entities
+
+
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/bert_qa_mqa_cls_en_5.2.0_3.0_1700010393249.zip){:.button.button-orange.button-orange-trans.arr.button-icon}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/bert_qa_mqa_cls_en_5.2.0_3.0_1700010393249.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+document_assembler = MultiDocumentAssembler() \
+.setInputCols(["question", "context"]) \
+.setOutputCols(["document_question", "document_context"])
+
+spanClassifier = BertForQuestionAnswering.pretrained("bert_qa_mqa_cls","en") \
+.setInputCols(["document_question", "document_context"]) \
+.setOutputCol("answer") \
+.setCaseSensitive(True)
+
+pipeline = Pipeline().setStages([
+document_assembler,
+spanClassifier
+])
+
+example = spark.createDataFrame([["What's my name?", "My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(example).transform(example)
+```
+```scala
+val document = new MultiDocumentAssembler()
+.setInputCols("question", "context")
+.setOutputCols("document_question", "document_context")
+
+val spanClassifier = BertForQuestionAnswering
+.pretrained("bert_qa_mqa_cls","en")
+.setInputCols(Array("document_question", "document_context"))
+.setOutputCol("answer")
+.setCaseSensitive(true)
+.setMaxSentenceLength(512)
+
+val pipeline = new Pipeline().setStages(Array(document, spanClassifier))
+
+val example = Seq(
+("Where was John Lenon born?", "John Lenon was born in London and lived in Paris. My name is Sarah and I live in London."),
+("What's my name?", "My name is Clara and I live in Berkeley."))
+.toDF("question", "context")
+
+val result = pipeline.fit(example).transform(example)
+```
+
+{:.nlu-block}
+```python
+import nlu
+nlu.load("en.answer_question.mqa_cls.bert.by_xraychen").predict("""What's my name?|||"My name is Clara and I live in Berkeley.""")
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|bert_qa_mqa_cls|
+|Compatibility:|Spark NLP 5.2.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|407.2 MB|
+|Case sensitive:|true|
+|Max sentence length:|512|
+
+## References
+
+References
+
+- https://huggingface.co/xraychen/mqa-cls
\ No newline at end of file
diff --git a/docs/_posts/ahmedlone127/2023-11-15-bert_qa_mrp_bert_finetuned_squad_en.md b/docs/_posts/ahmedlone127/2023-11-15-bert_qa_mrp_bert_finetuned_squad_en.md
new file mode 100644
index 00000000000000..18e46ce5c927dc
--- /dev/null
+++ b/docs/_posts/ahmedlone127/2023-11-15-bert_qa_mrp_bert_finetuned_squad_en.md
@@ -0,0 +1,108 @@
+---
+layout: model
+title: English BertForQuestionAnswering model (from mrp)
+author: John Snow Labs
+name: bert_qa_mrp_bert_finetuned_squad
+date: 2023-11-15
+tags: [en, open_source, question_answering, bert, onnx]
+task: Question Answering
+language: en
+edition: Spark NLP 5.2.0
+spark_version: 3.0
+supported: true
+engine: onnx
+annotator: BertForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained Question Answering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `bert-finetuned-squad` is an English model originally trained by `mrp`.
+
+## Predicted Entities
+
+
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/bert_qa_mrp_bert_finetuned_squad_en_5.2.0_3.0_1700007840846.zip){:.button.button-orange.button-orange-trans.arr.button-icon}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/bert_qa_mrp_bert_finetuned_squad_en_5.2.0_3.0_1700007840846.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+document_assembler = MultiDocumentAssembler() \
+.setInputCols(["question", "context"]) \
+.setOutputCols(["document_question", "document_context"])
+
+spanClassifier = BertForQuestionAnswering.pretrained("bert_qa_mrp_bert_finetuned_squad","en") \
+.setInputCols(["document_question", "document_context"]) \
+.setOutputCol("answer") \
+.setCaseSensitive(True)
+
+pipeline = Pipeline().setStages([
+document_assembler,
+spanClassifier
+])
+
+example = spark.createDataFrame([["What's my name?", "My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(example).transform(example)
+```
+```scala
+val document = new MultiDocumentAssembler()
+.setInputCols("question", "context")
+.setOutputCols("document_question", "document_context")
+
+val spanClassifier = BertForQuestionAnswering
+.pretrained("bert_qa_mrp_bert_finetuned_squad","en")
+.setInputCols(Array("document_question", "document_context"))
+.setOutputCol("answer")
+.setCaseSensitive(true)
+.setMaxSentenceLength(512)
+
+val pipeline = new Pipeline().setStages(Array(document, spanClassifier))
+
+val example = Seq(
+("Where was John Lenon born?", "John Lenon was born in London and lived in Paris. My name is Sarah and I live in London."),
+("What's my name?", "My name is Clara and I live in Berkeley."))
+.toDF("question", "context")
+
+val result = pipeline.fit(example).transform(example)
+```
+
+{:.nlu-block}
+```python
+import nlu
+nlu.load("en.answer_question.squad.bert.by_mrp").predict("""What's my name?|||"My name is Clara and I live in Berkeley.""")
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|bert_qa_mrp_bert_finetuned_squad|
+|Compatibility:|Spark NLP 5.2.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|403.6 MB|
+|Case sensitive:|true|
+|Max sentence length:|512|
+
+## References
+
+References
+
+- https://huggingface.co/mrp/bert-finetuned-squad
\ No newline at end of file
diff --git a/docs/_posts/ahmedlone127/2023-11-15-bert_qa_multi_uncased_trained_squadv2_en.md b/docs/_posts/ahmedlone127/2023-11-15-bert_qa_multi_uncased_trained_squadv2_en.md
new file mode 100644
index 00000000000000..c8280a75e9191c
--- /dev/null
+++ b/docs/_posts/ahmedlone127/2023-11-15-bert_qa_multi_uncased_trained_squadv2_en.md
@@ -0,0 +1,101 @@
+---
+layout: model
+title: English BertForQuestionAnswering Uncased model (from roshnir)
+author: John Snow Labs
+name: bert_qa_multi_uncased_trained_squadv2
+date: 2023-11-15
+tags: [en, open_source, bert, question_answering, onnx]
+task: Question Answering
+language: en
+edition: Spark NLP 5.2.0
+spark_version: 3.0
+supported: true
+engine: onnx
+annotator: BertForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained Question Answering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `bert-multi-uncased-trained-squadv2` is an English model originally trained by `roshnir`.
+
+## Predicted Entities
+
+
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/bert_qa_multi_uncased_trained_squadv2_en_5.2.0_3.0_1700010782611.zip){:.button.button-orange.button-orange-trans.arr.button-icon}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/bert_qa_multi_uncased_trained_squadv2_en_5.2.0_3.0_1700010782611.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+documentAssembler = MultiDocumentAssembler() \
+ .setInputCols(["question", "context"]) \
+ .setOutputCols(["document_question", "document_context"])
+
+spanClassifier = BertForQuestionAnswering.pretrained("bert_qa_multi_uncased_trained_squadv2","en") \
+ .setInputCols(["document_question", "document_context"]) \
+ .setOutputCol("answer")\
+ .setCaseSensitive(False)
+
+pipeline = Pipeline(stages=[documentAssembler, spanClassifier])
+
+data = spark.createDataFrame([["What is my name?", "My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val documentAssembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val spanClassifier = BertForQuestionAnswering.pretrained("bert_qa_multi_uncased_trained_squadv2","en")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(false)
+
+val pipeline = new Pipeline().setStages(Array(documentAssembler, spanClassifier))
+
+val data = Seq(("What is my name?", "My name is Clara and I live in Berkeley.")).toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+{:.nlu-block}
+```python
+import nlu
+nlu.load("en.answer_question.bert.squadv2.uncased_v2").predict("""What is my name?|||"My name is Clara and I live in Berkeley.""")
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|bert_qa_multi_uncased_trained_squadv2|
+|Compatibility:|Spark NLP 5.2.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|625.5 MB|
+|Case sensitive:|false|
+|Max sentence length:|512|
+
+## References
+
+References
+
+- https://huggingface.co/roshnir/bert-multi-uncased-trained-squadv2
+- https://aclanthology.org/2020.acl-main.421.pdf
\ No newline at end of file
diff --git a/docs/_posts/ahmedlone127/2023-11-15-bert_qa_multilingual_base_cased_chines_zh.md b/docs/_posts/ahmedlone127/2023-11-15-bert_qa_multilingual_base_cased_chines_zh.md
new file mode 100644
index 00000000000000..7caac2232692e4
--- /dev/null
+++ b/docs/_posts/ahmedlone127/2023-11-15-bert_qa_multilingual_base_cased_chines_zh.md
@@ -0,0 +1,100 @@
+---
+layout: model
+title: Chinese BertForQuestionAnswering Base Cased model (from bhavikardeshna)
+author: John Snow Labs
+name: bert_qa_multilingual_base_cased_chines
+date: 2023-11-15
+tags: [zh, open_source, bert, question_answering, onnx]
+task: Question Answering
+language: zh
+edition: Spark NLP 5.2.0
+spark_version: 3.0
+supported: true
+engine: onnx
+annotator: BertForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained Question Answering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `multilingual-bert-base-cased-chinese` is a Chinese model originally trained by `bhavikardeshna`.
+
+## Predicted Entities
+
+
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/bert_qa_multilingual_base_cased_chines_zh_5.2.0_3.0_1700010354850.zip){:.button.button-orange.button-orange-trans.arr.button-icon}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/bert_qa_multilingual_base_cased_chines_zh_5.2.0_3.0_1700010354850.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|bert_qa_multilingual_base_cased_chines|
+|Compatibility:|Spark NLP 5.2.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|zh|
+|Size:|665.1 MB|
+|Case sensitive:|true|
+|Max sentence length:|512|
+
+## References
+
+References
+
+- https://huggingface.co/bhavikardeshna/multilingual-bert-base-cased-chinese
\ No newline at end of file
diff --git a/docs/_posts/ahmedlone127/2023-11-15-bert_qa_multilingual_bert_base_cased_arabic_ar.md b/docs/_posts/ahmedlone127/2023-11-15-bert_qa_multilingual_bert_base_cased_arabic_ar.md
new file mode 100644
index 00000000000000..e05c78e29bf6a5
--- /dev/null
+++ b/docs/_posts/ahmedlone127/2023-11-15-bert_qa_multilingual_bert_base_cased_arabic_ar.md
@@ -0,0 +1,108 @@
+---
+layout: model
+title: Arabic BertForQuestionAnswering model (from bhavikardeshna)
+author: John Snow Labs
+name: bert_qa_multilingual_bert_base_cased_arabic
+date: 2023-11-15
+tags: [open_source, question_answering, bert, ar, onnx]
+task: Question Answering
+language: ar
+edition: Spark NLP 5.2.0
+spark_version: 3.0
+supported: true
+engine: onnx
+annotator: BertForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained Question Answering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `multilingual-bert-base-cased-arabic` is an Arabic model originally trained by `bhavikardeshna`.
+
+## Predicted Entities
+
+
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/bert_qa_multilingual_bert_base_cased_arabic_ar_5.2.0_3.0_1700011124489.zip){:.button.button-orange.button-orange-trans.arr.button-icon}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/bert_qa_multilingual_bert_base_cased_arabic_ar_5.2.0_3.0_1700011124489.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+document_assembler = MultiDocumentAssembler() \
+.setInputCols(["question", "context"]) \
+.setOutputCols(["document_question", "document_context"])
+
+spanClassifier = BertForQuestionAnswering.pretrained("bert_qa_multilingual_bert_base_cased_arabic","ar") \
+.setInputCols(["document_question", "document_context"]) \
+.setOutputCol("answer") \
+.setCaseSensitive(True)
+
+pipeline = Pipeline().setStages([
+document_assembler,
+spanClassifier
+])
+
+example = spark.createDataFrame([["What's my name?", "My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(example).transform(example)
+```
+```scala
+val document = new MultiDocumentAssembler()
+.setInputCols("question", "context")
+.setOutputCols("document_question", "document_context")
+
+val spanClassifier = BertForQuestionAnswering
+.pretrained("bert_qa_multilingual_bert_base_cased_arabic","ar")
+.setInputCols(Array("document_question", "document_context"))
+.setOutputCol("answer")
+.setCaseSensitive(true)
+.setMaxSentenceLength(512)
+
+val pipeline = new Pipeline().setStages(Array(document, spanClassifier))
+
+val example = Seq(
+("Where was John Lenon born?", "John Lenon was born in London and lived in Paris. My name is Sarah and I live in London."),
+("What's my name?", "My name is Clara and I live in Berkeley."))
+.toDF("question", "context")
+
+val result = pipeline.fit(example).transform(example)
+```
+
+{:.nlu-block}
+```python
+import nlu
+nlu.load("ar.answer_question.bert.multilingual_arabic_tuned_base_cased.by_bhavikardeshna").predict("""What's my name?|||"My name is Clara and I live in Berkeley.""")
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|bert_qa_multilingual_bert_base_cased_arabic|
+|Compatibility:|Spark NLP 5.2.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|ar|
+|Size:|665.1 MB|
+|Case sensitive:|true|
+|Max sentence length:|512|
+
+## References
+
+References
+
+- https://huggingface.co/bhavikardeshna/multilingual-bert-base-cased-arabic
\ No newline at end of file
diff --git a/docs/_posts/ahmedlone127/2023-11-15-bert_qa_multilingual_bert_base_cased_german_de.md b/docs/_posts/ahmedlone127/2023-11-15-bert_qa_multilingual_bert_base_cased_german_de.md
new file mode 100644
index 00000000000000..fb31a29fa1e920
--- /dev/null
+++ b/docs/_posts/ahmedlone127/2023-11-15-bert_qa_multilingual_bert_base_cased_german_de.md
@@ -0,0 +1,108 @@
+---
+layout: model
+title: German BertForQuestionAnswering model (from bhavikardeshna)
+author: John Snow Labs
+name: bert_qa_multilingual_bert_base_cased_german
+date: 2023-11-15
+tags: [open_source, question_answering, bert, de, onnx]
+task: Question Answering
+language: de
+edition: Spark NLP 5.2.0
+spark_version: 3.0
+supported: true
+engine: onnx
+annotator: BertForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained Question Answering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `multilingual-bert-base-cased-german` is a German model originally trained by `bhavikardeshna`.
+
+## Predicted Entities
+
+
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/bert_qa_multilingual_bert_base_cased_german_de_5.2.0_3.0_1700010746503.zip){:.button.button-orange.button-orange-trans.arr.button-icon}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/bert_qa_multilingual_bert_base_cased_german_de_5.2.0_3.0_1700010746503.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+document_assembler = MultiDocumentAssembler() \
+.setInputCols(["question", "context"]) \
+.setOutputCols(["document_question", "document_context"])
+
+spanClassifier = BertForQuestionAnswering.pretrained("bert_qa_multilingual_bert_base_cased_german","de") \
+.setInputCols(["document_question", "document_context"]) \
+.setOutputCol("answer") \
+.setCaseSensitive(True)
+
+pipeline = Pipeline().setStages([
+document_assembler,
+spanClassifier
+])
+
+example = spark.createDataFrame([["What's my name?", "My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(example).transform(example)
+```
+```scala
+val document = new MultiDocumentAssembler()
+.setInputCols("question", "context")
+.setOutputCols("document_question", "document_context")
+
+val spanClassifier = BertForQuestionAnswering
+.pretrained("bert_qa_multilingual_bert_base_cased_german","de")
+.setInputCols(Array("document_question", "document_context"))
+.setOutputCol("answer")
+.setCaseSensitive(true)
+.setMaxSentenceLength(512)
+
+val pipeline = new Pipeline().setStages(Array(document, spanClassifier))
+
+val example = Seq(
+("Where was John Lenon born?", "John Lenon was born in London and lived in Paris. My name is Sarah and I live in London."),
+("What's my name?", "My name is Clara and I live in Berkeley."))
+.toDF("question", "context")
+
+val result = pipeline.fit(example).transform(example)
+```
+
+{:.nlu-block}
+```python
+import nlu
+nlu.load("de.answer_question.bert.multilingual_german_tuned_base_cased.by_bhavikardeshna").predict("""What's my name?|||"My name is Clara and I live in Berkeley.""")
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|bert_qa_multilingual_bert_base_cased_german|
+|Compatibility:|Spark NLP 5.2.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|de|
+|Size:|665.1 MB|
+|Case sensitive:|true|
+|Max sentence length:|512|
+
+## References
+
+References
+
+- https://huggingface.co/bhavikardeshna/multilingual-bert-base-cased-german
\ No newline at end of file
diff --git a/docs/_posts/ahmedlone127/2023-11-15-bert_qa_multilingual_bert_base_cased_spanish_es.md b/docs/_posts/ahmedlone127/2023-11-15-bert_qa_multilingual_bert_base_cased_spanish_es.md
new file mode 100644
index 00000000000000..5050f6bbcd5596
--- /dev/null
+++ b/docs/_posts/ahmedlone127/2023-11-15-bert_qa_multilingual_bert_base_cased_spanish_es.md
@@ -0,0 +1,108 @@
+---
+layout: model
+title: Castilian, Spanish BertForQuestionAnswering model (from bhavikardeshna)
+author: John Snow Labs
+name: bert_qa_multilingual_bert_base_cased_spanish
+date: 2023-11-15
+tags: [open_source, question_answering, bert, es, onnx]
+task: Question Answering
+language: es
+edition: Spark NLP 5.2.0
+spark_version: 3.0
+supported: true
+engine: onnx
+annotator: BertForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained Question Answering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `multilingual-bert-base-cased-spanish` is a Castilian, Spanish model originally trained by `bhavikardeshna`.
+
+## Predicted Entities
+
+
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/bert_qa_multilingual_bert_base_cased_spanish_es_5.2.0_3.0_1700008189037.zip){:.button.button-orange.button-orange-trans.arr.button-icon}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/bert_qa_multilingual_bert_base_cased_spanish_es_5.2.0_3.0_1700008189037.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+document_assembler = MultiDocumentAssembler() \
+.setInputCols(["question", "context"]) \
+.setOutputCols(["document_question", "document_context"])
+
+spanClassifier = BertForQuestionAnswering.pretrained("bert_qa_multilingual_bert_base_cased_spanish","es") \
+.setInputCols(["document_question", "document_context"]) \
+.setOutputCol("answer") \
+.setCaseSensitive(True)
+
+pipeline = Pipeline().setStages([
+document_assembler,
+spanClassifier
+])
+
+example = spark.createDataFrame([["What's my name?", "My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(example).transform(example)
+```
+```scala
+val document = new MultiDocumentAssembler()
+.setInputCols("question", "context")
+.setOutputCols("document_question", "document_context")
+
+val spanClassifier = BertForQuestionAnswering
+.pretrained("bert_qa_multilingual_bert_base_cased_spanish","es")
+.setInputCols(Array("document_question", "document_context"))
+.setOutputCol("answer")
+.setCaseSensitive(true)
+.setMaxSentenceLength(512)
+
+val pipeline = new Pipeline().setStages(Array(document, spanClassifier))
+
+val example = Seq(
+("Where was John Lenon born?", "John Lenon was born in London and lived in Paris. My name is Sarah and I live in London."),
+("What's my name?", "My name is Clara and I live in Berkeley."))
+.toDF("question", "context")
+
+val result = pipeline.fit(example).transform(example)
+```
+
+{:.nlu-block}
+```python
+import nlu
+nlu.load("es.answer_question.bert.multilingual_spanish_tuned_base_cased.by_bhavikardeshna").predict("""What's my name?|||"My name is Clara and I live in Berkeley.""")
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|bert_qa_multilingual_bert_base_cased_spanish|
+|Compatibility:|Spark NLP 5.2.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|es|
+|Size:|665.1 MB|
+|Case sensitive:|true|
+|Max sentence length:|512|
+
+## References
+
+References
+
+- https://huggingface.co/bhavikardeshna/multilingual-bert-base-cased-spanish
\ No newline at end of file
diff --git a/docs/_posts/ahmedlone127/2023-11-15-bert_qa_multilingual_bert_base_cased_vietnamese_vi.md b/docs/_posts/ahmedlone127/2023-11-15-bert_qa_multilingual_bert_base_cased_vietnamese_vi.md
new file mode 100644
index 00000000000000..410b9d34f94da3
--- /dev/null
+++ b/docs/_posts/ahmedlone127/2023-11-15-bert_qa_multilingual_bert_base_cased_vietnamese_vi.md
@@ -0,0 +1,108 @@
+---
+layout: model
+title: Vietnamese BertForQuestionAnswering model (from bhavikardeshna)
+author: John Snow Labs
+name: bert_qa_multilingual_bert_base_cased_vietnamese
+date: 2023-11-15
+tags: [open_source, question_answering, bert, vi, onnx]
+task: Question Answering
+language: vi
+edition: Spark NLP 5.2.0
+spark_version: 3.0
+supported: true
+engine: onnx
+annotator: BertForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained Question Answering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `multilingual-bert-base-cased-vietnamese` is a Vietnamese model originally trained by `bhavikardeshna`.
+
+## Predicted Entities
+
+
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/bert_qa_multilingual_bert_base_cased_vietnamese_vi_5.2.0_3.0_1700008525364.zip){:.button.button-orange.button-orange-trans.arr.button-icon}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/bert_qa_multilingual_bert_base_cased_vietnamese_vi_5.2.0_3.0_1700008525364.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+document_assembler = MultiDocumentAssembler() \
+.setInputCols(["question", "context"]) \
+.setOutputCols(["document_question", "document_context"])
+
+spanClassifier = BertForQuestionAnswering.pretrained("bert_qa_multilingual_bert_base_cased_vietnamese","vi") \
+.setInputCols(["document_question", "document_context"]) \
+.setOutputCol("answer") \
+.setCaseSensitive(True)
+
+pipeline = Pipeline().setStages([
+document_assembler,
+spanClassifier
+])
+
+example = spark.createDataFrame([["What's my name?", "My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(example).transform(example)
+```
+```scala
+val document = new MultiDocumentAssembler()
+.setInputCols("question", "context")
+.setOutputCols("document_question", "document_context")
+
+val spanClassifier = BertForQuestionAnswering
+.pretrained("bert_qa_multilingual_bert_base_cased_vietnamese","vi")
+.setInputCols(Array("document_question", "document_context"))
+.setOutputCol("answer")
+.setCaseSensitive(true)
+.setMaxSentenceLength(512)
+
+val pipeline = new Pipeline().setStages(Array(document, spanClassifier))
+
+val example = Seq(
+("Where was John Lenon born?", "John Lenon was born in London and lived in Paris. My name is Sarah and I live in London."),
+("What's my name?", "My name is Clara and I live in Berkeley."))
+.toDF("question", "context")
+
+val result = pipeline.fit(example).transform(example)
+```
+
+{:.nlu-block}
+```python
+import nlu
+nlu.load("vi.answer_question.bert.multilingual_vietnamese_tuned_base_cased.by_bhavikardeshna").predict("""What's my name?|||"My name is Clara and I live in Berkeley.""")
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|bert_qa_multilingual_bert_base_cased_vietnamese|
+|Compatibility:|Spark NLP 5.2.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|vi|
+|Size:|665.1 MB|
+|Case sensitive:|true|
+|Max sentence length:|512|
+
+## References
+
+References
+
+- https://huggingface.co/bhavikardeshna/multilingual-bert-base-cased-vietnamese
\ No newline at end of file
diff --git a/docs/_posts/ahmedlone127/2023-11-15-bert_qa_muril_large_squad2_hi.md b/docs/_posts/ahmedlone127/2023-11-15-bert_qa_muril_large_squad2_hi.md
new file mode 100644
index 00000000000000..7c6ffd687dd0ff
--- /dev/null
+++ b/docs/_posts/ahmedlone127/2023-11-15-bert_qa_muril_large_squad2_hi.md
@@ -0,0 +1,110 @@
+---
+layout: model
+title: Hindi BertForQuestionAnswering model (from Sindhu)
+author: John Snow Labs
+name: bert_qa_muril_large_squad2
+date: 2023-11-15
+tags: [open_source, question_answering, bert, hi, onnx]
+task: Question Answering
+language: hi
+edition: Spark NLP 5.2.0
+spark_version: 3.0
+supported: true
+engine: onnx
+annotator: BertForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained Question Answering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `muril-large-squad2` is a Hindi model originally trained by `Sindhu`.
+
+## Predicted Entities
+
+
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/bert_qa_muril_large_squad2_hi_5.2.0_3.0_1700011602780.zip){:.button.button-orange.button-orange-trans.arr.button-icon}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/bert_qa_muril_large_squad2_hi_5.2.0_3.0_1700011602780.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+document_assembler = MultiDocumentAssembler() \
+.setInputCols(["question", "context"]) \
+.setOutputCols(["document_question", "document_context"])
+
+spanClassifier = BertForQuestionAnswering.pretrained("bert_qa_muril_large_squad2","hi") \
+.setInputCols(["document_question", "document_context"]) \
+.setOutputCol("answer") \
+.setCaseSensitive(True)
+
+pipeline = Pipeline().setStages([
+document_assembler,
+spanClassifier
+])
+
+example = spark.createDataFrame([["What's my name?", "My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(example).transform(example)
+```
+```scala
+val document = new MultiDocumentAssembler()
+.setInputCols("question", "context")
+.setOutputCols("document_question", "document_context")
+
+val spanClassifier = BertForQuestionAnswering
+.pretrained("bert_qa_muril_large_squad2","hi")
+.setInputCols(Array("document_question", "document_context"))
+.setOutputCol("answer")
+.setCaseSensitive(true)
+.setMaxSentenceLength(512)
+
+val pipeline = new Pipeline().setStages(Array(document, spanClassifier))
+
+val example = Seq(
+("Where was John Lenon born?", "John Lenon was born in London and lived in Paris. My name is Sarah and I live in London."),
+("What's my name?", "My name is Clara and I live in Berkeley."))
+.toDF("question", "context")
+
+val result = pipeline.fit(example).transform(example)
+```
+
+{:.nlu-block}
+```python
+import nlu
+nlu.load("hi.answer_question.squadv2.bert.large").predict("""What's my name?|||"My name is Clara and I live in Berkeley.""")
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|bert_qa_muril_large_squad2|
+|Compatibility:|Spark NLP 5.2.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|hi|
+|Size:|1.9 GB|
+|Case sensitive:|true|
+|Max sentence length:|512|
+
+## References
+
+References
+
+- https://huggingface.co/Sindhu/muril-large-squad2
+- https://worksheets.codalab.org/rest/bundles/0x6b567e1cf2e041ec80d7098f031c5c9e/contents/blob/
+- https://twitter.com/batw0man
\ No newline at end of file
diff --git a/docs/_posts/ahmedlone127/2023-11-15-bert_qa_mymild_finetuned_squad_en.md b/docs/_posts/ahmedlone127/2023-11-15-bert_qa_mymild_finetuned_squad_en.md
new file mode 100644
index 00000000000000..d7de9c5613a3e2
--- /dev/null
+++ b/docs/_posts/ahmedlone127/2023-11-15-bert_qa_mymild_finetuned_squad_en.md
@@ -0,0 +1,100 @@
+---
+layout: model
+title: English BertForQuestionAnswering Cased model (from MyMild)
+author: John Snow Labs
+name: bert_qa_mymild_finetuned_squad
+date: 2023-11-15
+tags: [en, open_source, bert, question_answering, onnx]
+task: Question Answering
+language: en
+edition: Spark NLP 5.2.0
+spark_version: 3.0
+supported: true
+engine: onnx
+annotator: BertForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained Question Answering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `bert-finetuned-squad` is a English model originally trained by `MyMild`.
+
+## Predicted Entities
+
+
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/bert_qa_mymild_finetuned_squad_en_5.2.0_3.0_1700011622327.zip){:.button.button-orange.button-orange-trans.arr.button-icon}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/bert_qa_mymild_finetuned_squad_en_5.2.0_3.0_1700011622327.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+documentAssembler = MultiDocumentAssembler() \
+ .setInputCols(["question", "context"]) \
+ .setOutputCols(["document_question", "document_context"])
+
+spanClassifier = BertForQuestionAnswering.pretrained("bert_qa_mymild_finetuned_squad","en") \
+ .setInputCols(["document_question", "document_context"]) \
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[documentAssembler, spanClassifier])
+
+data = spark.createDataFrame([["What is my name?", "My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val documentAssembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val spanClassifier = BertForQuestionAnswering.pretrained("bert_qa_mymild_finetuned_squad","en")
+  .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(documentAssembler, spanClassifier))
+
+val data = Seq(("What is my name?", "My name is Clara and I live in Berkeley.")).toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+{:.nlu-block}
+```python
+import nlu
+nlu.load("en.answer_question.bert.squad.finetuned.by_MyMild").predict("""What is my name?|||"My name is Clara and I live in Berkeley.""")
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|bert_qa_mymild_finetuned_squad|
+|Compatibility:|Spark NLP 5.2.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|403.6 MB|
+|Case sensitive:|true|
+|Max sentence length:|512|
+
+## References
+
+References
+
+- https://huggingface.co/MyMild/bert-finetuned-squad
\ No newline at end of file
diff --git a/docs/_posts/ahmedlone127/2023-11-15-bert_qa_neg_komrc_train_ko.md b/docs/_posts/ahmedlone127/2023-11-15-bert_qa_neg_komrc_train_ko.md
new file mode 100644
index 00000000000000..8aa111b9b97c7d
--- /dev/null
+++ b/docs/_posts/ahmedlone127/2023-11-15-bert_qa_neg_komrc_train_ko.md
@@ -0,0 +1,94 @@
+---
+layout: model
+title: Korean BertForQuestionAnswering Cased model (from Taekyoon)
+author: John Snow Labs
+name: bert_qa_neg_komrc_train
+date: 2023-11-15
+tags: [ko, open_source, bert, question_answering, onnx]
+task: Question Answering
+language: ko
+edition: Spark NLP 5.2.0
+spark_version: 3.0
+supported: true
+engine: onnx
+annotator: BertForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained Question Answering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `neg_komrc_train` is a Korean model originally trained by `Taekyoon`.
+
+## Predicted Entities
+
+
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/bert_qa_neg_komrc_train_ko_5.2.0_3.0_1700011887300.zip){:.button.button-orange.button-orange-trans.arr.button-icon}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/bert_qa_neg_komrc_train_ko_5.2.0_3.0_1700011887300.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+documentAssembler = MultiDocumentAssembler() \
+ .setInputCols(["question", "context"]) \
+ .setOutputCols(["document_question", "document_context"])
+
+spanClassifier = BertForQuestionAnswering.pretrained("bert_qa_neg_komrc_train","ko") \
+ .setInputCols(["document_question", "document_context"]) \
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[documentAssembler, spanClassifier])
+
+data = spark.createDataFrame([["내 이름은 무엇입니까?", "제 이름은 클라라이고 저는 버클리에 살고 있습니다."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val documentAssembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val spanClassifier = BertForQuestionAnswering.pretrained("bert_qa_neg_komrc_train","ko")
+  .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(documentAssembler, spanClassifier))
+
+val data = Seq(("내 이름은 무엇입니까?", "제 이름은 클라라이고 저는 버클리에 살고 있습니다.")).toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|bert_qa_neg_komrc_train|
+|Compatibility:|Spark NLP 5.2.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|ko|
+|Size:|406.2 MB|
+|Case sensitive:|true|
+|Max sentence length:|512|
+
+## References
+
+References
+
+- https://huggingface.co/Taekyoon/neg_komrc_train
\ No newline at end of file
diff --git a/docs/_posts/ahmedlone127/2023-11-15-bert_qa_ner_conll_base_uncased_en.md b/docs/_posts/ahmedlone127/2023-11-15-bert_qa_ner_conll_base_uncased_en.md
new file mode 100644
index 00000000000000..c67cdb22dfe55a
--- /dev/null
+++ b/docs/_posts/ahmedlone127/2023-11-15-bert_qa_ner_conll_base_uncased_en.md
@@ -0,0 +1,94 @@
+---
+layout: model
+title: English BertForQuestionAnswering Base Uncased model (from dayyass)
+author: John Snow Labs
+name: bert_qa_ner_conll_base_uncased
+date: 2023-11-15
+tags: [en, open_source, bert, question_answering, onnx]
+task: Question Answering
+language: en
+edition: Spark NLP 5.2.0
+spark_version: 3.0
+supported: true
+engine: onnx
+annotator: BertForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained BertForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `qaner-conll-bert-base-uncased` is a English model originally trained by `dayyass`.
+
+## Predicted Entities
+
+
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/bert_qa_ner_conll_base_uncased_en_5.2.0_3.0_1700008964066.zip){:.button.button-orange.button-orange-trans.arr.button-icon}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/bert_qa_ner_conll_base_uncased_en_5.2.0_3.0_1700008964066.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = BertForQuestionAnswering.pretrained("bert_qa_ner_conll_base_uncased","en")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = BertForQuestionAnswering.pretrained("bert_qa_ner_conll_base_uncased","en")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq(("What's my name?","My name is Clara and I live in Berkeley.")).toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|bert_qa_ner_conll_base_uncased|
+|Compatibility:|Spark NLP 5.2.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|407.2 MB|
+|Case sensitive:|false|
+|Max sentence length:|512|
+
+## References
+
+References
+
+- https://huggingface.co/dayyass/qaner-conll-bert-base-uncased
\ No newline at end of file
diff --git a/docs/_posts/ahmedlone127/2023-11-15-bert_qa_neuralmagic_bert_squad_12layer_0sparse_en.md b/docs/_posts/ahmedlone127/2023-11-15-bert_qa_neuralmagic_bert_squad_12layer_0sparse_en.md
new file mode 100644
index 00000000000000..b332ca26efca8b
--- /dev/null
+++ b/docs/_posts/ahmedlone127/2023-11-15-bert_qa_neuralmagic_bert_squad_12layer_0sparse_en.md
@@ -0,0 +1,108 @@
+---
+layout: model
+title: English BertForQuestionAnswering model (from spacemanidol)
+author: John Snow Labs
+name: bert_qa_neuralmagic_bert_squad_12layer_0sparse
+date: 2023-11-15
+tags: [en, open_source, question_answering, bert, onnx]
+task: Question Answering
+language: en
+edition: Spark NLP 5.2.0
+spark_version: 3.0
+supported: true
+engine: onnx
+annotator: BertForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained Question Answering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `neuralmagic-bert-squad-12layer-0sparse` is a English model originally trained by `spacemanidol`.
+
+## Predicted Entities
+
+
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/bert_qa_neuralmagic_bert_squad_12layer_0sparse_en_5.2.0_3.0_1700009310735.zip){:.button.button-orange.button-orange-trans.arr.button-icon}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/bert_qa_neuralmagic_bert_squad_12layer_0sparse_en_5.2.0_3.0_1700009310735.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+document_assembler = MultiDocumentAssembler() \
+.setInputCols(["question", "context"]) \
+.setOutputCols(["document_question", "document_context"])
+
+spanClassifier = BertForQuestionAnswering.pretrained("bert_qa_neuralmagic_bert_squad_12layer_0sparse","en") \
+.setInputCols(["document_question", "document_context"]) \
+.setOutputCol("answer") \
+.setCaseSensitive(True)
+
+pipeline = Pipeline().setStages([
+document_assembler,
+spanClassifier
+])
+
+example = spark.createDataFrame([["What's my name?", "My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(example).transform(example)
+```
+```scala
+val document = new MultiDocumentAssembler()
+.setInputCols("question", "context")
+.setOutputCols("document_question", "document_context")
+
+val spanClassifier = BertForQuestionAnswering
+.pretrained("bert_qa_neuralmagic_bert_squad_12layer_0sparse","en")
+.setInputCols(Array("document_question", "document_context"))
+.setOutputCol("answer")
+.setCaseSensitive(true)
+.setMaxSentenceLength(512)
+
+val pipeline = new Pipeline().setStages(Array(document, spanClassifier))
+
+val example = Seq(
+("Where was John Lenon born?", "John Lenon was born in London and lived in Paris. My name is Sarah and I live in London."),
+("What's my name?", "My name is Clara and I live in Berkeley."))
+.toDF("question", "context")
+
+val result = pipeline.fit(example).transform(example)
+```
+
+{:.nlu-block}
+```python
+import nlu
+nlu.load("en.answer_question.squad.bert").predict("""What's my name?|||"My name is Clara and I live in Berkeley.""")
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|bert_qa_neuralmagic_bert_squad_12layer_0sparse|
+|Compatibility:|Spark NLP 5.2.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|407.2 MB|
+|Case sensitive:|true|
+|Max sentence length:|512|
+
+## References
+
+References
+
+- https://huggingface.co/spacemanidol/neuralmagic-bert-squad-12layer-0sparse
\ No newline at end of file
diff --git a/docs/_posts/ahmedlone127/2023-11-15-bert_qa_news_pretrain_bert_ft_nepal_bhasa_newsqa_en.md b/docs/_posts/ahmedlone127/2023-11-15-bert_qa_news_pretrain_bert_ft_nepal_bhasa_newsqa_en.md
new file mode 100644
index 00000000000000..b85e014e392a07
--- /dev/null
+++ b/docs/_posts/ahmedlone127/2023-11-15-bert_qa_news_pretrain_bert_ft_nepal_bhasa_newsqa_en.md
@@ -0,0 +1,93 @@
+---
+layout: model
+title: English bert_qa_news_pretrain_bert_ft_nepal_bhasa_newsqa BertForQuestionAnswering from AnonymousSub
+author: John Snow Labs
+name: bert_qa_news_pretrain_bert_ft_nepal_bhasa_newsqa
+date: 2023-11-15
+tags: [bert, en, open_source, question_answering, onnx]
+task: Question Answering
+language: en
+edition: Spark NLP 5.2.0
+spark_version: 3.0
+supported: true
+engine: onnx
+annotator: BertForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained BertForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP.`bert_qa_news_pretrain_bert_ft_nepal_bhasa_newsqa` is a English model originally trained by AnonymousSub.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/bert_qa_news_pretrain_bert_ft_nepal_bhasa_newsqa_en_5.2.0_3.0_1700011832898.zip){:.button.button-orange.button-orange-trans.arr.button-icon}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/bert_qa_news_pretrain_bert_ft_nepal_bhasa_newsqa_en_5.2.0_3.0_1700011832898.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|bert_qa_news_pretrain_bert_ft_nepal_bhasa_newsqa|
+|Compatibility:|Spark NLP 5.2.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|409.9 MB|
+
+## References
+
+https://huggingface.co/LenaSchmidt/no_need_to_name_this
\ No newline at end of file
diff --git a/docs/_posts/ahmedlone127/2023-11-15-bert_qa_output_files_en.md b/docs/_posts/ahmedlone127/2023-11-15-bert_qa_output_files_en.md
new file mode 100644
index 00000000000000..352808a416059f
--- /dev/null
+++ b/docs/_posts/ahmedlone127/2023-11-15-bert_qa_output_files_en.md
@@ -0,0 +1,108 @@
+---
+layout: model
+title: English BertForQuestionAnswering model (from sunitha)
+author: John Snow Labs
+name: bert_qa_output_files
+date: 2023-11-15
+tags: [en, open_source, question_answering, bert, onnx]
+task: Question Answering
+language: en
+edition: Spark NLP 5.2.0
+spark_version: 3.0
+supported: true
+engine: onnx
+annotator: BertForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained Question Answering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `output_files` is a English model originally trained by `sunitha`.
+
+## Predicted Entities
+
+
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/bert_qa_output_files_en_5.2.0_3.0_1700012511653.zip){:.button.button-orange.button-orange-trans.arr.button-icon}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/bert_qa_output_files_en_5.2.0_3.0_1700012511653.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+document_assembler = MultiDocumentAssembler() \
+.setInputCols(["question", "context"]) \
+.setOutputCols(["document_question", "document_context"])
+
+spanClassifier = BertForQuestionAnswering.pretrained("bert_qa_output_files","en") \
+.setInputCols(["document_question", "document_context"]) \
+.setOutputCol("answer") \
+.setCaseSensitive(True)
+
+pipeline = Pipeline().setStages([
+document_assembler,
+spanClassifier
+])
+
+example = spark.createDataFrame([["What's my name?", "My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(example).transform(example)
+```
+```scala
+val document = new MultiDocumentAssembler()
+.setInputCols("question", "context")
+.setOutputCols("document_question", "document_context")
+
+val spanClassifier = BertForQuestionAnswering
+.pretrained("bert_qa_output_files","en")
+.setInputCols(Array("document_question", "document_context"))
+.setOutputCol("answer")
+.setCaseSensitive(true)
+.setMaxSentenceLength(512)
+
+val pipeline = new Pipeline().setStages(Array(document, spanClassifier))
+
+val example = Seq(
+("Where was John Lenon born?", "John Lenon was born in London and lived in Paris. My name is Sarah and I live in London."),
+("What's my name?", "My name is Clara and I live in Berkeley."))
+.toDF("question", "context")
+
+val result = pipeline.fit(example).transform(example)
+```
+
+{:.nlu-block}
+```python
+import nlu
+nlu.load("en.answer_question.output_files.bert.by_sunitha").predict("""What's my name?|||"My name is Clara and I live in Berkeley.""")
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|bert_qa_output_files|
+|Compatibility:|Spark NLP 5.2.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|403.6 MB|
+|Case sensitive:|true|
+|Max sentence length:|512|
+
+## References
+
+References
+
+- https://huggingface.co/sunitha/output_files
\ No newline at end of file
diff --git a/docs/_posts/ahmedlone127/2023-11-15-bert_qa_paranoidandroid_finetuned_squad_en.md b/docs/_posts/ahmedlone127/2023-11-15-bert_qa_paranoidandroid_finetuned_squad_en.md
new file mode 100644
index 00000000000000..953e35642bd83f
--- /dev/null
+++ b/docs/_posts/ahmedlone127/2023-11-15-bert_qa_paranoidandroid_finetuned_squad_en.md
@@ -0,0 +1,100 @@
+---
+layout: model
+title: English BertForQuestionAnswering Cased model (from ParanoidAndroid)
+author: John Snow Labs
+name: bert_qa_paranoidandroid_finetuned_squad
+date: 2023-11-15
+tags: [en, open_source, bert, question_answering, onnx]
+task: Question Answering
+language: en
+edition: Spark NLP 5.2.0
+spark_version: 3.0
+supported: true
+engine: onnx
+annotator: BertForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained Question Answering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `bert-finetuned-squad` is a English model originally trained by `ParanoidAndroid`.
+
+## Predicted Entities
+
+
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/bert_qa_paranoidandroid_finetuned_squad_en_5.2.0_3.0_1700012780240.zip){:.button.button-orange.button-orange-trans.arr.button-icon}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/bert_qa_paranoidandroid_finetuned_squad_en_5.2.0_3.0_1700012780240.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+documentAssembler = MultiDocumentAssembler() \
+ .setInputCols(["question", "context"]) \
+ .setOutputCols(["document_question", "document_context"])
+
+spanClassifier = BertForQuestionAnswering.pretrained("bert_qa_paranoidandroid_finetuned_squad","en") \
+ .setInputCols(["document_question", "document_context"]) \
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[documentAssembler, spanClassifier])
+
+data = spark.createDataFrame([["What is my name?", "My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val documentAssembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val spanClassifier = BertForQuestionAnswering.pretrained("bert_qa_paranoidandroid_finetuned_squad","en")
+  .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(documentAssembler, spanClassifier))
+
+val data = Seq(("What is my name?", "My name is Clara and I live in Berkeley.")).toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+{:.nlu-block}
+```python
+import nlu
+nlu.load("en.answer_question.bert.squad.finetuned.by_ParanoidAndroid").predict("""What is my name?|||"My name is Clara and I live in Berkeley.""")
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|bert_qa_paranoidandroid_finetuned_squad|
+|Compatibility:|Spark NLP 5.2.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|403.6 MB|
+|Case sensitive:|true|
+|Max sentence length:|512|
+
+## References
+
+References
+
+- https://huggingface.co/ParanoidAndroid/bert-finetuned-squad
\ No newline at end of file
diff --git a/docs/_posts/ahmedlone127/2023-11-15-bert_qa_pars_fa.md b/docs/_posts/ahmedlone127/2023-11-15-bert_qa_pars_fa.md
new file mode 100644
index 00000000000000..a096421a26217b
--- /dev/null
+++ b/docs/_posts/ahmedlone127/2023-11-15-bert_qa_pars_fa.md
@@ -0,0 +1,94 @@
+---
+layout: model
+title: Persian BertForQuestionAnswering Cased model (from sepiosky)
+author: John Snow Labs
+name: bert_qa_pars
+date: 2023-11-15
+tags: [fa, open_source, bert, question_answering, onnx]
+task: Question Answering
+language: fa
+edition: Spark NLP 5.2.0
+spark_version: 3.0
+supported: true
+engine: onnx
+annotator: BertForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained BertForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `ParsBERT_QA` is a Persian model originally trained by `sepiosky`.
+
+## Predicted Entities
+
+
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/bert_qa_pars_fa_5.2.0_3.0_1700012639595.zip){:.button.button-orange.button-orange-trans.arr.button-icon}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/bert_qa_pars_fa_5.2.0_3.0_1700012639595.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = BertForQuestionAnswering.pretrained("bert_qa_pars","fa")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = BertForQuestionAnswering.pretrained("bert_qa_pars","fa")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq(("What's my name?","My name is Clara and I live in Berkeley.")).toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|bert_qa_pars|
+|Compatibility:|Spark NLP 5.2.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|fa|
+|Size:|606.5 MB|
+|Case sensitive:|true|
+|Max sentence length:|512|
+
+## References
+
+References
+
+- https://huggingface.co/sepiosky/ParsBERT_QA
\ No newline at end of file
diff --git a/docs/_posts/ahmedlone127/2023-11-15-bert_qa_pars_question_answering_pquad_fa.md b/docs/_posts/ahmedlone127/2023-11-15-bert_qa_pars_question_answering_pquad_fa.md
new file mode 100644
index 00000000000000..22aa6c6450b4c4
--- /dev/null
+++ b/docs/_posts/ahmedlone127/2023-11-15-bert_qa_pars_question_answering_pquad_fa.md
@@ -0,0 +1,98 @@
+---
+layout: model
+title: Persian BertForQuestionAnswering Cased model (from pedramyazdipoor)
+author: John Snow Labs
+name: bert_qa_pars_question_answering_pquad
+date: 2023-11-15
+tags: [fa, open_source, bert, question_answering, onnx]
+task: Question Answering
+language: fa
+edition: Spark NLP 5.2.0
+spark_version: 3.0
+supported: true
+engine: onnx
+annotator: BertForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained BertForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `parsbert_question_answering_PQuAD` is a Persian model originally trained by `pedramyazdipoor`.
+
+## Predicted Entities
+
+
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/bert_qa_pars_question_answering_pquad_fa_5.2.0_3.0_1700010005378.zip){:.button.button-orange.button-orange-trans.arr.button-icon}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/bert_qa_pars_question_answering_pquad_fa_5.2.0_3.0_1700010005378.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = BertForQuestionAnswering.pretrained("bert_qa_pars_question_answering_pquad","fa")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = BertForQuestionAnswering.pretrained("bert_qa_pars_question_answering_pquad","fa")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq(("What's my name?","My name is Clara and I live in Berkeley.")).toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|bert_qa_pars_question_answering_pquad|
+|Compatibility:|Spark NLP 5.2.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|fa|
+|Size:|606.5 MB|
+|Case sensitive:|true|
+|Max sentence length:|512|
+
+## References
+
+References
+
+- https://huggingface.co/pedramyazdipoor/parsbert_question_answering_PQuAD
+- https://github.com/pedramyazdipoor/ParsBert_QA_PQuAD
+- https://arxiv.org/abs/2005.12515
+- https://arxiv.org/abs/2202.06219
+- https://www.linkedin.com/in/pedram-yazdipour/
\ No newline at end of file
diff --git a/docs/_posts/ahmedlone127/2023-11-15-bert_qa_parsbert_finetuned_persianqa_fa.md b/docs/_posts/ahmedlone127/2023-11-15-bert_qa_parsbert_finetuned_persianqa_fa.md
new file mode 100644
index 00000000000000..5aa1265d171b47
--- /dev/null
+++ b/docs/_posts/ahmedlone127/2023-11-15-bert_qa_parsbert_finetuned_persianqa_fa.md
@@ -0,0 +1,94 @@
+---
+layout: model
+title: Persian BertForQuestionAnswering Cased model (from marzinouri101)
+author: John Snow Labs
+name: bert_qa_parsbert_finetuned_persianqa
+date: 2023-11-15
+tags: [fa, open_source, bert, question_answering, onnx]
+task: Question Answering
+language: fa
+edition: Spark NLP 5.2.0
+spark_version: 3.0
+supported: true
+engine: onnx
+annotator: BertForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained Question Answering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `parsbert-finetuned-persianQA` is a Persian model originally trained by `marzinouri101`.
+
+## Predicted Entities
+
+
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/bert_qa_parsbert_finetuned_persianqa_fa_5.2.0_3.0_1700013045523.zip){:.button.button-orange.button-orange-trans.arr.button-icon}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/bert_qa_parsbert_finetuned_persianqa_fa_5.2.0_3.0_1700013045523.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+documentAssembler = MultiDocumentAssembler() \
+ .setInputCols(["question", "context"]) \
+ .setOutputCols(["document_question", "document_context"])
+
+spanClassifier = BertForQuestionAnswering.pretrained("bert_qa_parsbert_finetuned_persianqa","fa") \
+ .setInputCols(["document_question", "document_context"]) \
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[documentAssembler, spanClassifier])
+
+data = spark.createDataFrame([["اسم من چیست؟", "نام من کلارا است و من در برکلی زندگی می کنم."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val documentAssembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val spanClassifier = BertForQuestionAnswering.pretrained("bert_qa_parsbert_finetuned_persianqa","fa")
+    .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(documentAssembler, spanClassifier))
+
+val data = Seq(("اسم من چیست؟", "نام من کلارا است و من در برکلی زندگی می کنم.")).toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|bert_qa_parsbert_finetuned_persianqa|
+|Compatibility:|Spark NLP 5.2.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|fa|
+|Size:|441.6 MB|
+|Case sensitive:|true|
+|Max sentence length:|512|
+
+## References
+
+References
+
+- https://huggingface.co/marzinouri101/parsbert-finetuned-persianQA
\ No newline at end of file
diff --git a/docs/_posts/ahmedlone127/2023-11-15-bert_qa_pert_zh.md b/docs/_posts/ahmedlone127/2023-11-15-bert_qa_pert_zh.md
new file mode 100644
index 00000000000000..d5ea1007c76d39
--- /dev/null
+++ b/docs/_posts/ahmedlone127/2023-11-15-bert_qa_pert_zh.md
@@ -0,0 +1,94 @@
+---
+layout: model
+title: Chinese BertForQuestionAnswering Cased model (from cgt)
+author: John Snow Labs
+name: bert_qa_pert
+date: 2023-11-15
+tags: [zh, open_source, bert, question_answering, onnx]
+task: Question Answering
+language: zh
+edition: Spark NLP 5.2.0
+spark_version: 3.0
+supported: true
+engine: onnx
+annotator: BertForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained BertForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `pert-qa` is a Chinese model originally trained by `cgt`.
+
+## Predicted Entities
+
+
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/bert_qa_pert_zh_5.2.0_3.0_1700006794991.zip){:.button.button-orange.button-orange-trans.arr.button-icon}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/bert_qa_pert_zh_5.2.0_3.0_1700006794991.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = BertForQuestionAnswering.pretrained("bert_qa_pert","zh")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = BertForQuestionAnswering.pretrained("bert_qa_pert","zh")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq(("What's my name?","My name is Clara and I live in Berkeley.")).toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|bert_qa_pert|
+|Compatibility:|Spark NLP 5.2.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|zh|
+|Size:|1.2 GB|
+|Case sensitive:|true|
+|Max sentence length:|512|
+
+## References
+
+References
+
+- https://huggingface.co/cgt/pert-qa
\ No newline at end of file
diff --git a/docs/_posts/ahmedlone127/2023-11-15-bert_qa_peterhsu_bert_finetuned_squad_accelerate_en.md b/docs/_posts/ahmedlone127/2023-11-15-bert_qa_peterhsu_bert_finetuned_squad_accelerate_en.md
new file mode 100644
index 00000000000000..0f5b3df4da4211
--- /dev/null
+++ b/docs/_posts/ahmedlone127/2023-11-15-bert_qa_peterhsu_bert_finetuned_squad_accelerate_en.md
@@ -0,0 +1,108 @@
+---
+layout: model
+title: English BertForQuestionAnswering model (from peterhsu)
+author: John Snow Labs
+name: bert_qa_peterhsu_bert_finetuned_squad_accelerate
+date: 2023-11-15
+tags: [en, open_source, question_answering, bert, onnx]
+task: Question Answering
+language: en
+edition: Spark NLP 5.2.0
+spark_version: 3.0
+supported: true
+engine: onnx
+annotator: BertForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained Question Answering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `bert-finetuned-squad-accelerate` is an English model originally trained by `peterhsu`.
+
+## Predicted Entities
+
+
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/bert_qa_peterhsu_bert_finetuned_squad_accelerate_en_5.2.0_3.0_1700007359588.zip){:.button.button-orange.button-orange-trans.arr.button-icon}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/bert_qa_peterhsu_bert_finetuned_squad_accelerate_en_5.2.0_3.0_1700007359588.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+document_assembler = MultiDocumentAssembler() \
+.setInputCols(["question", "context"]) \
+.setOutputCols(["document_question", "document_context"])
+
+spanClassifier = BertForQuestionAnswering.pretrained("bert_qa_peterhsu_bert_finetuned_squad_accelerate","en") \
+.setInputCols(["document_question", "document_context"]) \
+.setOutputCol("answer") \
+.setCaseSensitive(True)
+
+pipeline = Pipeline().setStages([
+document_assembler,
+spanClassifier
+])
+
+example = spark.createDataFrame([["What's my name?", "My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(example).transform(example)
+```
+```scala
+val document = new MultiDocumentAssembler()
+.setInputCols("question", "context")
+.setOutputCols("document_question", "document_context")
+
+val spanClassifier = BertForQuestionAnswering
+.pretrained("bert_qa_peterhsu_bert_finetuned_squad_accelerate","en")
+.setInputCols(Array("document_question", "document_context"))
+.setOutputCol("answer")
+.setCaseSensitive(true)
+.setMaxSentenceLength(512)
+
+val pipeline = new Pipeline().setStages(Array(document, spanClassifier))
+
+val example = Seq(
+("Where was John Lenon born?", "John Lenon was born in London and lived in Paris. My name is Sarah and I live in London."),
+("What's my name?", "My name is Clara and I live in Berkeley."))
+.toDF("question", "context")
+
+val result = pipeline.fit(example).transform(example)
+```
+
+{:.nlu-block}
+```python
+import nlu
+nlu.load("en.answer_question.squad.bert.accelerate.by_peterhsu").predict("""What's my name?|||"My name is Clara and I live in Berkeley.""")
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|bert_qa_peterhsu_bert_finetuned_squad_accelerate|
+|Compatibility:|Spark NLP 5.2.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|403.6 MB|
+|Case sensitive:|true|
+|Max sentence length:|512|
+
+## References
+
+References
+
+- https://huggingface.co/peterhsu/bert-finetuned-squad-accelerate
\ No newline at end of file
diff --git a/docs/_posts/ahmedlone127/2023-11-15-bert_qa_peterhsu_bert_finetuned_squad_en.md b/docs/_posts/ahmedlone127/2023-11-15-bert_qa_peterhsu_bert_finetuned_squad_en.md
new file mode 100644
index 00000000000000..9005693e5705b8
--- /dev/null
+++ b/docs/_posts/ahmedlone127/2023-11-15-bert_qa_peterhsu_bert_finetuned_squad_en.md
@@ -0,0 +1,108 @@
+---
+layout: model
+title: English BertForQuestionAnswering model (from peterhsu)
+author: John Snow Labs
+name: bert_qa_peterhsu_bert_finetuned_squad
+date: 2023-11-15
+tags: [en, open_source, question_answering, bert, onnx]
+task: Question Answering
+language: en
+edition: Spark NLP 5.2.0
+spark_version: 3.0
+supported: true
+engine: onnx
+annotator: BertForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained Question Answering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `bert-finetuned-squad` is an English model originally trained by `peterhsu`.
+
+## Predicted Entities
+
+
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/bert_qa_peterhsu_bert_finetuned_squad_en_5.2.0_3.0_1700007104039.zip){:.button.button-orange.button-orange-trans.arr.button-icon}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/bert_qa_peterhsu_bert_finetuned_squad_en_5.2.0_3.0_1700007104039.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+document_assembler = MultiDocumentAssembler() \
+.setInputCols(["question", "context"]) \
+.setOutputCols(["document_question", "document_context"])
+
+spanClassifier = BertForQuestionAnswering.pretrained("bert_qa_peterhsu_bert_finetuned_squad","en") \
+.setInputCols(["document_question", "document_context"]) \
+.setOutputCol("answer") \
+.setCaseSensitive(True)
+
+pipeline = Pipeline().setStages([
+document_assembler,
+spanClassifier
+])
+
+example = spark.createDataFrame([["What's my name?", "My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(example).transform(example)
+```
+```scala
+val document = new MultiDocumentAssembler()
+.setInputCols("question", "context")
+.setOutputCols("document_question", "document_context")
+
+val spanClassifier = BertForQuestionAnswering
+.pretrained("bert_qa_peterhsu_bert_finetuned_squad","en")
+.setInputCols(Array("document_question", "document_context"))
+.setOutputCol("answer")
+.setCaseSensitive(true)
+.setMaxSentenceLength(512)
+
+val pipeline = new Pipeline().setStages(Array(document, spanClassifier))
+
+val example = Seq(
+("Where was John Lenon born?", "John Lenon was born in London and lived in Paris. My name is Sarah and I live in London."),
+("What's my name?", "My name is Clara and I live in Berkeley."))
+.toDF("question", "context")
+
+val result = pipeline.fit(example).transform(example)
+```
+
+{:.nlu-block}
+```python
+import nlu
+nlu.load("en.answer_question.squad.bert.v2.by_peterhsu").predict("""What's my name?|||"My name is Clara and I live in Berkeley.""")
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|bert_qa_peterhsu_bert_finetuned_squad|
+|Compatibility:|Spark NLP 5.2.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|403.6 MB|
+|Case sensitive:|true|
+|Max sentence length:|512|
+
+## References
+
+References
+
+- https://huggingface.co/peterhsu/bert-finetuned-squad
\ No newline at end of file
diff --git a/docs/_posts/ahmedlone127/2023-11-15-bert_qa_petros89_finetuned_squad_en.md b/docs/_posts/ahmedlone127/2023-11-15-bert_qa_petros89_finetuned_squad_en.md
new file mode 100644
index 00000000000000..820e90fca5e293
--- /dev/null
+++ b/docs/_posts/ahmedlone127/2023-11-15-bert_qa_petros89_finetuned_squad_en.md
@@ -0,0 +1,94 @@
+---
+layout: model
+title: English BertForQuestionAnswering Cased model (from Petros89)
+author: John Snow Labs
+name: bert_qa_petros89_finetuned_squad
+date: 2023-11-15
+tags: [en, open_source, bert, question_answering, onnx]
+task: Question Answering
+language: en
+edition: Spark NLP 5.2.0
+spark_version: 3.0
+supported: true
+engine: onnx
+annotator: BertForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained BertForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `bert-finetuned-squad` is a English model originally trained by `Petros89`.
+
+## Predicted Entities
+
+
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/bert_qa_petros89_finetuned_squad_en_5.2.0_3.0_1700012871512.zip){:.button.button-orange.button-orange-trans.arr.button-icon}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/bert_qa_petros89_finetuned_squad_en_5.2.0_3.0_1700012871512.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = BertForQuestionAnswering.pretrained("bert_qa_petros89_finetuned_squad","en")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = BertForQuestionAnswering.pretrained("bert_qa_petros89_finetuned_squad","en")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq(("What's my name?","My name is Clara and I live in Berkeley.")).toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|bert_qa_petros89_finetuned_squad|
+|Compatibility:|Spark NLP 5.2.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|403.6 MB|
+|Case sensitive:|true|
+|Max sentence length:|512|
+
+## References
+
+References
+
+- https://huggingface.co/Petros89/bert-finetuned-squad
\ No newline at end of file
diff --git a/docs/_posts/ahmedlone127/2023-11-15-bert_qa_pquad_fa.md b/docs/_posts/ahmedlone127/2023-11-15-bert_qa_pquad_fa.md
new file mode 100644
index 00000000000000..2ebe24afedbc48
--- /dev/null
+++ b/docs/_posts/ahmedlone127/2023-11-15-bert_qa_pquad_fa.md
@@ -0,0 +1,94 @@
+---
+layout: model
+title: Persian BertForQuestionAnswering Cased model (from newsha)
+author: John Snow Labs
+name: bert_qa_pquad
+date: 2023-11-15
+tags: [fa, open_source, bert, question_answering, onnx]
+task: Question Answering
+language: fa
+edition: Spark NLP 5.2.0
+spark_version: 3.0
+supported: true
+engine: onnx
+annotator: BertForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained Question Answering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `PQuAD` is a Persian model originally trained by `newsha`.
+
+## Predicted Entities
+
+
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/bert_qa_pquad_fa_5.2.0_3.0_1700013178126.zip){:.button.button-orange.button-orange-trans.arr.button-icon}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/bert_qa_pquad_fa_5.2.0_3.0_1700013178126.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+documentAssembler = MultiDocumentAssembler() \
+ .setInputCols(["question", "context"]) \
+ .setOutputCols(["document_question", "document_context"])
+
+spanClassifier = BertForQuestionAnswering.pretrained("bert_qa_pquad","fa") \
+ .setInputCols(["document_question", "document_context"]) \
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[documentAssembler, spanClassifier])
+
+data = spark.createDataFrame([["اسم من چیست؟", "نام من کلارا است و من در برکلی زندگی می کنم."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val documentAssembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val spanClassifier = BertForQuestionAnswering.pretrained("bert_qa_pquad","fa")
+    .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(documentAssembler, spanClassifier))
+
+val data = Seq(("اسم من چیست؟", "نام من کلارا است و من در برکلی زندگی می کنم.")).toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|bert_qa_pquad|
+|Compatibility:|Spark NLP 5.2.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|fa|
+|Size:|606.5 MB|
+|Case sensitive:|true|
+|Max sentence length:|512|
+
+## References
+
+References
+
+- https://huggingface.co/newsha/PQuAD
\ No newline at end of file
diff --git a/docs/_posts/ahmedlone127/2023-11-15-bert_qa_pubmed_bert_squadv2_en.md b/docs/_posts/ahmedlone127/2023-11-15-bert_qa_pubmed_bert_squadv2_en.md
new file mode 100644
index 00000000000000..d399819f545c09
--- /dev/null
+++ b/docs/_posts/ahmedlone127/2023-11-15-bert_qa_pubmed_bert_squadv2_en.md
@@ -0,0 +1,109 @@
+---
+layout: model
+title: English BertForQuestionAnswering model (from franklu)
+author: John Snow Labs
+name: bert_qa_pubmed_bert_squadv2
+date: 2023-11-15
+tags: [en, open_source, question_answering, bert, onnx]
+task: Question Answering
+language: en
+edition: Spark NLP 5.2.0
+spark_version: 3.0
+supported: true
+engine: onnx
+annotator: BertForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained Question Answering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `pubmed_bert_squadv2` is an English model originally trained by `franklu`.
+
+## Predicted Entities
+
+
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/bert_qa_pubmed_bert_squadv2_en_5.2.0_3.0_1700007643318.zip){:.button.button-orange.button-orange-trans.arr.button-icon}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/bert_qa_pubmed_bert_squadv2_en_5.2.0_3.0_1700007643318.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+document_assembler = MultiDocumentAssembler() \
+.setInputCols(["question", "context"]) \
+.setOutputCols(["document_question", "document_context"])
+
+spanClassifier = BertForQuestionAnswering.pretrained("bert_qa_pubmed_bert_squadv2","en") \
+.setInputCols(["document_question", "document_context"]) \
+.setOutputCol("answer") \
+.setCaseSensitive(True)
+
+pipeline = Pipeline().setStages([
+document_assembler,
+spanClassifier
+])
+
+example = spark.createDataFrame([["What's my name?", "My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(example).transform(example)
+```
+```scala
+val document = new MultiDocumentAssembler()
+.setInputCols("question", "context")
+.setOutputCols("document_question", "document_context")
+
+val spanClassifier = BertForQuestionAnswering
+.pretrained("bert_qa_pubmed_bert_squadv2","en")
+.setInputCols(Array("document_question", "document_context"))
+.setOutputCol("answer")
+.setCaseSensitive(true)
+.setMaxSentenceLength(512)
+
+val pipeline = new Pipeline().setStages(Array(document, spanClassifier))
+
+val example = Seq(
+("Where was John Lenon born?", "John Lenon was born in London and lived in Paris. My name is Sarah and I live in London."),
+("What's my name?", "My name is Clara and I live in Berkeley."))
+.toDF("question", "context")
+
+val result = pipeline.fit(example).transform(example)
+```
+
+{:.nlu-block}
+```python
+import nlu
+nlu.load("en.answer_question.squadv2_pubmed.bert.v2").predict("""What's my name?|||"My name is Clara and I live in Berkeley.""")
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|bert_qa_pubmed_bert_squadv2|
+|Compatibility:|Spark NLP 5.2.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|408.2 MB|
+|Case sensitive:|true|
+|Max sentence length:|512|
+
+## References
+
+References
+
+- https://huggingface.co/franklu/pubmed_bert_squadv2
+- https://rajpurkar.github.io/SQuAD-explorer/
\ No newline at end of file
diff --git a/docs/_posts/ahmedlone127/2023-11-15-bert_qa_qa_roberta_base_chinese_extractive_zh.md b/docs/_posts/ahmedlone127/2023-11-15-bert_qa_qa_roberta_base_chinese_extractive_zh.md
new file mode 100644
index 00000000000000..f915d8bf408d64
--- /dev/null
+++ b/docs/_posts/ahmedlone127/2023-11-15-bert_qa_qa_roberta_base_chinese_extractive_zh.md
@@ -0,0 +1,108 @@
+---
+layout: model
+title: Chinese BertForQuestionAnswering model (from liam168)
+author: John Snow Labs
+name: bert_qa_qa_roberta_base_chinese_extractive
+date: 2023-11-15
+tags: [zh, open_source, question_answering, bert, onnx]
+task: Question Answering
+language: zh
+edition: Spark NLP 5.2.0
+spark_version: 3.0
+supported: true
+engine: onnx
+annotator: BertForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained Question Answering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `qa-roberta-base-chinese-extractive` is a Chinese model originally trained by `liam168`.
+
+## Predicted Entities
+
+
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/bert_qa_qa_roberta_base_chinese_extractive_zh_5.2.0_3.0_1700013334469.zip){:.button.button-orange.button-orange-trans.arr.button-icon}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/bert_qa_qa_roberta_base_chinese_extractive_zh_5.2.0_3.0_1700013334469.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+document_assembler = MultiDocumentAssembler() \
+.setInputCols(["question", "context"]) \
+.setOutputCols(["document_question", "document_context"])
+
+spanClassifier = BertForQuestionAnswering.pretrained("bert_qa_qa_roberta_base_chinese_extractive","zh") \
+.setInputCols(["document_question", "document_context"]) \
+.setOutputCol("answer") \
+.setCaseSensitive(True)
+
+pipeline = Pipeline().setStages([
+document_assembler,
+spanClassifier
+])
+
+example = spark.createDataFrame([["What's my name?", "My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(example).transform(example)
+```
+```scala
+val document = new MultiDocumentAssembler()
+.setInputCols("question", "context")
+.setOutputCols("document_question", "document_context")
+
+val spanClassifier = BertForQuestionAnswering
+.pretrained("bert_qa_qa_roberta_base_chinese_extractive","zh")
+.setInputCols(Array("document_question", "document_context"))
+.setOutputCol("answer")
+.setCaseSensitive(true)
+.setMaxSentenceLength(512)
+
+val pipeline = new Pipeline().setStages(Array(document, spanClassifier))
+
+val example = Seq(
+("Where was John Lenon born?", "John Lenon was born in London and lived in Paris. My name is Sarah and I live in London."),
+("What's my name?", "My name is Clara and I live in Berkeley."))
+.toDF("question", "context")
+
+val result = pipeline.fit(example).transform(example)
+```
+
+{:.nlu-block}
+```python
+import nlu
+nlu.load("zh.answer_question.bert.base.by_liam168").predict("""What's my name?|||"My name is Clara and I live in Berkeley.""")
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|bert_qa_qa_roberta_base_chinese_extractive|
+|Compatibility:|Spark NLP 5.2.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|zh|
+|Size:|380.8 MB|
+|Case sensitive:|true|
+|Max sentence length:|512|
+
+## References
+
+References
+
+- https://huggingface.co/liam168/qa-roberta-base-chinese-extractive
\ No newline at end of file
diff --git a/docs/_posts/ahmedlone127/2023-11-15-bert_qa_qaconv_bert_large_uncased_whole_word_masking_squad2_en.md b/docs/_posts/ahmedlone127/2023-11-15-bert_qa_qaconv_bert_large_uncased_whole_word_masking_squad2_en.md
new file mode 100644
index 00000000000000..e2609a82f9cc3f
--- /dev/null
+++ b/docs/_posts/ahmedlone127/2023-11-15-bert_qa_qaconv_bert_large_uncased_whole_word_masking_squad2_en.md
@@ -0,0 +1,108 @@
+---
+layout: model
+title: English BertForQuestionAnswering model (from Salesforce)
+author: John Snow Labs
+name: bert_qa_qaconv_bert_large_uncased_whole_word_masking_squad2
+date: 2023-11-15
+tags: [en, open_source, question_answering, bert, onnx]
+task: Question Answering
+language: en
+edition: Spark NLP 5.2.0
+spark_version: 3.0
+supported: true
+engine: onnx
+annotator: BertForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained Question Answering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `qaconv-bert-large-uncased-whole-word-masking-squad2` is an English model originally trained by `Salesforce`.
+
+## Predicted Entities
+
+
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/bert_qa_qaconv_bert_large_uncased_whole_word_masking_squad2_en_5.2.0_3.0_1700014185929.zip){:.button.button-orange.button-orange-trans.arr.button-icon}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/bert_qa_qaconv_bert_large_uncased_whole_word_masking_squad2_en_5.2.0_3.0_1700014185929.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+document_assembler = MultiDocumentAssembler() \
+.setInputCols(["question", "context"]) \
+.setOutputCols(["document_question", "document_context"])
+
+spanClassifier = BertForQuestionAnswering.pretrained("bert_qa_qaconv_bert_large_uncased_whole_word_masking_squad2","en") \
+.setInputCols(["document_question", "document_context"]) \
+.setOutputCol("answer") \
+.setCaseSensitive(True)
+
+pipeline = Pipeline().setStages([
+document_assembler,
+spanClassifier
+])
+
+example = spark.createDataFrame([["What's my name?", "My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(example).transform(example)
+```
+```scala
+val document = new MultiDocumentAssembler()
+.setInputCols("question", "context")
+.setOutputCols("document_question", "document_context")
+
+val spanClassifier = BertForQuestionAnswering
+.pretrained("bert_qa_qaconv_bert_large_uncased_whole_word_masking_squad2","en")
+.setInputCols(Array("document_question", "document_context"))
+.setOutputCol("answer")
+.setCaseSensitive(true)
+.setMaxSentenceLength(512)
+
+val pipeline = new Pipeline().setStages(Array(document, spanClassifier))
+
+val example = Seq(
+("Where was John Lenon born?", "John Lenon was born in London and lived in Paris. My name is Sarah and I live in London."),
+("What's my name?", "My name is Clara and I live in Berkeley."))
+.toDF("question", "context")
+
+val result = pipeline.fit(example).transform(example)
+```
+
+{:.nlu-block}
+```python
+import nlu
+nlu.load("en.answer_question.squadv2.bert.large_uncased.by_Salesforce").predict("""What's my name?|||"My name is Clara and I live in Berkeley.""")
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|bert_qa_qaconv_bert_large_uncased_whole_word_masking_squad2|
+|Compatibility:|Spark NLP 5.2.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|1.3 GB|
+|Case sensitive:|false|
+|Max sentence length:|512|
+
+## References
+
+References
+
+- https://huggingface.co/Salesforce/qaconv-bert-large-uncased-whole-word-masking-squad2
\ No newline at end of file
diff --git a/docs/_posts/ahmedlone127/2023-11-15-bert_qa_qgrantq_finetuned_squad_en.md b/docs/_posts/ahmedlone127/2023-11-15-bert_qa_qgrantq_finetuned_squad_en.md
new file mode 100644
index 00000000000000..7a00e175708ac1
--- /dev/null
+++ b/docs/_posts/ahmedlone127/2023-11-15-bert_qa_qgrantq_finetuned_squad_en.md
@@ -0,0 +1,100 @@
+---
+layout: model
+title: English BertForQuestionAnswering Cased model (from qgrantq)
+author: John Snow Labs
+name: bert_qa_qgrantq_finetuned_squad
+date: 2023-11-15
+tags: [en, open_source, bert, question_answering, onnx]
+task: Question Answering
+language: en
+edition: Spark NLP 5.2.0
+spark_version: 3.0
+supported: true
+engine: onnx
+annotator: BertForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained Question Answering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `bert-finetuned-squad` is an English model originally trained by `qgrantq`.
+
+## Predicted Entities
+
+
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/bert_qa_qgrantq_finetuned_squad_en_5.2.0_3.0_1700007983639.zip){:.button.button-orange.button-orange-trans.arr.button-icon}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/bert_qa_qgrantq_finetuned_squad_en_5.2.0_3.0_1700007983639.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+documentAssembler = MultiDocumentAssembler() \
+ .setInputCols(["question", "context"]) \
+ .setOutputCols(["document_question", "document_context"])
+
+spanClassifier = BertForQuestionAnswering.pretrained("bert_qa_qgrantq_finetuned_squad","en") \
+ .setInputCols(["document_question", "document_context"]) \
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[documentAssembler, spanClassifier])
+
+data = spark.createDataFrame([["What is my name?", "My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val documentAssembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val spanClassifier = BertForQuestionAnswering.pretrained("bert_qa_qgrantq_finetuned_squad","en")
+  .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(documentAssembler, spanClassifier))
+
+val data = Seq(("What is my name?", "My name is Clara and I live in Berkeley.")).toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+{:.nlu-block}
+```python
+import nlu
+nlu.load("en.answer_question.bert.squad.finetuned.by_qgrantq").predict("""What is my name?|||"My name is Clara and I live in Berkeley.""")
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|bert_qa_qgrantq_finetuned_squad|
+|Compatibility:|Spark NLP 5.2.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|403.7 MB|
+|Case sensitive:|true|
+|Max sentence length:|512|
+
+## References
+
+References
+
+- https://huggingface.co/qgrantq/bert-finetuned-squad
\ No newline at end of file
diff --git a/docs/_posts/ahmedlone127/2023-11-15-bert_qa_question_answering_cased_squadv2_tr.md b/docs/_posts/ahmedlone127/2023-11-15-bert_qa_question_answering_cased_squadv2_tr.md
new file mode 100644
index 00000000000000..53f232977a6a93
--- /dev/null
+++ b/docs/_posts/ahmedlone127/2023-11-15-bert_qa_question_answering_cased_squadv2_tr.md
@@ -0,0 +1,94 @@
+---
+layout: model
+title: Turkish BertForQuestionAnswering Cased model (from enelpi)
+author: John Snow Labs
+name: bert_qa_question_answering_cased_squadv2
+date: 2023-11-15
+tags: [tr, open_source, bert, question_answering, onnx]
+task: Question Answering
+language: tr
+edition: Spark NLP 5.2.0
+spark_version: 3.0
+supported: true
+engine: onnx
+annotator: BertForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained Question Answering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `bert-question-answering-cased-squadv2_tr` is a Turkish model originally trained by `enelpi`.
+
+## Predicted Entities
+
+
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/bert_qa_question_answering_cased_squadv2_tr_5.2.0_3.0_1700008230626.zip){:.button.button-orange.button-orange-trans.arr.button-icon}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/bert_qa_question_answering_cased_squadv2_tr_5.2.0_3.0_1700008230626.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+documentAssembler = MultiDocumentAssembler() \
+ .setInputCols(["question", "context"]) \
+ .setOutputCols(["document_question", "document_context"])
+
+spanClassifier = BertForQuestionAnswering.pretrained("bert_qa_question_answering_cased_squadv2","tr") \
+ .setInputCols(["document_question", "document_context"]) \
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[documentAssembler, spanClassifier])
+
+data = spark.createDataFrame([["Benim adım ne?", "Benim adım Clara ve Berkeley'de yaşıyorum."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val documentAssembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val spanClassifier = BertForQuestionAnswering.pretrained("bert_qa_question_answering_cased_squadv2","tr")
+  .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(documentAssembler, spanClassifier))
+
+val data = Seq(("Benim adım ne?", "Benim adım Clara ve Berkeley'de yaşıyorum.")).toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|bert_qa_question_answering_cased_squadv2|
+|Compatibility:|Spark NLP 5.2.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|tr|
+|Size:|412.3 MB|
+|Case sensitive:|true|
+|Max sentence length:|512|
+
+## References
+
+References
+
+- https://huggingface.co/enelpi/bert-question-answering-cased-squadv2_tr
\ No newline at end of file
diff --git a/docs/_posts/ahmedlone127/2023-11-15-bert_qa_question_answering_chinese_zh.md b/docs/_posts/ahmedlone127/2023-11-15-bert_qa_question_answering_chinese_zh.md
new file mode 100644
index 00000000000000..5a9aacee823578
--- /dev/null
+++ b/docs/_posts/ahmedlone127/2023-11-15-bert_qa_question_answering_chinese_zh.md
@@ -0,0 +1,108 @@
+---
+layout: model
+title: Chinese BertForQuestionAnswering model (from yechen)
+author: John Snow Labs
+name: bert_qa_question_answering_chinese
+date: 2023-11-15
+tags: [zh, open_source, question_answering, bert, onnx]
+task: Question Answering
+language: zh
+edition: Spark NLP 5.2.0
+spark_version: 3.0
+supported: true
+engine: onnx
+annotator: BertForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained Question Answering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `question-answering-chinese` is a Chinese model originally trained by `yechen`.
+
+## Predicted Entities
+
+
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/bert_qa_question_answering_chinese_zh_5.2.0_3.0_1700014729623.zip){:.button.button-orange.button-orange-trans.arr.button-icon}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/bert_qa_question_answering_chinese_zh_5.2.0_3.0_1700014729623.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+document_assembler = MultiDocumentAssembler() \
+.setInputCols(["question", "context"]) \
+.setOutputCols(["document_question", "document_context"])
+
+spanClassifier = BertForQuestionAnswering.pretrained("bert_qa_question_answering_chinese","zh") \
+.setInputCols(["document_question", "document_context"]) \
+.setOutputCol("answer") \
+.setCaseSensitive(True)
+
+pipeline = Pipeline().setStages([
+document_assembler,
+spanClassifier
+])
+
+example = spark.createDataFrame([["What's my name?", "My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(example).transform(example)
+```
+```scala
+val document = new MultiDocumentAssembler()
+.setInputCols("question", "context")
+.setOutputCols("document_question", "document_context")
+
+val spanClassifier = BertForQuestionAnswering
+.pretrained("bert_qa_question_answering_chinese","zh")
+.setInputCols(Array("document_question", "document_context"))
+.setOutputCol("answer")
+.setCaseSensitive(true)
+.setMaxSentenceLength(512)
+
+val pipeline = new Pipeline().setStages(Array(document, spanClassifier))
+
+val example = Seq(
+("Where was John Lennon born?", "John Lennon was born in London and lived in Paris. My name is Sarah and I live in London."),
+("What's my name?", "My name is Clara and I live in Berkeley."))
+.toDF("question", "context")
+
+val result = pipeline.fit(example).transform(example)
+```
+
+{:.nlu-block}
+```python
+import nlu
+nlu.load("zh.answer_question.bert").predict("""What's my name?|||"My name is Clara and I live in Berkeley.""")
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|bert_qa_question_answering_chinese|
+|Compatibility:|Spark NLP 5.2.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|zh|
+|Size:|1.2 GB|
+|Case sensitive:|true|
+|Max sentence length:|512|
+
+## References
+
+References
+
+- https://huggingface.co/yechen/question-answering-chinese
\ No newline at end of file
diff --git a/docs/_posts/ahmedlone127/2023-11-15-bert_qa_question_answering_uncased_squadv2_tr.md b/docs/_posts/ahmedlone127/2023-11-15-bert_qa_question_answering_uncased_squadv2_tr.md
new file mode 100644
index 00000000000000..92b087353c7051
--- /dev/null
+++ b/docs/_posts/ahmedlone127/2023-11-15-bert_qa_question_answering_uncased_squadv2_tr.md
@@ -0,0 +1,94 @@
+---
+layout: model
+title: Turkish BertForQuestionAnswering Uncased model (from enelpi)
+author: John Snow Labs
+name: bert_qa_question_answering_uncased_squadv2
+date: 2023-11-15
+tags: [tr, open_source, bert, question_answering, onnx]
+task: Question Answering
+language: tr
+edition: Spark NLP 5.2.0
+spark_version: 3.0
+supported: true
+engine: onnx
+annotator: BertForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained Question Answering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `bert-question-answering-uncased-squadv2_tr` is a Turkish model originally trained by `enelpi`.
+
+## Predicted Entities
+
+
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/bert_qa_question_answering_uncased_squadv2_tr_5.2.0_3.0_1700008510603.zip){:.button.button-orange.button-orange-trans.arr.button-icon}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/bert_qa_question_answering_uncased_squadv2_tr_5.2.0_3.0_1700008510603.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+documentAssembler = MultiDocumentAssembler() \
+ .setInputCols(["question", "context"]) \
+ .setOutputCols(["document_question", "document_context"])
+
+spanClassifier = BertForQuestionAnswering.pretrained("bert_qa_question_answering_uncased_squadv2","tr") \
+ .setInputCols(["document_question", "document_context"]) \
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[documentAssembler, spanClassifier])
+
+data = spark.createDataFrame([["Benim adım ne?", "Benim adım Clara ve Berkeley'de yaşıyorum."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val documentAssembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val spanClassifier = BertForQuestionAnswering.pretrained("bert_qa_question_answering_uncased_squadv2","tr")
+  .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(documentAssembler, spanClassifier))
+
+val data = Seq(("Benim adım ne?", "Benim adım Clara ve Berkeley'de yaşıyorum.")).toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|bert_qa_question_answering_uncased_squadv2|
+|Compatibility:|Spark NLP 5.2.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|tr|
+|Size:|412.5 MB|
+|Case sensitive:|false|
+|Max sentence length:|512|
+
+## References
+
+References
+
+- https://huggingface.co/enelpi/bert-question-answering-uncased-squadv2_tr
\ No newline at end of file
diff --git a/docs/_posts/ahmedlone127/2023-11-15-bert_qa_quote_attribution_en.md b/docs/_posts/ahmedlone127/2023-11-15-bert_qa_quote_attribution_en.md
new file mode 100644
index 00000000000000..fd4c21e37c6160
--- /dev/null
+++ b/docs/_posts/ahmedlone127/2023-11-15-bert_qa_quote_attribution_en.md
@@ -0,0 +1,94 @@
+---
+layout: model
+title: English BertForQuestionAnswering Cased model (from helliun)
+author: John Snow Labs
+name: bert_qa_quote_attribution
+date: 2023-11-15
+tags: [en, open_source, bert, question_answering, onnx]
+task: Question Answering
+language: en
+edition: Spark NLP 5.2.0
+spark_version: 3.0
+supported: true
+engine: onnx
+annotator: BertForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained BertForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `quote-attribution` is an English model originally trained by `helliun`.
+
+## Predicted Entities
+
+
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/bert_qa_quote_attribution_en_5.2.0_3.0_1700008758458.zip){:.button.button-orange.button-orange-trans.arr.button-icon}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/bert_qa_quote_attribution_en_5.2.0_3.0_1700008758458.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = BertForQuestionAnswering.pretrained("bert_qa_quote_attribution","en")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = BertForQuestionAnswering.pretrained("bert_qa_quote_attribution","en")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq(("What's my name?","My name is Clara and I live in Berkeley.")).toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|bert_qa_quote_attribution|
+|Compatibility:|Spark NLP 5.2.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|407.2 MB|
+|Case sensitive:|true|
+|Max sentence length:|512|
+
+## References
+
+References
+
+- https://huggingface.co/helliun/quote-attribution
\ No newline at end of file
diff --git a/docs/_posts/ahmedlone127/2023-11-15-bert_qa_recipe_triplet_base_uncased_squadv2_epochs_3_en.md b/docs/_posts/ahmedlone127/2023-11-15-bert_qa_recipe_triplet_base_uncased_squadv2_epochs_3_en.md
new file mode 100644
index 00000000000000..2971b2c6c646ac
--- /dev/null
+++ b/docs/_posts/ahmedlone127/2023-11-15-bert_qa_recipe_triplet_base_uncased_squadv2_epochs_3_en.md
@@ -0,0 +1,94 @@
+---
+layout: model
+title: English BertForQuestionAnswering Base Uncased model (from AnonymousSub)
+author: John Snow Labs
+name: bert_qa_recipe_triplet_base_uncased_squadv2_epochs_3
+date: 2023-11-15
+tags: [en, open_source, bert, question_answering, onnx]
+task: Question Answering
+language: en
+edition: Spark NLP 5.2.0
+spark_version: 3.0
+supported: true
+engine: onnx
+annotator: BertForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained BertForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `recipe_triplet_bert-base-uncased_squadv2_epochs_3` is an English model originally trained by `AnonymousSub`.
+
+## Predicted Entities
+
+
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/bert_qa_recipe_triplet_base_uncased_squadv2_epochs_3_en_5.2.0_3.0_1700015968478.zip){:.button.button-orange.button-orange-trans.arr.button-icon}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/bert_qa_recipe_triplet_base_uncased_squadv2_epochs_3_en_5.2.0_3.0_1700015968478.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = BertForQuestionAnswering.pretrained("bert_qa_recipe_triplet_base_uncased_squadv2_epochs_3","en")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = BertForQuestionAnswering.pretrained("bert_qa_recipe_triplet_base_uncased_squadv2_epochs_3","en")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq(("What's my name?","My name is Clara and I live in Berkeley.")).toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|bert_qa_recipe_triplet_base_uncased_squadv2_epochs_3|
+|Compatibility:|Spark NLP 5.2.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|407.2 MB|
+|Case sensitive:|false|
+|Max sentence length:|512|
+
+## References
+
+References
+
+- https://huggingface.co/AnonymousSub/recipe_triplet_bert-base-uncased_squadv2_epochs_3
\ No newline at end of file
diff --git a/docs/_posts/ahmedlone127/2023-11-15-bert_qa_ruselkomp_sbert_large_nlu_russian_finetuned_squad_full_ru.md b/docs/_posts/ahmedlone127/2023-11-15-bert_qa_ruselkomp_sbert_large_nlu_russian_finetuned_squad_full_ru.md
new file mode 100644
index 00000000000000..e2e2e1f4cb6834
--- /dev/null
+++ b/docs/_posts/ahmedlone127/2023-11-15-bert_qa_ruselkomp_sbert_large_nlu_russian_finetuned_squad_full_ru.md
@@ -0,0 +1,93 @@
+---
+layout: model
+title: Russian bert_qa_ruselkomp_sbert_large_nlu_russian_finetuned_squad_full BertForQuestionAnswering from ruselkomp
+author: John Snow Labs
+name: bert_qa_ruselkomp_sbert_large_nlu_russian_finetuned_squad_full
+date: 2023-11-15
+tags: [bert, ru, open_source, question_answering, onnx]
+task: Question Answering
+language: ru
+edition: Spark NLP 5.2.0
+spark_version: 3.0
+supported: true
+engine: onnx
+annotator: BertForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained BertForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP.`bert_qa_ruselkomp_sbert_large_nlu_russian_finetuned_squad_full` is a Russian model originally trained by ruselkomp.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/bert_qa_ruselkomp_sbert_large_nlu_russian_finetuned_squad_full_ru_5.2.0_3.0_1700010708407.zip){:.button.button-orange.button-orange-trans.arr.button-icon}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/bert_qa_ruselkomp_sbert_large_nlu_russian_finetuned_squad_full_ru_5.2.0_3.0_1700010708407.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|bert_qa_ruselkomp_sbert_large_nlu_russian_finetuned_squad_full|
+|Compatibility:|Spark NLP 5.2.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|ru|
+|Size:|1.6 GB|
+
+## References
+
+https://huggingface.co/ruselkomp/sbert_large_nlu_ru-finetuned-squad-full
\ No newline at end of file
diff --git a/docs/_posts/ahmedlone127/2023-11-15-bert_qa_salti_bert_base_multilingual_cased_finetuned_squad_xx.md b/docs/_posts/ahmedlone127/2023-11-15-bert_qa_salti_bert_base_multilingual_cased_finetuned_squad_xx.md
new file mode 100644
index 00000000000000..b4c4f0b76ffdff
--- /dev/null
+++ b/docs/_posts/ahmedlone127/2023-11-15-bert_qa_salti_bert_base_multilingual_cased_finetuned_squad_xx.md
@@ -0,0 +1,109 @@
+---
+layout: model
+title: Multilingual BertForQuestionAnswering model (from salti)
+author: John Snow Labs
+name: bert_qa_salti_bert_base_multilingual_cased_finetuned_squad
+date: 2023-11-15
+tags: [open_source, question_answering, bert, xx, onnx]
+task: Question Answering
+language: xx
+edition: Spark NLP 5.2.0
+spark_version: 3.0
+supported: true
+engine: onnx
+annotator: BertForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained Question Answering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `bert-base-multilingual-cased-finetuned-squad` is a Multilingual model originally trained by `salti`.
+
+## Predicted Entities
+
+
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/bert_qa_salti_bert_base_multilingual_cased_finetuned_squad_xx_5.2.0_3.0_1700009569501.zip){:.button.button-orange.button-orange-trans.arr.button-icon}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/bert_qa_salti_bert_base_multilingual_cased_finetuned_squad_xx_5.2.0_3.0_1700009569501.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+document_assembler = MultiDocumentAssembler() \
+.setInputCols(["question", "context"]) \
+.setOutputCols(["document_question", "document_context"])
+
+spanClassifier = BertForQuestionAnswering.pretrained("bert_qa_salti_bert_base_multilingual_cased_finetuned_squad","xx") \
+.setInputCols(["document_question", "document_context"]) \
+.setOutputCol("answer") \
+.setCaseSensitive(True)
+
+pipeline = Pipeline().setStages([
+document_assembler,
+spanClassifier
+])
+
+example = spark.createDataFrame([["What's my name?", "My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(example).transform(example)
+```
+```scala
+val document = new MultiDocumentAssembler()
+.setInputCols("question", "context")
+.setOutputCols("document_question", "document_context")
+
+val spanClassifier = BertForQuestionAnswering
+.pretrained("bert_qa_salti_bert_base_multilingual_cased_finetuned_squad","xx")
+.setInputCols(Array("document_question", "document_context"))
+.setOutputCol("answer")
+.setCaseSensitive(true)
+.setMaxSentenceLength(512)
+
+val pipeline = new Pipeline().setStages(Array(document, spanClassifier))
+
+val example = Seq(
+("Where was John Lennon born?", "John Lennon was born in London and lived in Paris. My name is Sarah and I live in London."),
+("What's my name?", "My name is Clara and I live in Berkeley."))
+.toDF("question", "context")
+
+val result = pipeline.fit(example).transform(example)
+```
+
+{:.nlu-block}
+```python
+import nlu
+nlu.load("xx.answer_question.squad.bert.multilingual_base_cased").predict("""What's my name?|||"My name is Clara and I live in Berkeley.""")
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|bert_qa_salti_bert_base_multilingual_cased_finetuned_squad|
+|Compatibility:|Spark NLP 5.2.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|xx|
+|Size:|665.0 MB|
+|Case sensitive:|true|
+|Max sentence length:|512|
+
+## References
+
+References
+
+- https://huggingface.co/salti/bert-base-multilingual-cased-finetuned-squad
+- https://wandb.ai/salti/mBERT_QA/runs/wkqzhrp2
\ No newline at end of file
diff --git a/docs/_posts/ahmedlone127/2023-11-15-bert_qa_sangyongan30_finetuned_squad_en.md b/docs/_posts/ahmedlone127/2023-11-15-bert_qa_sangyongan30_finetuned_squad_en.md
new file mode 100644
index 00000000000000..0c0b076dc12c61
--- /dev/null
+++ b/docs/_posts/ahmedlone127/2023-11-15-bert_qa_sangyongan30_finetuned_squad_en.md
@@ -0,0 +1,94 @@
+---
+layout: model
+title: English BertForQuestionAnswering Cased model (from sangyongan30)
+author: John Snow Labs
+name: bert_qa_sangyongan30_finetuned_squad
+date: 2023-11-15
+tags: [en, open_source, bert, question_answering, onnx]
+task: Question Answering
+language: en
+edition: Spark NLP 5.2.0
+spark_version: 3.0
+supported: true
+engine: onnx
+annotator: BertForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained BertForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `bert-finetuned-squad` is an English model originally trained by `sangyongan30`.
+
+## Predicted Entities
+
+
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/bert_qa_sangyongan30_finetuned_squad_en_5.2.0_3.0_1700011197408.zip){:.button.button-orange.button-orange-trans.arr.button-icon}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/bert_qa_sangyongan30_finetuned_squad_en_5.2.0_3.0_1700011197408.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = BertForQuestionAnswering.pretrained("bert_qa_sangyongan30_finetuned_squad","en")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = BertForQuestionAnswering.pretrained("bert_qa_sangyongan30_finetuned_squad","en")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq(("What's my name?","My name is Clara and I live in Berkeley.")).toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|bert_qa_sangyongan30_finetuned_squad|
+|Compatibility:|Spark NLP 5.2.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|403.6 MB|
+|Case sensitive:|true|
+|Max sentence length:|512|
+
+## References
+
+References
+
+- https://huggingface.co/sangyongan30/bert-finetuned-squad
\ No newline at end of file
diff --git a/docs/_posts/ahmedlone127/2023-11-15-bert_qa_sber_full_tes_ru.md b/docs/_posts/ahmedlone127/2023-11-15-bert_qa_sber_full_tes_ru.md
new file mode 100644
index 00000000000000..a2af090fecd54d
--- /dev/null
+++ b/docs/_posts/ahmedlone127/2023-11-15-bert_qa_sber_full_tes_ru.md
@@ -0,0 +1,94 @@
+---
+layout: model
+title: Russian BertForQuestionAnswering Cased model (from ruselkomp)
+author: John Snow Labs
+name: bert_qa_sber_full_tes
+date: 2023-11-15
+tags: [ru, open_source, bert, question_answering, onnx]
+task: Question Answering
+language: ru
+edition: Spark NLP 5.2.0
+spark_version: 3.0
+supported: true
+engine: onnx
+annotator: BertForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained Question Answering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `sber-full-test` is a Russian model originally trained by `ruselkomp`.
+
+## Predicted Entities
+
+
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/bert_qa_sber_full_tes_ru_5.2.0_3.0_1700010222750.zip){:.button.button-orange.button-orange-trans.arr.button-icon}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/bert_qa_sber_full_tes_ru_5.2.0_3.0_1700010222750.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+documentAssembler = MultiDocumentAssembler() \
+ .setInputCols(["question", "context"]) \
+ .setOutputCols(["document_question", "document_context"])
+
+spanClassifier = BertForQuestionAnswering.pretrained("bert_qa_sber_full_tes","ru") \
+ .setInputCols(["document_question", "document_context"]) \
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[documentAssembler, spanClassifier])
+
+data = spark.createDataFrame([["Как меня зовут?", "Меня зовут Клара, и я живу в Беркли."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val documentAssembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val spanClassifier = BertForQuestionAnswering.pretrained("bert_qa_sber_full_tes","ru")
+  .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(documentAssembler, spanClassifier))
+
+val data = Seq(("Как меня зовут?", "Меня зовут Клара, и я живу в Беркли.")).toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|bert_qa_sber_full_tes|
+|Compatibility:|Spark NLP 5.2.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|ru|
+|Size:|1.6 GB|
+|Case sensitive:|true|
+|Max sentence length:|512|
+
+## References
+
+References
+
+- https://huggingface.co/ruselkomp/sber-full-test
\ No newline at end of file
diff --git a/docs/_posts/ahmedlone127/2023-11-15-bert_qa_sbert_large_nlu_russian_finetuned_squad_en.md b/docs/_posts/ahmedlone127/2023-11-15-bert_qa_sbert_large_nlu_russian_finetuned_squad_en.md
new file mode 100644
index 00000000000000..a603e14f6d70b6
--- /dev/null
+++ b/docs/_posts/ahmedlone127/2023-11-15-bert_qa_sbert_large_nlu_russian_finetuned_squad_en.md
@@ -0,0 +1,93 @@
+---
+layout: model
+title: English bert_qa_sbert_large_nlu_russian_finetuned_squad BertForQuestionAnswering from ruselkomp
+author: John Snow Labs
+name: bert_qa_sbert_large_nlu_russian_finetuned_squad
+date: 2023-11-15
+tags: [bert, en, open_source, question_answering, onnx]
+task: Question Answering
+language: en
+edition: Spark NLP 5.2.0
+spark_version: 3.0
+supported: true
+engine: onnx
+annotator: BertForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained BertForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `bert_qa_sbert_large_nlu_russian_finetuned_squad` is an English model originally trained by ruselkomp.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/bert_qa_sbert_large_nlu_russian_finetuned_squad_en_5.2.0_3.0_1700010689496.zip){:.button.button-orange.button-orange-trans.arr.button-icon}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/bert_qa_sbert_large_nlu_russian_finetuned_squad_en_5.2.0_3.0_1700010689496.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|bert_qa_sbert_large_nlu_russian_finetuned_squad|
+|Compatibility:|Spark NLP 5.2.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|1.6 GB|
+
+## References
+
+https://huggingface.co/ruselkomp/sbert_large_nlu_ru-finetuned-squad
\ No newline at end of file
diff --git a/docs/_posts/ahmedlone127/2023-11-15-bert_qa_sci_squadv2_en.md b/docs/_posts/ahmedlone127/2023-11-15-bert_qa_sci_squadv2_en.md
new file mode 100644
index 00000000000000..9ecd673564a800
--- /dev/null
+++ b/docs/_posts/ahmedlone127/2023-11-15-bert_qa_sci_squadv2_en.md
@@ -0,0 +1,94 @@
+---
+layout: model
+title: English BertForQuestionAnswering Cased model (from jbrat)
+author: John Snow Labs
+name: bert_qa_sci_squadv2
+date: 2023-11-15
+tags: [en, open_source, bert, question_answering, onnx]
+task: Question Answering
+language: en
+edition: Spark NLP 5.2.0
+spark_version: 3.0
+supported: true
+engine: onnx
+annotator: BertForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained BertForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `scibert-squadv2` is an English model originally trained by `jbrat`.
+
+## Predicted Entities
+
+
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/bert_qa_sci_squadv2_en_5.2.0_3.0_1700011820213.zip){:.button.button-orange.button-orange-trans.arr.button-icon}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/bert_qa_sci_squadv2_en_5.2.0_3.0_1700011820213.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = BertForQuestionAnswering.pretrained("bert_qa_sci_squadv2","en")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = BertForQuestionAnswering.pretrained("bert_qa_sci_squadv2","en")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq(("What's my name?","My name is Clara and I live in Berkeley.")).toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|bert_qa_sci_squadv2|
+|Compatibility:|Spark NLP 5.2.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|410.0 MB|
+|Case sensitive:|true|
+|Max sentence length:|512|
+
+## References
+
+References
+
+- https://huggingface.co/jbrat/scibert-squadv2
\ No newline at end of file
diff --git a/docs/_posts/ahmedlone127/2023-11-15-bert_qa_scibert_nli_squad_en.md b/docs/_posts/ahmedlone127/2023-11-15-bert_qa_scibert_nli_squad_en.md
new file mode 100644
index 00000000000000..9160bb9c688b24
--- /dev/null
+++ b/docs/_posts/ahmedlone127/2023-11-15-bert_qa_scibert_nli_squad_en.md
@@ -0,0 +1,108 @@
+---
+layout: model
+title: English BertForQuestionAnswering model (from amoux)
+author: John Snow Labs
+name: bert_qa_scibert_nli_squad
+date: 2023-11-15
+tags: [en, open_source, question_answering, bert, onnx]
+task: Question Answering
+language: en
+edition: Spark NLP 5.2.0
+spark_version: 3.0
+supported: true
+engine: onnx
+annotator: BertForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained Question Answering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `scibert_nli_squad` is an English model originally trained by `amoux`.
+
+## Predicted Entities
+
+
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/bert_qa_scibert_nli_squad_en_5.2.0_3.0_1700011023145.zip){:.button.button-orange.button-orange-trans.arr.button-icon}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/bert_qa_scibert_nli_squad_en_5.2.0_3.0_1700011023145.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+document_assembler = MultiDocumentAssembler() \
+.setInputCols(["question", "context"]) \
+.setOutputCols(["document_question", "document_context"])
+
+spanClassifier = BertForQuestionAnswering.pretrained("bert_qa_scibert_nli_squad","en") \
+.setInputCols(["document_question", "document_context"]) \
+.setOutputCol("answer") \
+.setCaseSensitive(True)
+
+pipeline = Pipeline().setStages([
+document_assembler,
+spanClassifier
+])
+
+example = spark.createDataFrame([["What's my name?", "My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(example).transform(example)
+```
+```scala
+val document = new MultiDocumentAssembler()
+.setInputCols("question", "context")
+.setOutputCols("document_question", "document_context")
+
+val spanClassifier = BertForQuestionAnswering
+.pretrained("bert_qa_scibert_nli_squad","en")
+.setInputCols(Array("document_question", "document_context"))
+.setOutputCol("answer")
+.setCaseSensitive(true)
+.setMaxSentenceLength(512)
+
+val pipeline = new Pipeline().setStages(Array(document, spanClassifier))
+
+val example = Seq(
+("Where was John Lenon born?", "John Lenon was born in London and lived in Paris. My name is Sarah and I live in London."),
+("What's my name?", "My name is Clara and I live in Berkeley."))
+.toDF("question", "context")
+
+val result = pipeline.fit(example).transform(example)
+```
+
+{:.nlu-block}
+```python
+import nlu
+nlu.load("en.answer_question.squad.scibert").predict("""What's my name?|||My name is Clara and I live in Berkeley.""")
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|bert_qa_scibert_nli_squad|
+|Compatibility:|Spark NLP 5.2.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|409.9 MB|
+|Case sensitive:|true|
+|Max sentence length:|512|
+
+## References
+
+References
+
+- https://huggingface.co/amoux/scibert_nli_squad
\ No newline at end of file
diff --git a/docs/_posts/ahmedlone127/2023-11-15-bert_qa_scibert_scivocab_uncased_squad_en.md b/docs/_posts/ahmedlone127/2023-11-15-bert_qa_scibert_scivocab_uncased_squad_en.md
new file mode 100644
index 00000000000000..08dc69192b8aca
--- /dev/null
+++ b/docs/_posts/ahmedlone127/2023-11-15-bert_qa_scibert_scivocab_uncased_squad_en.md
@@ -0,0 +1,108 @@
+---
+layout: model
+title: English BertForQuestionAnswering model (from LoudlySoft)
+author: John Snow Labs
+name: bert_qa_scibert_scivocab_uncased_squad
+date: 2023-11-15
+tags: [en, open_source, question_answering, bert, onnx]
+task: Question Answering
+language: en
+edition: Spark NLP 5.2.0
+spark_version: 3.0
+supported: true
+engine: onnx
+annotator: BertForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained Question Answering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `scibert_scivocab_uncased_squad` is an English model originally trained by `LoudlySoft`.
+
+## Predicted Entities
+
+
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/bert_qa_scibert_scivocab_uncased_squad_en_5.2.0_3.0_1700012083245.zip){:.button.button-orange.button-orange-trans.arr.button-icon}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/bert_qa_scibert_scivocab_uncased_squad_en_5.2.0_3.0_1700012083245.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+document_assembler = MultiDocumentAssembler() \
+.setInputCols(["question", "context"]) \
+.setOutputCols(["document_question", "document_context"])
+
+spanClassifier = BertForQuestionAnswering.pretrained("bert_qa_scibert_scivocab_uncased_squad","en") \
+.setInputCols(["document_question", "document_context"]) \
+.setOutputCol("answer") \
+.setCaseSensitive(True)
+
+pipeline = Pipeline().setStages([
+document_assembler,
+spanClassifier
+])
+
+example = spark.createDataFrame([["What's my name?", "My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(example).transform(example)
+```
+```scala
+val document = new MultiDocumentAssembler()
+.setInputCols("question", "context")
+.setOutputCols("document_question", "document_context")
+
+val spanClassifier = BertForQuestionAnswering
+.pretrained("bert_qa_scibert_scivocab_uncased_squad","en")
+.setInputCols(Array("document_question", "document_context"))
+.setOutputCol("answer")
+.setCaseSensitive(true)
+.setMaxSentenceLength(512)
+
+val pipeline = new Pipeline().setStages(Array(document, spanClassifier))
+
+val example = Seq(
+("Where was John Lenon born?", "John Lenon was born in London and lived in Paris. My name is Sarah and I live in London."),
+("What's my name?", "My name is Clara and I live in Berkeley."))
+.toDF("question", "context")
+
+val result = pipeline.fit(example).transform(example)
+```
+
+{:.nlu-block}
+```python
+import nlu
+nlu.load("en.answer_question.squad.scibert.uncased").predict("""What's my name?|||My name is Clara and I live in Berkeley.""")
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|bert_qa_scibert_scivocab_uncased_squad|
+|Compatibility:|Spark NLP 5.2.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|410.0 MB|
+|Case sensitive:|false|
+|Max sentence length:|512|
+
+## References
+
+References
+
+- https://huggingface.co/LoudlySoft/scibert_scivocab_uncased_squad
\ No newline at end of file
diff --git a/docs/_posts/ahmedlone127/2023-11-15-bert_qa_scibert_scivocab_uncased_squad_v2_en.md b/docs/_posts/ahmedlone127/2023-11-15-bert_qa_scibert_scivocab_uncased_squad_v2_en.md
new file mode 100644
index 00000000000000..2a2cf6b8338a95
--- /dev/null
+++ b/docs/_posts/ahmedlone127/2023-11-15-bert_qa_scibert_scivocab_uncased_squad_v2_en.md
@@ -0,0 +1,109 @@
+---
+layout: model
+title: English BertForQuestionAnswering model (from ktrapeznikov)
+author: John Snow Labs
+name: bert_qa_scibert_scivocab_uncased_squad_v2
+date: 2023-11-15
+tags: [en, open_source, question_answering, bert, onnx]
+task: Question Answering
+language: en
+edition: Spark NLP 5.2.0
+spark_version: 3.0
+supported: true
+engine: onnx
+annotator: BertForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained Question Answering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `scibert_scivocab_uncased_squad_v2` is an English model originally trained by `ktrapeznikov`.
+
+## Predicted Entities
+
+
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/bert_qa_scibert_scivocab_uncased_squad_v2_en_5.2.0_3.0_1700012351870.zip){:.button.button-orange.button-orange-trans.arr.button-icon}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/bert_qa_scibert_scivocab_uncased_squad_v2_en_5.2.0_3.0_1700012351870.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+document_assembler = MultiDocumentAssembler() \
+.setInputCols(["question", "context"]) \
+.setOutputCols(["document_question", "document_context"])
+
+spanClassifier = BertForQuestionAnswering.pretrained("bert_qa_scibert_scivocab_uncased_squad_v2","en") \
+.setInputCols(["document_question", "document_context"]) \
+.setOutputCol("answer") \
+.setCaseSensitive(True)
+
+pipeline = Pipeline().setStages([
+document_assembler,
+spanClassifier
+])
+
+example = spark.createDataFrame([["What's my name?", "My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(example).transform(example)
+```
+```scala
+val document = new MultiDocumentAssembler()
+.setInputCols("question", "context")
+.setOutputCols("document_question", "document_context")
+
+val spanClassifier = BertForQuestionAnswering
+.pretrained("bert_qa_scibert_scivocab_uncased_squad_v2","en")
+.setInputCols(Array("document_question", "document_context"))
+.setOutputCol("answer")
+.setCaseSensitive(true)
+.setMaxSentenceLength(512)
+
+val pipeline = new Pipeline().setStages(Array(document, spanClassifier))
+
+val example = Seq(
+("Where was John Lenon born?", "John Lenon was born in London and lived in Paris. My name is Sarah and I live in London."),
+("What's my name?", "My name is Clara and I live in Berkeley."))
+.toDF("question", "context")
+
+val result = pipeline.fit(example).transform(example)
+```
+
+{:.nlu-block}
+```python
+import nlu
+nlu.load("en.answer_question.squadv2.scibert.uncased_v2").predict("""What's my name?|||My name is Clara and I live in Berkeley.""")
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|bert_qa_scibert_scivocab_uncased_squad_v2|
+|Compatibility:|Spark NLP 5.2.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|410.0 MB|
+|Case sensitive:|false|
+|Max sentence length:|512|
+
+## References
+
+References
+
+- https://huggingface.co/ktrapeznikov/scibert_scivocab_uncased_squad_v2
+- https://rajpurkar.github.io/SQuAD-explorer/
\ No newline at end of file
diff --git a/docs/_posts/ahmedlone127/2023-11-15-bert_qa_sd2_en.md b/docs/_posts/ahmedlone127/2023-11-15-bert_qa_sd2_en.md
new file mode 100644
index 00000000000000..29591c995c8c7f
--- /dev/null
+++ b/docs/_posts/ahmedlone127/2023-11-15-bert_qa_sd2_en.md
@@ -0,0 +1,100 @@
+---
+layout: model
+title: English BertForQuestionAnswering Cased model (from motiondew)
+author: John Snow Labs
+name: bert_qa_sd2
+date: 2023-11-15
+tags: [en, open_source, bert, question_answering, onnx]
+task: Question Answering
+language: en
+edition: Spark NLP 5.2.0
+spark_version: 3.0
+supported: true
+engine: onnx
+annotator: BertForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained Question Answering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `bert-sd2` is an English model originally trained by `motiondew`.
+
+## Predicted Entities
+
+
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/bert_qa_sd2_en_5.2.0_3.0_1700011295304.zip){:.button.button-orange.button-orange-trans.arr.button-icon}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/bert_qa_sd2_en_5.2.0_3.0_1700011295304.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+documentAssembler = MultiDocumentAssembler() \
+ .setInputCols(["question", "context"]) \
+ .setOutputCols(["document_question", "document_context"])
+
+spanClassifier = BertForQuestionAnswering.pretrained("bert_qa_sd2","en") \
+ .setInputCols(["document_question", "document_context"]) \
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[documentAssembler, spanClassifier])
+
+data = spark.createDataFrame([["What is my name?", "My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val documentAssembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val spanClassifier = BertForQuestionAnswering.pretrained("bert_qa_sd2","en")
+  .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(documentAssembler, spanClassifier))
+
+val data = Seq(("What is my name?", "My name is Clara and I live in Berkeley.")).toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+{:.nlu-block}
+```python
+import nlu
+nlu.load("en.answer_question.bert.sd2.by_motiondew").predict("""What is my name?|||My name is Clara and I live in Berkeley.""")
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|bert_qa_sd2|
+|Compatibility:|Spark NLP 5.2.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|407.2 MB|
+|Case sensitive:|true|
+|Max sentence length:|512|
+
+## References
+
+References
+
+- https://huggingface.co/motiondew/bert-sd2
\ No newline at end of file
diff --git a/docs/_posts/ahmedlone127/2023-11-15-bert_qa_sd2_small_en.md b/docs/_posts/ahmedlone127/2023-11-15-bert_qa_sd2_small_en.md
new file mode 100644
index 00000000000000..75e3765e806dd9
--- /dev/null
+++ b/docs/_posts/ahmedlone127/2023-11-15-bert_qa_sd2_small_en.md
@@ -0,0 +1,100 @@
+---
+layout: model
+title: English BertForQuestionAnswering Small Cased model (from motiondew)
+author: John Snow Labs
+name: bert_qa_sd2_small
+date: 2023-11-15
+tags: [en, open_source, bert, question_answering, onnx]
+task: Question Answering
+language: en
+edition: Spark NLP 5.2.0
+spark_version: 3.0
+supported: true
+engine: onnx
+annotator: BertForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained Question Answering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `bert-sd2-small` is an English model originally trained by `motiondew`.
+
+## Predicted Entities
+
+
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/bert_qa_sd2_small_en_5.2.0_3.0_1700011602299.zip){:.button.button-orange.button-orange-trans.arr.button-icon}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/bert_qa_sd2_small_en_5.2.0_3.0_1700011602299.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+documentAssembler = MultiDocumentAssembler() \
+ .setInputCols(["question", "context"]) \
+ .setOutputCols(["document_question", "document_context"])
+
+spanClassifier = BertForQuestionAnswering.pretrained("bert_qa_sd2_small","en") \
+ .setInputCols(["document_question", "document_context"]) \
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[documentAssembler, spanClassifier])
+
+data = spark.createDataFrame([["What is my name?", "My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val documentAssembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val spanClassifier = BertForQuestionAnswering.pretrained("bert_qa_sd2_small","en")
+  .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(documentAssembler, spanClassifier))
+
+val data = Seq(("What is my name?", "My name is Clara and I live in Berkeley.")).toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+{:.nlu-block}
+```python
+import nlu
+nlu.load("en.answer_question.bert.small.sd2_small.by_motiondew").predict("""What is my name?|||My name is Clara and I live in Berkeley.""")
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|bert_qa_sd2_small|
+|Compatibility:|Spark NLP 5.2.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|407.2 MB|
+|Case sensitive:|true|
+|Max sentence length:|512|
+
+## References
+
+References
+
+- https://huggingface.co/motiondew/bert-sd2-small
\ No newline at end of file
diff --git a/docs/_posts/ahmedlone127/2023-11-15-bert_qa_set_date_1_lr_3e_5_bosnian_32_ep_3_en.md b/docs/_posts/ahmedlone127/2023-11-15-bert_qa_set_date_1_lr_3e_5_bosnian_32_ep_3_en.md
new file mode 100644
index 00000000000000..07240bc7b9a4b6
--- /dev/null
+++ b/docs/_posts/ahmedlone127/2023-11-15-bert_qa_set_date_1_lr_3e_5_bosnian_32_ep_3_en.md
@@ -0,0 +1,93 @@
+---
+layout: model
+title: English bert_qa_set_date_1_lr_3e_5_bosnian_32_ep_3 BertForQuestionAnswering from motiondew
+author: John Snow Labs
+name: bert_qa_set_date_1_lr_3e_5_bosnian_32_ep_3
+date: 2023-11-15
+tags: [bert, en, open_source, question_answering, onnx]
+task: Question Answering
+language: en
+edition: Spark NLP 5.2.0
+spark_version: 3.0
+supported: true
+engine: onnx
+annotator: BertForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained BertForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `bert_qa_set_date_1_lr_3e_5_bosnian_32_ep_3` is an English model originally trained by motiondew.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/bert_qa_set_date_1_lr_3e_5_bosnian_32_ep_3_en_5.2.0_3.0_1700011802544.zip){:.button.button-orange.button-orange-trans.arr.button-icon}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/bert_qa_set_date_1_lr_3e_5_bosnian_32_ep_3_en_5.2.0_3.0_1700011802544.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|bert_qa_set_date_1_lr_3e_5_bosnian_32_ep_3|
+|Compatibility:|Spark NLP 5.2.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|407.2 MB|
+
+## References
+
+https://huggingface.co/motiondew/bert-set_date_1-lr-3e-5-bs-32-ep-3
\ No newline at end of file
diff --git a/docs/_posts/ahmedlone127/2023-11-15-bert_qa_shed_e_finetuned_squad_en.md b/docs/_posts/ahmedlone127/2023-11-15-bert_qa_shed_e_finetuned_squad_en.md
new file mode 100644
index 00000000000000..f45c8a3127bc21
--- /dev/null
+++ b/docs/_posts/ahmedlone127/2023-11-15-bert_qa_shed_e_finetuned_squad_en.md
@@ -0,0 +1,94 @@
+---
+layout: model
+title: English BertForQuestionAnswering Cased model (from shed-e)
+author: John Snow Labs
+name: bert_qa_shed_e_finetuned_squad
+date: 2023-11-15
+tags: [en, open_source, bert, question_answering, onnx]
+task: Question Answering
+language: en
+edition: Spark NLP 5.2.0
+spark_version: 3.0
+supported: true
+engine: onnx
+annotator: BertForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained BertForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `bert-finetuned-squad` is an English model originally trained by `shed-e`.
+
+## Predicted Entities
+
+
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/bert_qa_shed_e_finetuned_squad_en_5.2.0_3.0_1700012461934.zip){:.button.button-orange.button-orange-trans.arr.button-icon}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/bert_qa_shed_e_finetuned_squad_en_5.2.0_3.0_1700012461934.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = BertForQuestionAnswering.pretrained("bert_qa_shed_e_finetuned_squad","en")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = BertForQuestionAnswering.pretrained("bert_qa_shed_e_finetuned_squad","en")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq(("What's my name?","My name is Clara and I live in Berkeley.")).toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|bert_qa_shed_e_finetuned_squad|
+|Compatibility:|Spark NLP 5.2.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|403.6 MB|
+|Case sensitive:|true|
+|Max sentence length:|512|
+
+## References
+
+References
+
+- https://huggingface.co/shed-e/bert-finetuned-squad
\ No newline at end of file
diff --git a/docs/_posts/ahmedlone127/2023-11-15-bert_qa_sirah_finetuned_squad_en.md b/docs/_posts/ahmedlone127/2023-11-15-bert_qa_sirah_finetuned_squad_en.md
new file mode 100644
index 00000000000000..7b3194c54e076b
--- /dev/null
+++ b/docs/_posts/ahmedlone127/2023-11-15-bert_qa_sirah_finetuned_squad_en.md
@@ -0,0 +1,95 @@
+---
+layout: model
+title: English BertForQuestionAnswering Cased model (from SiraH)
+author: John Snow Labs
+name: bert_qa_sirah_finetuned_squad
+date: 2023-11-15
+tags: [en, open_source, bert, question_answering, onnx]
+task: Question Answering
+language: en
+edition: Spark NLP 5.2.0
+spark_version: 3.0
+supported: true
+engine: onnx
+annotator: BertForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained BertForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `bert-finetuned-squad` is an English model originally trained by `SiraH`.
+
+## Predicted Entities
+
+
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/bert_qa_sirah_finetuned_squad_en_5.2.0_3.0_1700012684291.zip){:.button.button-orange.button-orange-trans.arr.button-icon}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/bert_qa_sirah_finetuned_squad_en_5.2.0_3.0_1700012684291.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = BertForQuestionAnswering.pretrained("bert_qa_sirah_finetuned_squad","en")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = BertForQuestionAnswering.pretrained("bert_qa_sirah_finetuned_squad","en")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq(("What's my name?","My name is Clara and I live in Berkeley.")).toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|bert_qa_sirah_finetuned_squad|
+|Compatibility:|Spark NLP 5.2.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|403.6 MB|
+|Case sensitive:|true|
+|Max sentence length:|512|
+
+## References
+
+References
+
+- https://huggingface.co/SiraH/bert-finetuned-squad
+- https://paperswithcode.com/sota?task=Question+Answering&dataset=squad_v2
\ No newline at end of file
diff --git a/docs/_posts/ahmedlone127/2023-11-15-bert_qa_small_finetuned_cuad_longer_en.md b/docs/_posts/ahmedlone127/2023-11-15-bert_qa_small_finetuned_cuad_longer_en.md
new file mode 100644
index 00000000000000..d7a545834d60f6
--- /dev/null
+++ b/docs/_posts/ahmedlone127/2023-11-15-bert_qa_small_finetuned_cuad_longer_en.md
@@ -0,0 +1,94 @@
+---
+layout: model
+title: English BertForQuestionAnswering Small Cased model (from muhtasham)
+author: John Snow Labs
+name: bert_qa_small_finetuned_cuad_longer
+date: 2023-11-15
+tags: [en, open_source, bert, question_answering, onnx]
+task: Question Answering
+language: en
+edition: Spark NLP 5.2.0
+spark_version: 3.0
+supported: true
+engine: onnx
+annotator: BertForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained BertForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `bert-small-finetuned-cuad-longer` is an English model originally trained by `muhtasham`.
+
+## Predicted Entities
+
+
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/bert_qa_small_finetuned_cuad_longer_en_5.2.0_3.0_1700012855006.zip){:.button.button-orange.button-orange-trans.arr.button-icon}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/bert_qa_small_finetuned_cuad_longer_en_5.2.0_3.0_1700012855006.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = BertForQuestionAnswering.pretrained("bert_qa_small_finetuned_cuad_longer","en")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = BertForQuestionAnswering.pretrained("bert_qa_small_finetuned_cuad_longer","en")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq(("What's my name?", "My name is Clara and I live in Berkeley.")).toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|bert_qa_small_finetuned_cuad_longer|
+|Compatibility:|Spark NLP 5.2.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|107.0 MB|
+|Case sensitive:|true|
+|Max sentence length:|512|
+
+## References
+
+References
+
+- https://huggingface.co/muhtasham/bert-small-finetuned-cuad-longer
\ No newline at end of file
diff --git a/docs/_posts/ahmedlone127/2023-11-15-bert_qa_span_finetuned_squadv2_en.md b/docs/_posts/ahmedlone127/2023-11-15-bert_qa_span_finetuned_squadv2_en.md
new file mode 100644
index 00000000000000..3e62227528d165
--- /dev/null
+++ b/docs/_posts/ahmedlone127/2023-11-15-bert_qa_span_finetuned_squadv2_en.md
@@ -0,0 +1,94 @@
+---
+layout: model
+title: English BertForQuestionAnswering Cased model (from vvincentt)
+author: John Snow Labs
+name: bert_qa_span_finetuned_squadv2
+date: 2023-11-15
+tags: [en, open_source, bert, question_answering, onnx]
+task: Question Answering
+language: en
+edition: Spark NLP 5.2.0
+spark_version: 3.0
+supported: true
+engine: onnx
+annotator: BertForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained BertForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `spanbert-finetuned-squadv2` is an English model originally trained by `vvincentt`.
+
+## Predicted Entities
+
+
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/bert_qa_span_finetuned_squadv2_en_5.2.0_3.0_1700013138777.zip){:.button.button-orange.button-orange-trans.arr.button-icon}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/bert_qa_span_finetuned_squadv2_en_5.2.0_3.0_1700013138777.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = BertForQuestionAnswering.pretrained("bert_qa_span_finetuned_squadv2","en")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = BertForQuestionAnswering.pretrained("bert_qa_span_finetuned_squadv2","en")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq(("What's my name?", "My name is Clara and I live in Berkeley.")).toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|bert_qa_span_finetuned_squadv2|
+|Compatibility:|Spark NLP 5.2.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|402.9 MB|
+|Case sensitive:|true|
+|Max sentence length:|512|
+
+## References
+
+References
+
+- https://huggingface.co/vvincentt/spanbert-finetuned-squadv2
\ No newline at end of file
diff --git a/docs/_posts/ahmedlone127/2023-11-15-bert_qa_spanbert_base_cased_few_shot_k_1024_finetuned_squad_seed_10_en.md b/docs/_posts/ahmedlone127/2023-11-15-bert_qa_spanbert_base_cased_few_shot_k_1024_finetuned_squad_seed_10_en.md
new file mode 100644
index 00000000000000..b8e6af9e370c2e
--- /dev/null
+++ b/docs/_posts/ahmedlone127/2023-11-15-bert_qa_spanbert_base_cased_few_shot_k_1024_finetuned_squad_seed_10_en.md
@@ -0,0 +1,108 @@
+---
+layout: model
+title: English BertForQuestionAnswering model (from anas-awadalla)
+author: John Snow Labs
+name: bert_qa_spanbert_base_cased_few_shot_k_1024_finetuned_squad_seed_10
+date: 2023-11-15
+tags: [en, open_source, question_answering, bert, onnx]
+task: Question Answering
+language: en
+edition: Spark NLP 5.2.0
+spark_version: 3.0
+supported: true
+engine: onnx
+annotator: BertForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained Question Answering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `spanbert-base-cased-few-shot-k-1024-finetuned-squad-seed-10` is an English model originally trained by `anas-awadalla`.
+
+## Predicted Entities
+
+
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/bert_qa_spanbert_base_cased_few_shot_k_1024_finetuned_squad_seed_10_en_5.2.0_3.0_1700013423047.zip){:.button.button-orange.button-orange-trans.arr.button-icon}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/bert_qa_spanbert_base_cased_few_shot_k_1024_finetuned_squad_seed_10_en_5.2.0_3.0_1700013423047.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+document_assembler = MultiDocumentAssembler() \
+.setInputCols(["question", "context"]) \
+.setOutputCols(["document_question", "document_context"])
+
+spanClassifier = BertForQuestionAnswering.pretrained("bert_qa_spanbert_base_cased_few_shot_k_1024_finetuned_squad_seed_10","en") \
+.setInputCols(["document_question", "document_context"]) \
+.setOutputCol("answer") \
+.setCaseSensitive(True)
+
+pipeline = Pipeline().setStages([
+document_assembler,
+spanClassifier
+])
+
+example = spark.createDataFrame([["What's my name?", "My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(example).transform(example)
+```
+```scala
+val document = new MultiDocumentAssembler()
+.setInputCols("question", "context")
+.setOutputCols("document_question", "document_context")
+
+val spanClassifier = BertForQuestionAnswering
+.pretrained("bert_qa_spanbert_base_cased_few_shot_k_1024_finetuned_squad_seed_10","en")
+.setInputCols(Array("document_question", "document_context"))
+.setOutputCol("answer")
+.setCaseSensitive(true)
+.setMaxSentenceLength(512)
+
+val pipeline = new Pipeline().setStages(Array(document, spanClassifier))
+
+val example = Seq(
+("Where was John Lenon born?", "John Lenon was born in London and lived in Paris. My name is Sarah and I live in London."),
+("What's my name?", "My name is Clara and I live in Berkeley."))
+.toDF("question", "context")
+
+val result = pipeline.fit(example).transform(example)
+```
+
+{:.nlu-block}
+```python
+import nlu
+nlu.load("en.answer_question.squad.span_bert.base_cased_1024d_seed_10").predict("""What's my name?|||My name is Clara and I live in Berkeley.""")
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|bert_qa_spanbert_base_cased_few_shot_k_1024_finetuned_squad_seed_10|
+|Compatibility:|Spark NLP 5.2.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|389.9 MB|
+|Case sensitive:|true|
+|Max sentence length:|512|
+
+## References
+
+References
+
+- https://huggingface.co/anas-awadalla/spanbert-base-cased-few-shot-k-1024-finetuned-squad-seed-10
\ No newline at end of file
diff --git a/docs/_posts/ahmedlone127/2023-11-15-bert_qa_spanbert_base_cased_few_shot_k_1024_finetuned_squad_seed_2_en.md b/docs/_posts/ahmedlone127/2023-11-15-bert_qa_spanbert_base_cased_few_shot_k_1024_finetuned_squad_seed_2_en.md
new file mode 100644
index 00000000000000..44a9f69fbc3195
--- /dev/null
+++ b/docs/_posts/ahmedlone127/2023-11-15-bert_qa_spanbert_base_cased_few_shot_k_1024_finetuned_squad_seed_2_en.md
@@ -0,0 +1,108 @@
+---
+layout: model
+title: English BertForQuestionAnswering model (from anas-awadalla)
+author: John Snow Labs
+name: bert_qa_spanbert_base_cased_few_shot_k_1024_finetuned_squad_seed_2
+date: 2023-11-15
+tags: [en, open_source, question_answering, bert, onnx]
+task: Question Answering
+language: en
+edition: Spark NLP 5.2.0
+spark_version: 3.0
+supported: true
+engine: onnx
+annotator: BertForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained Question Answering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `spanbert-base-cased-few-shot-k-1024-finetuned-squad-seed-2` is an English model originally trained by `anas-awadalla`.
+
+## Predicted Entities
+
+
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/bert_qa_spanbert_base_cased_few_shot_k_1024_finetuned_squad_seed_2_en_5.2.0_3.0_1700013676058.zip){:.button.button-orange.button-orange-trans.arr.button-icon}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/bert_qa_spanbert_base_cased_few_shot_k_1024_finetuned_squad_seed_2_en_5.2.0_3.0_1700013676058.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+document_assembler = MultiDocumentAssembler() \
+.setInputCols(["question", "context"]) \
+.setOutputCols(["document_question", "document_context"])
+
+spanClassifier = BertForQuestionAnswering.pretrained("bert_qa_spanbert_base_cased_few_shot_k_1024_finetuned_squad_seed_2","en") \
+.setInputCols(["document_question", "document_context"]) \
+.setOutputCol("answer") \
+.setCaseSensitive(True)
+
+pipeline = Pipeline().setStages([
+document_assembler,
+spanClassifier
+])
+
+example = spark.createDataFrame([["What's my name?", "My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(example).transform(example)
+```
+```scala
+val document = new MultiDocumentAssembler()
+.setInputCols("question", "context")
+.setOutputCols("document_question", "document_context")
+
+val spanClassifier = BertForQuestionAnswering
+.pretrained("bert_qa_spanbert_base_cased_few_shot_k_1024_finetuned_squad_seed_2","en")
+.setInputCols(Array("document_question", "document_context"))
+.setOutputCol("answer")
+.setCaseSensitive(true)
+.setMaxSentenceLength(512)
+
+val pipeline = new Pipeline().setStages(Array(document, spanClassifier))
+
+val example = Seq(
+("Where was John Lenon born?", "John Lenon was born in London and lived in Paris. My name is Sarah and I live in London."),
+("What's my name?", "My name is Clara and I live in Berkeley."))
+.toDF("question", "context")
+
+val result = pipeline.fit(example).transform(example)
+```
+
+{:.nlu-block}
+```python
+import nlu
+nlu.load("en.answer_question.squad.span_bert.base_cased_1024d_seed_2").predict("""What's my name?|||My name is Clara and I live in Berkeley.""")
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|bert_qa_spanbert_base_cased_few_shot_k_1024_finetuned_squad_seed_2|
+|Compatibility:|Spark NLP 5.2.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|389.8 MB|
+|Case sensitive:|true|
+|Max sentence length:|512|
+
+## References
+
+References
+
+- https://huggingface.co/anas-awadalla/spanbert-base-cased-few-shot-k-1024-finetuned-squad-seed-2
\ No newline at end of file
diff --git a/docs/_posts/ahmedlone127/2023-11-15-bert_qa_spanbert_base_cased_few_shot_k_1024_finetuned_squad_seed_4_en.md b/docs/_posts/ahmedlone127/2023-11-15-bert_qa_spanbert_base_cased_few_shot_k_1024_finetuned_squad_seed_4_en.md
new file mode 100644
index 00000000000000..3ce5503e9fd9ff
--- /dev/null
+++ b/docs/_posts/ahmedlone127/2023-11-15-bert_qa_spanbert_base_cased_few_shot_k_1024_finetuned_squad_seed_4_en.md
@@ -0,0 +1,108 @@
+---
+layout: model
+title: English BertForQuestionAnswering model (from anas-awadalla)
+author: John Snow Labs
+name: bert_qa_spanbert_base_cased_few_shot_k_1024_finetuned_squad_seed_4
+date: 2023-11-15
+tags: [en, open_source, question_answering, bert, onnx]
+task: Question Answering
+language: en
+edition: Spark NLP 5.2.0
+spark_version: 3.0
+supported: true
+engine: onnx
+annotator: BertForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained Question Answering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `spanbert-base-cased-few-shot-k-1024-finetuned-squad-seed-4` is an English model originally trained by `anas-awadalla`.
+
+## Predicted Entities
+
+
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/bert_qa_spanbert_base_cased_few_shot_k_1024_finetuned_squad_seed_4_en_5.2.0_3.0_1700012629885.zip){:.button.button-orange.button-orange-trans.arr.button-icon}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/bert_qa_spanbert_base_cased_few_shot_k_1024_finetuned_squad_seed_4_en_5.2.0_3.0_1700012629885.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+document_assembler = MultiDocumentAssembler() \
+.setInputCols(["question", "context"]) \
+.setOutputCols(["document_question", "document_context"])
+
+spanClassifier = BertForQuestionAnswering.pretrained("bert_qa_spanbert_base_cased_few_shot_k_1024_finetuned_squad_seed_4","en") \
+.setInputCols(["document_question", "document_context"]) \
+.setOutputCol("answer") \
+.setCaseSensitive(True)
+
+pipeline = Pipeline().setStages([
+document_assembler,
+spanClassifier
+])
+
+example = spark.createDataFrame([["What's my name?", "My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(example).transform(example)
+```
+```scala
+val document = new MultiDocumentAssembler()
+.setInputCols("question", "context")
+.setOutputCols("document_question", "document_context")
+
+val spanClassifier = BertForQuestionAnswering
+.pretrained("bert_qa_spanbert_base_cased_few_shot_k_1024_finetuned_squad_seed_4","en")
+.setInputCols(Array("document_question", "document_context"))
+.setOutputCol("answer")
+.setCaseSensitive(true)
+.setMaxSentenceLength(512)
+
+val pipeline = new Pipeline().setStages(Array(document, spanClassifier))
+
+val example = Seq(
+("Where was John Lenon born?", "John Lenon was born in London and lived in Paris. My name is Sarah and I live in London."),
+("What's my name?", "My name is Clara and I live in Berkeley."))
+.toDF("question", "context")
+
+val result = pipeline.fit(example).transform(example)
+```
+
+{:.nlu-block}
+```python
+import nlu
+nlu.load("en.answer_question.squad.span_bert.base_cased_1024d_seed_4").predict("""What's my name?|||My name is Clara and I live in Berkeley.""")
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|bert_qa_spanbert_base_cased_few_shot_k_1024_finetuned_squad_seed_4|
+|Compatibility:|Spark NLP 5.2.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|390.0 MB|
+|Case sensitive:|true|
+|Max sentence length:|512|
+
+## References
+
+References
+
+- https://huggingface.co/anas-awadalla/spanbert-base-cased-few-shot-k-1024-finetuned-squad-seed-4
\ No newline at end of file
diff --git a/docs/_posts/ahmedlone127/2023-11-15-bert_qa_spanbert_base_cased_few_shot_k_128_finetuned_squad_seed_0_en.md b/docs/_posts/ahmedlone127/2023-11-15-bert_qa_spanbert_base_cased_few_shot_k_128_finetuned_squad_seed_0_en.md
new file mode 100644
index 00000000000000..c245c387c433f7
--- /dev/null
+++ b/docs/_posts/ahmedlone127/2023-11-15-bert_qa_spanbert_base_cased_few_shot_k_128_finetuned_squad_seed_0_en.md
@@ -0,0 +1,100 @@
+---
+layout: model
+title: English BertForQuestionAnswering Base Cased model (from anas-awadalla)
+author: John Snow Labs
+name: bert_qa_spanbert_base_cased_few_shot_k_128_finetuned_squad_seed_0
+date: 2023-11-15
+tags: [en, open_source, bert, question_answering, onnx]
+task: Question Answering
+language: en
+edition: Spark NLP 5.2.0
+spark_version: 3.0
+supported: true
+engine: onnx
+annotator: BertForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained Question Answering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `spanbert-base-cased-few-shot-k-128-finetuned-squad-seed-0` is an English model originally trained by `anas-awadalla`.
+
+## Predicted Entities
+
+
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/bert_qa_spanbert_base_cased_few_shot_k_128_finetuned_squad_seed_0_en_5.2.0_3.0_1700012881424.zip){:.button.button-orange.button-orange-trans.arr.button-icon}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/bert_qa_spanbert_base_cased_few_shot_k_128_finetuned_squad_seed_0_en_5.2.0_3.0_1700012881424.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+documentAssembler = MultiDocumentAssembler() \
+ .setInputCols(["question", "context"]) \
+ .setOutputCols(["document_question", "document_context"])
+
+spanClassifier = BertForQuestionAnswering.pretrained("bert_qa_spanbert_base_cased_few_shot_k_128_finetuned_squad_seed_0","en") \
+ .setInputCols(["document_question", "document_context"]) \
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[documentAssembler, spanClassifier])
+
+data = spark.createDataFrame([["What is my name?", "My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val documentAssembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val spanClassifier = BertForQuestionAnswering.pretrained("bert_qa_spanbert_base_cased_few_shot_k_128_finetuned_squad_seed_0","en")
+  .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(documentAssembler, spanClassifier))
+
+val data = Seq(("What is my name?", "My name is Clara and I live in Berkeley.")).toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+{:.nlu-block}
+```python
+import nlu
+nlu.load("en.answer_question.span_bert.squad.cased_seed_0_base_128d_finetuned_few_shot").predict("""What is my name?|||My name is Clara and I live in Berkeley.""")
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|bert_qa_spanbert_base_cased_few_shot_k_128_finetuned_squad_seed_0|
+|Compatibility:|Spark NLP 5.2.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|380.5 MB|
+|Case sensitive:|true|
+|Max sentence length:|512|
+
+## References
+
+References
+
+- https://huggingface.co/anas-awadalla/spanbert-base-cased-few-shot-k-128-finetuned-squad-seed-0
\ No newline at end of file
diff --git a/docs/_posts/ahmedlone127/2023-11-15-bert_qa_spanbert_base_cased_few_shot_k_16_finetuned_squad_seed_0_en.md b/docs/_posts/ahmedlone127/2023-11-15-bert_qa_spanbert_base_cased_few_shot_k_16_finetuned_squad_seed_0_en.md
new file mode 100644
index 00000000000000..e17daa069a0946
--- /dev/null
+++ b/docs/_posts/ahmedlone127/2023-11-15-bert_qa_spanbert_base_cased_few_shot_k_16_finetuned_squad_seed_0_en.md
@@ -0,0 +1,100 @@
+---
+layout: model
+title: English BertForQuestionAnswering Base Cased model (from anas-awadalla)
+author: John Snow Labs
+name: bert_qa_spanbert_base_cased_few_shot_k_16_finetuned_squad_seed_0
+date: 2023-11-15
+tags: [en, open_source, bert, question_answering, onnx]
+task: Question Answering
+language: en
+edition: Spark NLP 5.2.0
+spark_version: 3.0
+supported: true
+engine: onnx
+annotator: BertForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained Question Answering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `spanbert-base-cased-few-shot-k-16-finetuned-squad-seed-0` is an English model originally trained by `anas-awadalla`.
+
+## Predicted Entities
+
+
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/bert_qa_spanbert_base_cased_few_shot_k_16_finetuned_squad_seed_0_en_5.2.0_3.0_1700013150885.zip){:.button.button-orange.button-orange-trans.arr.button-icon}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/bert_qa_spanbert_base_cased_few_shot_k_16_finetuned_squad_seed_0_en_5.2.0_3.0_1700013150885.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+documentAssembler = MultiDocumentAssembler() \
+ .setInputCols(["question", "context"]) \
+ .setOutputCols(["document_question", "document_context"])
+
+spanClassifier = BertForQuestionAnswering.pretrained("bert_qa_spanbert_base_cased_few_shot_k_16_finetuned_squad_seed_0","en") \
+ .setInputCols(["document_question", "document_context"]) \
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[documentAssembler, spanClassifier])
+
+data = spark.createDataFrame([["What is my name?", "My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val documentAssembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val spanClassifier = BertForQuestionAnswering.pretrained("bert_qa_spanbert_base_cased_few_shot_k_16_finetuned_squad_seed_0","en")
+  .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(documentAssembler, spanClassifier))
+
+val data = Seq(("What is my name?", "My name is Clara and I live in Berkeley.")).toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+{:.nlu-block}
+```python
+import nlu
+nlu.load("en.answer_question.span_bert.squad.cased_seed_0_base_finetuned_few_shot").predict("""What is my name?|||My name is Clara and I live in Berkeley.""")
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|bert_qa_spanbert_base_cased_few_shot_k_16_finetuned_squad_seed_0|
+|Compatibility:|Spark NLP 5.2.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|375.2 MB|
+|Case sensitive:|true|
+|Max sentence length:|512|
+
+## References
+
+References
+
+- https://huggingface.co/anas-awadalla/spanbert-base-cased-few-shot-k-16-finetuned-squad-seed-0
\ No newline at end of file
diff --git a/docs/_posts/ahmedlone127/2023-11-15-bert_qa_spanbert_base_cased_few_shot_k_16_finetuned_squad_seed_2_en.md b/docs/_posts/ahmedlone127/2023-11-15-bert_qa_spanbert_base_cased_few_shot_k_16_finetuned_squad_seed_2_en.md
new file mode 100644
index 00000000000000..69c4a1eddc98e5
--- /dev/null
+++ b/docs/_posts/ahmedlone127/2023-11-15-bert_qa_spanbert_base_cased_few_shot_k_16_finetuned_squad_seed_2_en.md
@@ -0,0 +1,100 @@
+---
+layout: model
+title: English BertForQuestionAnswering Base Cased model (from anas-awadalla)
+author: John Snow Labs
+name: bert_qa_spanbert_base_cased_few_shot_k_16_finetuned_squad_seed_2
+date: 2023-11-15
+tags: [en, open_source, bert, question_answering, onnx]
+task: Question Answering
+language: en
+edition: Spark NLP 5.2.0
+spark_version: 3.0
+supported: true
+engine: onnx
+annotator: BertForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained Question Answering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `spanbert-base-cased-few-shot-k-16-finetuned-squad-seed-2` is an English model originally trained by `anas-awadalla`.
+
+## Predicted Entities
+
+
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/bert_qa_spanbert_base_cased_few_shot_k_16_finetuned_squad_seed_2_en_5.2.0_3.0_1700013427254.zip){:.button.button-orange.button-orange-trans.arr.button-icon}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/bert_qa_spanbert_base_cased_few_shot_k_16_finetuned_squad_seed_2_en_5.2.0_3.0_1700013427254.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+documentAssembler = MultiDocumentAssembler() \
+ .setInputCols(["question", "context"]) \
+ .setOutputCols(["document_question", "document_context"])
+
+spanClassifier = BertForQuestionAnswering.pretrained("bert_qa_spanbert_base_cased_few_shot_k_16_finetuned_squad_seed_2","en") \
+ .setInputCols(["document_question", "document_context"]) \
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[documentAssembler, spanClassifier])
+
+data = spark.createDataFrame([["What is my name?", "My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val documentAssembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val spanClassifier = BertForQuestionAnswering.pretrained("bert_qa_spanbert_base_cased_few_shot_k_16_finetuned_squad_seed_2","en")
+  .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(documentAssembler, spanClassifier))
+
+val data = Seq(("What is my name?", "My name is Clara and I live in Berkeley.")).toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+{:.nlu-block}
+```python
+import nlu
+nlu.load("en.answer_question.span_bert.squad.cased_seed_2_base_finetuned_few_shot").predict("""What is my name?|||My name is Clara and I live in Berkeley.""")
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|bert_qa_spanbert_base_cased_few_shot_k_16_finetuned_squad_seed_2|
+|Compatibility:|Spark NLP 5.2.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|375.3 MB|
+|Case sensitive:|true|
+|Max sentence length:|512|
+
+## References
+
+References
+
+- https://huggingface.co/anas-awadalla/spanbert-base-cased-few-shot-k-16-finetuned-squad-seed-2
\ No newline at end of file
diff --git a/docs/_posts/ahmedlone127/2023-11-15-bert_qa_spanbert_base_cased_few_shot_k_512_finetuned_squad_seed_0_en.md b/docs/_posts/ahmedlone127/2023-11-15-bert_qa_spanbert_base_cased_few_shot_k_512_finetuned_squad_seed_0_en.md
new file mode 100644
index 00000000000000..2f13ec4e1a1b6c
--- /dev/null
+++ b/docs/_posts/ahmedlone127/2023-11-15-bert_qa_spanbert_base_cased_few_shot_k_512_finetuned_squad_seed_0_en.md
@@ -0,0 +1,108 @@
+---
+layout: model
+title: English BertForQuestionAnswering model (from anas-awadalla)
+author: John Snow Labs
+name: bert_qa_spanbert_base_cased_few_shot_k_512_finetuned_squad_seed_0
+date: 2023-11-15
+tags: [en, open_source, question_answering, bert, onnx]
+task: Question Answering
+language: en
+edition: Spark NLP 5.2.0
+spark_version: 3.0
+supported: true
+engine: onnx
+annotator: BertForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained Question Answering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `spanbert-base-cased-few-shot-k-512-finetuned-squad-seed-0` is a English model originally trained by `anas-awadalla`.
+
+## Predicted Entities
+
+
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/bert_qa_spanbert_base_cased_few_shot_k_512_finetuned_squad_seed_0_en_5.2.0_3.0_1700013682338.zip){:.button.button-orange.button-orange-trans.arr.button-icon}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/bert_qa_spanbert_base_cased_few_shot_k_512_finetuned_squad_seed_0_en_5.2.0_3.0_1700013682338.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+document_assembler = MultiDocumentAssembler() \
+.setInputCols(["question", "context"]) \
+.setOutputCols(["document_question", "document_context"])
+
+spanClassifier = BertForQuestionAnswering.pretrained("bert_qa_spanbert_base_cased_few_shot_k_512_finetuned_squad_seed_0","en") \
+.setInputCols(["document_question", "document_context"]) \
+.setOutputCol("answer") \
+.setCaseSensitive(True)
+
+pipeline = Pipeline().setStages([
+document_assembler,
+spanClassifier
+])
+
+example = spark.createDataFrame([["What's my name?", "My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(example).transform(example)
+```
+```scala
+val document = new MultiDocumentAssembler()
+.setInputCols("question", "context")
+.setOutputCols("document_question", "document_context")
+
+val spanClassifier = BertForQuestionAnswering
+.pretrained("bert_qa_spanbert_base_cased_few_shot_k_512_finetuned_squad_seed_0","en")
+.setInputCols(Array("document_question", "document_context"))
+.setOutputCol("answer")
+.setCaseSensitive(true)
+.setMaxSentenceLength(512)
+
+val pipeline = new Pipeline().setStages(Array(document, spanClassifier))
+
+val example = Seq(
+("Where was John Lenon born?", "John Lenon was born in London and lived in Paris. My name is Sarah and I live in London."),
+("What's my name?", "My name is Clara and I live in Berkeley."))
+.toDF("question", "context")
+
+val result = pipeline.fit(example).transform(example)
+```
+
+{:.nlu-block}
+```python
+import nlu
+nlu.load("en.answer_question.squad.span_bert.base_cased_512d_seed_0").predict("""What's my name?|||My name is Clara and I live in Berkeley.""")
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|bert_qa_spanbert_base_cased_few_shot_k_512_finetuned_squad_seed_0|
+|Compatibility:|Spark NLP 5.2.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|386.7 MB|
+|Case sensitive:|true|
+|Max sentence length:|512|
+
+## References
+
+References
+
+- https://huggingface.co/anas-awadalla/spanbert-base-cased-few-shot-k-512-finetuned-squad-seed-0
\ No newline at end of file
diff --git a/docs/_posts/ahmedlone127/2023-11-15-bert_qa_spanbert_base_cased_few_shot_k_512_finetuned_squad_seed_4_en.md b/docs/_posts/ahmedlone127/2023-11-15-bert_qa_spanbert_base_cased_few_shot_k_512_finetuned_squad_seed_4_en.md
new file mode 100644
index 00000000000000..8bfe11974a0ca3
--- /dev/null
+++ b/docs/_posts/ahmedlone127/2023-11-15-bert_qa_spanbert_base_cased_few_shot_k_512_finetuned_squad_seed_4_en.md
@@ -0,0 +1,100 @@
+---
+layout: model
+title: English BertForQuestionAnswering Base Cased model (from anas-awadalla)
+author: John Snow Labs
+name: bert_qa_spanbert_base_cased_few_shot_k_512_finetuned_squad_seed_4
+date: 2023-11-15
+tags: [en, open_source, bert, question_answering, onnx]
+task: Question Answering
+language: en
+edition: Spark NLP 5.2.0
+spark_version: 3.0
+supported: true
+engine: onnx
+annotator: BertForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained Question Answering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `spanbert-base-cased-few-shot-k-512-finetuned-squad-seed-4` is a English model originally trained by `anas-awadalla`.
+
+## Predicted Entities
+
+
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/bert_qa_spanbert_base_cased_few_shot_k_512_finetuned_squad_seed_4_en_5.2.0_3.0_1700015715389.zip){:.button.button-orange.button-orange-trans.arr.button-icon}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/bert_qa_spanbert_base_cased_few_shot_k_512_finetuned_squad_seed_4_en_5.2.0_3.0_1700015715389.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+documentAssembler = MultiDocumentAssembler() \
+ .setInputCols(["question", "context"]) \
+ .setOutputCols(["document_question", "document_context"])
+
+spanClassifier = BertForQuestionAnswering.pretrained("bert_qa_spanbert_base_cased_few_shot_k_512_finetuned_squad_seed_4","en") \
+ .setInputCols(["document_question", "document_context"]) \
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[documentAssembler, spanClassifier])
+
+data = spark.createDataFrame([["What is my name?", "My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val documentAssembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val spanClassifier = BertForQuestionAnswering.pretrained("bert_qa_spanbert_base_cased_few_shot_k_512_finetuned_squad_seed_4","en")
+  .setInputCols(Array("document_question", "document_context"))
+  .setOutputCol("answer")
+  .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(documentAssembler, spanClassifier))
+
+val data = Seq(("What is my name?", "My name is Clara and I live in Berkeley.")).toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+{:.nlu-block}
+```python
+import nlu
+nlu.load("en.answer_question.span_bert.squad.cased_seed_4_base_512d_finetuned_few_shot").predict("""What is my name?|||My name is Clara and I live in Berkeley.""")
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|bert_qa_spanbert_base_cased_few_shot_k_512_finetuned_squad_seed_4|
+|Compatibility:|Spark NLP 5.2.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|386.7 MB|
+|Case sensitive:|true|
+|Max sentence length:|512|
+
+## References
+
+References
+
+- https://huggingface.co/anas-awadalla/spanbert-base-cased-few-shot-k-512-finetuned-squad-seed-4
\ No newline at end of file