diff --git a/docs/_posts/Cabir40/2023-01-30-t5_1zha5ono_en.md b/docs/_posts/Cabir40/2023-01-30-t5_1zha5ono_en.md
new file mode 100644
index 00000000000000..cb0df9b3906c3f
--- /dev/null
+++ b/docs/_posts/Cabir40/2023-01-30-t5_1zha5ono_en.md
@@ -0,0 +1,91 @@
+---
+layout: model
+title: English T5ForConditionalGeneration Cased model (from tscholak)
+author: John Snow Labs
+name: t5_1zha5ono
+date: 2023-01-30
+tags: [en, open_source, t5]
+task: Text Generation
+language: en
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+annotator: T5Transformer
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained T5ForConditionalGeneration model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `1zha5ono` is an English model originally trained by `tscholak`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/t5_1zha5ono_en_4.3.0_3.0_1675095935006.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/t5_1zha5ono_en_4.3.0_3.0_1675095935006.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|t5_1zha5ono|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[documents]|
+|Output Labels:|[t5]|
+|Language:|en|
+|Size:|284.9 MB|
+
+## References
+
+- https://huggingface.co/tscholak/1zha5ono
\ No newline at end of file
diff --git a/docs/_posts/Cabir40/2023-01-30-t5_autotrain_inference_probability_3_900329401_en.md b/docs/_posts/Cabir40/2023-01-30-t5_autotrain_inference_probability_3_900329401_en.md
new file mode 100644
index 00000000000000..3cc9993fc31499
--- /dev/null
+++ b/docs/_posts/Cabir40/2023-01-30-t5_autotrain_inference_probability_3_900329401_en.md
@@ -0,0 +1,84 @@
+---
+layout: model
+title: English T5ForConditionalGeneration Cased model (from jeremyccollinsmpi)
+author: John Snow Labs
+name: t5_autotrain_inference_probability_3_900329401
+date: 2023-01-30
+tags: [en, open_source, t5, tensorflow]
+task: Text Generation
+language: en
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: T5Transformer
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained T5ForConditionalGeneration model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `autotrain-inference_probability_3-900329401` is an English model originally trained by `jeremyccollinsmpi`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/t5_autotrain_inference_probability_3_900329401_en_4.3.0_3.0_1675099900080.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/t5_autotrain_inference_probability_3_900329401_en_4.3.0_3.0_1675099900080.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|t5_autotrain_inference_probability_3_900329401|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[documents]|
+|Output Labels:|[t5]|
+|Language:|en|
+|Size:|846.1 MB|
+
+## References
+
+- https://huggingface.co/jeremyccollinsmpi/autotrain-inference_probability_3-900329401
\ No newline at end of file
diff --git a/docs/_posts/Cabir40/2023-01-30-t5_autotrain_ms_2_1174443640_en.md b/docs/_posts/Cabir40/2023-01-30-t5_autotrain_ms_2_1174443640_en.md
new file mode 100644
index 00000000000000..bbbc802e7011cf
--- /dev/null
+++ b/docs/_posts/Cabir40/2023-01-30-t5_autotrain_ms_2_1174443640_en.md
@@ -0,0 +1,84 @@
+---
+layout: model
+title: English T5ForConditionalGeneration Cased model (from benjamyu)
+author: John Snow Labs
+name: t5_autotrain_ms_2_1174443640
+date: 2023-01-30
+tags: [en, open_source, t5, tensorflow]
+task: Text Generation
+language: en
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: T5Transformer
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained T5ForConditionalGeneration model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `autotrain-ms-2-1174443640` is an English model originally trained by `benjamyu`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/t5_autotrain_ms_2_1174443640_en_4.3.0_3.0_1675099983295.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/t5_autotrain_ms_2_1174443640_en_4.3.0_3.0_1675099983295.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|t5_autotrain_ms_2_1174443640|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[documents]|
+|Output Labels:|[t5]|
+|Language:|en|
+|Size:|1.0 GB|
+
+## References
+
+- https://huggingface.co/benjamyu/autotrain-ms-2-1174443640
+
\ No newline at end of file
diff --git a/docs/_posts/Cabir40/2023-01-30-t5_base_amazonreviews_en.md b/docs/_posts/Cabir40/2023-01-30-t5_base_amazonreviews_en.md
new file mode 100644
index 00000000000000..42b85693d4253b
--- /dev/null
+++ b/docs/_posts/Cabir40/2023-01-30-t5_base_amazonreviews_en.md
@@ -0,0 +1,84 @@
+---
+layout: model
+title: English T5ForConditionalGeneration Base Cased model (from sumedh)
+author: John Snow Labs
+name: t5_base_amazonreviews
+date: 2023-01-30
+tags: [en, open_source, t5, tensorflow]
+task: Text Generation
+language: en
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: T5Transformer
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained T5ForConditionalGeneration model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `t5-base-amazonreviews` is an English model originally trained by `sumedh`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/t5_base_amazonreviews_en_4.3.0_3.0_1675107991189.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/t5_base_amazonreviews_en_4.3.0_3.0_1675107991189.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|t5_base_amazonreviews|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[documents]|
+|Output Labels:|[t5]|
+|Language:|en|
+|Size:|921.3 MB|
+
+## References
+
+- https://huggingface.co/sumedh/t5-base-amazonreviews
\ No newline at end of file
diff --git a/docs/_posts/Cabir40/2023-01-30-t5_base_askscience_en.md b/docs/_posts/Cabir40/2023-01-30-t5_base_askscience_en.md
new file mode 100644
index 00000000000000..9592ea70f67037
--- /dev/null
+++ b/docs/_posts/Cabir40/2023-01-30-t5_base_askscience_en.md
@@ -0,0 +1,84 @@
+---
+layout: model
+title: English T5ForConditionalGeneration Base Cased model (from pszemraj)
+author: John Snow Labs
+name: t5_base_askscience
+date: 2023-01-30
+tags: [en, open_source, t5, tensorflow]
+task: Text Generation
+language: en
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: T5Transformer
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained T5ForConditionalGeneration model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `t5-base-askscience` is an English model originally trained by `pszemraj`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/t5_base_askscience_en_4.3.0_3.0_1675108079371.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/t5_base_askscience_en_4.3.0_3.0_1675108079371.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|t5_base_askscience|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[documents]|
+|Output Labels:|[t5]|
+|Language:|en|
+|Size:|473.3 MB|
+
+## References
+
+- https://huggingface.co/pszemraj/t5-base-askscience
+
+
+
\ No newline at end of file
diff --git a/docs/_posts/Cabir40/2023-01-30-t5_base_conversation_en.md b/docs/_posts/Cabir40/2023-01-30-t5_base_conversation_en.md
new file mode 100644
index 00000000000000..8e541d0d07566c
--- /dev/null
+++ b/docs/_posts/Cabir40/2023-01-30-t5_base_conversation_en.md
@@ -0,0 +1,88 @@
+---
+layout: model
+title: English T5ForConditionalGeneration Base Cased model (from Supiri)
+author: John Snow Labs
+name: t5_base_conversation
+date: 2023-01-30
+tags: [en, open_source, t5, tensorflow]
+task: Text Generation
+language: en
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: T5Transformer
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained T5ForConditionalGeneration model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `t5-base-conversation` is an English model originally trained by `Supiri`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/t5_base_conversation_en_4.3.0_3.0_1675108355643.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/t5_base_conversation_en_4.3.0_3.0_1675108355643.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|t5_base_conversation|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[documents]|
+|Output Labels:|[t5]|
+|Language:|en|
+|Size:|922.1 MB|
+
+## References
+
+- https://huggingface.co/Supiri/t5-base-conversation
+
\ No newline at end of file
diff --git a/docs/_posts/Cabir40/2023-01-30-t5_base_finetuned_break_data_en.md b/docs/_posts/Cabir40/2023-01-30-t5_base_finetuned_break_data_en.md
new file mode 100644
index 00000000000000..38a68493df2283
--- /dev/null
+++ b/docs/_posts/Cabir40/2023-01-30-t5_base_finetuned_break_data_en.md
@@ -0,0 +1,91 @@
+---
+layout: model
+title: English T5ForConditionalGeneration Base Cased model (from mrm8488)
+author: John Snow Labs
+name: t5_base_finetuned_break_data
+date: 2023-01-30
+tags: [en, open_source, t5, tensorflow]
+task: Text Generation
+language: en
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: T5Transformer
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained T5ForConditionalGeneration model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `t5-base-finetuned-break_data` is an English model originally trained by `mrm8488`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/t5_base_finetuned_break_data_en_4.3.0_3.0_1675108822999.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/t5_base_finetuned_break_data_en_4.3.0_3.0_1675108822999.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|t5_base_finetuned_break_data|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[documents]|
+|Output Labels:|[t5]|
+|Language:|en|
+|Size:|913.5 MB|
+
+## References
+
+- https://huggingface.co/mrm8488/t5-base-finetuned-break_data
\ No newline at end of file
diff --git a/docs/_posts/Cabir40/2023-01-30-t5_base_finetuned_span_sentiment_extraction_en.md b/docs/_posts/Cabir40/2023-01-30-t5_base_finetuned_span_sentiment_extraction_en.md
new file mode 100644
index 00000000000000..2726903392809b
--- /dev/null
+++ b/docs/_posts/Cabir40/2023-01-30-t5_base_finetuned_span_sentiment_extraction_en.md
@@ -0,0 +1,93 @@
+---
+layout: model
+title: English T5ForConditionalGeneration Base Cased model (from mrm8488)
+author: John Snow Labs
+name: t5_base_finetuned_span_sentiment_extraction
+date: 2023-01-30
+tags: [en, open_source, t5, tensorflow]
+task: Text Generation
+language: en
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: T5Transformer
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained T5ForConditionalGeneration model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `t5-base-finetuned-span-sentiment-extraction` is an English model originally trained by `mrm8488`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/t5_base_finetuned_span_sentiment_extraction_en_4.3.0_3.0_1675109003319.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/t5_base_finetuned_span_sentiment_extraction_en_4.3.0_3.0_1675109003319.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|t5_base_finetuned_span_sentiment_extraction|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[documents]|
+|Output Labels:|[t5]|
+|Language:|en|
+|Size:|919.7 MB|
+
+## References
+
+- https://huggingface.co/mrm8488/t5-base-finetuned-span-sentiment-extraction
\ No newline at end of file
diff --git a/docs/_posts/Cabir40/2023-01-30-t5_base_gnad_maxsamples_de.md b/docs/_posts/Cabir40/2023-01-30-t5_base_gnad_maxsamples_de.md
new file mode 100644
index 00000000000000..4b34c3ce80780a
--- /dev/null
+++ b/docs/_posts/Cabir40/2023-01-30-t5_base_gnad_maxsamples_de.md
@@ -0,0 +1,84 @@
+---
+layout: model
+title: German T5ForConditionalGeneration Base Cased model (from Einmalumdiewelt)
+author: John Snow Labs
+name: t5_base_gnad_maxsamples
+date: 2023-01-30
+tags: [de, open_source, t5, tensorflow]
+task: Text Generation
+language: de
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: T5Transformer
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained T5ForConditionalGeneration model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `T5-Base_GNAD_MaxSamples` is a German model originally trained by `Einmalumdiewelt`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/t5_base_gnad_maxsamples_de_4.3.0_3.0_1675099257674.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/t5_base_gnad_maxsamples_de_4.3.0_3.0_1675099257674.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|t5_base_gnad_maxsamples|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[documents]|
+|Output Labels:|[t5]|
+|Language:|de|
+|Size:|922.8 MB|
+
+## References
+
+- https://huggingface.co/Einmalumdiewelt/T5-Base_GNAD_MaxSamples
\ No newline at end of file
diff --git a/docs/_posts/Cabir40/2023-01-30-t5_base_grammar_correction_en.md b/docs/_posts/Cabir40/2023-01-30-t5_base_grammar_correction_en.md
new file mode 100644
index 00000000000000..808afefa952ceb
--- /dev/null
+++ b/docs/_posts/Cabir40/2023-01-30-t5_base_grammar_correction_en.md
@@ -0,0 +1,87 @@
+---
+layout: model
+title: English T5ForConditionalGeneration Base Cased model (from vennify)
+author: John Snow Labs
+name: t5_base_grammar_correction
+date: 2023-01-30
+tags: [en, open_source, t5, tensorflow]
+task: Text Generation
+language: en
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: T5Transformer
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained T5ForConditionalGeneration model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `t5-base-grammar-correction` is an English model originally trained by `vennify`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/t5_base_grammar_correction_en_4.3.0_3.0_1675109588406.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/t5_base_grammar_correction_en_4.3.0_3.0_1675109588406.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|t5_base_grammar_correction|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[documents]|
+|Output Labels:|[t5]|
+|Language:|en|
+|Size:|923.2 MB|
+
+## References
+
+- https://huggingface.co/vennify/t5-base-grammar-correction
\ No newline at end of file
diff --git a/docs/_posts/Cabir40/2023-01-30-t5_base_v1.1_fine_tuned_for_question_generation_en.md b/docs/_posts/Cabir40/2023-01-30-t5_base_v1.1_fine_tuned_for_question_generation_en.md
new file mode 100644
index 00000000000000..d1b8c5cd0b489f
--- /dev/null
+++ b/docs/_posts/Cabir40/2023-01-30-t5_base_v1.1_fine_tuned_for_question_generation_en.md
@@ -0,0 +1,84 @@
+---
+layout: model
+title: English T5ForConditionalGeneration Base Cased model (from ZhangCheng)
+author: John Snow Labs
+name: t5_base_v1.1_fine_tuned_for_question_generation
+date: 2023-01-30
+tags: [en, open_source, t5, tensorflow]
+task: Text Generation
+language: en
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: T5Transformer
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained T5ForConditionalGeneration model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `T5v1.1-Base-Fine-Tuned-for-Question-Generation` is an English model originally trained by `ZhangCheng`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/t5_base_v1.1_fine_tuned_for_question_generation_en_4.3.0_3.0_1675099669115.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/t5_base_v1.1_fine_tuned_for_question_generation_en_4.3.0_3.0_1675099669115.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|t5_base_v1.1_fine_tuned_for_question_generation|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[documents]|
+|Output Labels:|[t5]|
+|Language:|en|
+|Size:|1.0 GB|
+
+## References
+
+- https://huggingface.co/ZhangCheng/T5v1.1-Base-Fine-Tuned-for-Question-Generation
\ No newline at end of file
diff --git a/docs/_posts/Cabir40/2023-01-30-t5_cahya_base_indonesian_summarization_cased_id.md b/docs/_posts/Cabir40/2023-01-30-t5_cahya_base_indonesian_summarization_cased_id.md
new file mode 100644
index 00000000000000..d11483eb8045f6
--- /dev/null
+++ b/docs/_posts/Cabir40/2023-01-30-t5_cahya_base_indonesian_summarization_cased_id.md
@@ -0,0 +1,84 @@
+---
+layout: model
+title: Indonesian T5ForConditionalGeneration Base Cased model (from cahya)
+author: John Snow Labs
+name: t5_cahya_base_indonesian_summarization_cased
+date: 2023-01-30
+tags: [id, open_source, t5, tensorflow]
+task: Text Generation
+language: id
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: T5Transformer
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained T5ForConditionalGeneration model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `t5-base-indonesian-summarization-cased` is an Indonesian model originally trained by `cahya`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/t5_cahya_base_indonesian_summarization_cased_id_4.3.0_3.0_1675109672981.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/t5_cahya_base_indonesian_summarization_cased_id_4.3.0_3.0_1675109672981.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|t5_cahya_base_indonesian_summarization_cased|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[documents]|
+|Output Labels:|[t5]|
+|Language:|id|
+|Size:|926.2 MB|
+
+## References
+
+- https://huggingface.co/cahya/t5-base-indonesian-summarization-cased
\ No newline at end of file
diff --git a/docs/_posts/Cabir40/2023-01-30-t5_caribe_capitalise_en.md b/docs/_posts/Cabir40/2023-01-30-t5_caribe_capitalise_en.md
new file mode 100644
index 00000000000000..845f761ee04422
--- /dev/null
+++ b/docs/_posts/Cabir40/2023-01-30-t5_caribe_capitalise_en.md
@@ -0,0 +1,85 @@
+---
+layout: model
+title: English T5ForConditionalGeneration Cased model (from KES)
+author: John Snow Labs
+name: t5_caribe_capitalise
+date: 2023-01-30
+tags: [en, open_source, t5, tensorflow]
+task: Text Generation
+language: en
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: T5Transformer
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained T5ForConditionalGeneration model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `caribe-capitalise` is an English model originally trained by `KES`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/t5_caribe_capitalise_en_4.3.0_3.0_1675100357729.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/t5_caribe_capitalise_en_4.3.0_3.0_1675100357729.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|t5_caribe_capitalise|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[documents]|
+|Output Labels:|[t5]|
+|Language:|en|
+|Size:|910.7 MB|
+
+## References
+
+- https://huggingface.co/KES/caribe-capitalise
\ No newline at end of file
diff --git a/docs/_posts/Cabir40/2023-01-30-t5_ct5_base_wiki_en.md b/docs/_posts/Cabir40/2023-01-30-t5_ct5_base_wiki_en.md
new file mode 100644
index 00000000000000..687d8763e272bb
--- /dev/null
+++ b/docs/_posts/Cabir40/2023-01-30-t5_ct5_base_wiki_en.md
@@ -0,0 +1,85 @@
+---
+layout: model
+title: English T5ForConditionalGeneration Base Cased model (from mtreviso)
+author: John Snow Labs
+name: t5_ct5_base_wiki
+date: 2023-01-30
+tags: [en, open_source, t5, tensorflow]
+task: Text Generation
+language: en
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: T5Transformer
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained T5ForConditionalGeneration model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `ct5-base-en-wiki` is an English model originally trained by `mtreviso`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/t5_ct5_base_wiki_en_4.3.0_3.0_1675100704207.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/t5_ct5_base_wiki_en_4.3.0_3.0_1675100704207.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|t5_ct5_base_wiki|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[documents]|
+|Output Labels:|[t5]|
+|Language:|en|
+|Size:|928.7 MB|
+
+## References
+
+- https://huggingface.co/mtreviso/ct5-base-en-wiki
+- https://github.com/mtreviso/chunked-t5
\ No newline at end of file
diff --git a/docs/_posts/Cabir40/2023-01-30-t5_ct5_small_wiki_en.md b/docs/_posts/Cabir40/2023-01-30-t5_ct5_small_wiki_en.md
new file mode 100644
index 00000000000000..dcd08513c2a6a5
--- /dev/null
+++ b/docs/_posts/Cabir40/2023-01-30-t5_ct5_small_wiki_en.md
@@ -0,0 +1,85 @@
+---
+layout: model
+title: English T5ForConditionalGeneration Small Cased model (from mtreviso)
+author: John Snow Labs
+name: t5_ct5_small_wiki
+date: 2023-01-30
+tags: [en, open_source, t5, tensorflow]
+task: Text Generation
+language: en
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: T5Transformer
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained T5ForConditionalGeneration model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `ct5-small-en-wiki` is an English model originally trained by `mtreviso`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/t5_ct5_small_wiki_en_4.3.0_3.0_1675100765401.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/t5_ct5_small_wiki_en_4.3.0_3.0_1675100765401.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|t5_ct5_small_wiki|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[documents]|
+|Output Labels:|[t5]|
+|Language:|en|
+|Size:|289.0 MB|
+
+## References
+
+- https://huggingface.co/mtreviso/ct5-small-en-wiki
+- https://github.com/mtreviso/chunked-t5
\ No newline at end of file
diff --git a/docs/_posts/Cabir40/2023-01-30-t5_ct5_small_wiki_l2r_en.md b/docs/_posts/Cabir40/2023-01-30-t5_ct5_small_wiki_l2r_en.md
new file mode 100644
index 00000000000000..5014d948e6519e
--- /dev/null
+++ b/docs/_posts/Cabir40/2023-01-30-t5_ct5_small_wiki_l2r_en.md
@@ -0,0 +1,85 @@
+---
+layout: model
+title: English T5ForConditionalGeneration Small Cased model (from mtreviso)
+author: John Snow Labs
+name: t5_ct5_small_wiki_l2r
+date: 2023-01-30
+tags: [en, open_source, t5, tensorflow]
+task: Text Generation
+language: en
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: T5Transformer
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained T5ForConditionalGeneration model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `ct5-small-en-wiki-l2r` is an English model originally trained by `mtreviso`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/t5_ct5_small_wiki_l2r_en_4.3.0_3.0_1675100797165.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/t5_ct5_small_wiki_l2r_en_4.3.0_3.0_1675100797165.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|t5_ct5_small_wiki_l2r|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[documents]|
+|Output Labels:|[t5]|
+|Language:|en|
+|Size:|288.7 MB|
+
+## References
+
+- https://huggingface.co/mtreviso/ct5-small-en-wiki-l2r
+- https://github.com/mtreviso/chunked-t5
\ No newline at end of file
diff --git a/docs/_posts/Cabir40/2023-01-30-t5_diversiformer_de.md b/docs/_posts/Cabir40/2023-01-30-t5_diversiformer_de.md
new file mode 100644
index 00000000000000..d319f2e1a2cfa1
--- /dev/null
+++ b/docs/_posts/Cabir40/2023-01-30-t5_diversiformer_de.md
@@ -0,0 +1,87 @@
+---
+layout: model
+title: German T5ForConditionalGeneration Cased model (from diversifix)
+author: John Snow Labs
+name: t5_diversiformer
+date: 2023-01-30
+tags: [de, open_source, t5, tensorflow]
+task: Text Generation
+language: de
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: T5Transformer
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained T5ForConditionalGeneration model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `diversiformer` is a German model originally trained by `diversifix`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/t5_diversiformer_de_4.3.0_3.0_1675100976411.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/t5_diversiformer_de_4.3.0_3.0_1675100976411.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|t5_diversiformer|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[documents]|
+|Output Labels:|[t5]|
+|Language:|de|
+|Size:|1.2 GB|
+
+## References
+
+- https://huggingface.co/diversifix/diversiformer
+- https://arxiv.org/abs/2010.11934
+- https://github.com/diversifix/diversiformer
+- https://www.gnu.org/licenses/
\ No newline at end of file
diff --git a/docs/_posts/Cabir40/2023-01-30-t5_doc2query_base_msmarco_en.md b/docs/_posts/Cabir40/2023-01-30-t5_doc2query_base_msmarco_en.md
new file mode 100644
index 00000000000000..4892041e6c7e1d
--- /dev/null
+++ b/docs/_posts/Cabir40/2023-01-30-t5_doc2query_base_msmarco_en.md
@@ -0,0 +1,88 @@
+---
+layout: model
+title: English T5ForConditionalGeneration Base Cased model (from macavaney)
+author: John Snow Labs
+name: t5_doc2query_base_msmarco
+date: 2023-01-30
+tags: [en, open_source, t5, tensorflow]
+task: Text Generation
+language: en
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: T5Transformer
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained T5ForConditionalGeneration model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `doc2query-t5-base-msmarco` is an English model originally trained by `macavaney`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/t5_doc2query_base_msmarco_en_4.3.0_3.0_1675101186297.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/t5_doc2query_base_msmarco_en_4.3.0_3.0_1675101186297.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|t5_doc2query_base_msmarco|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[documents]|
+|Output Labels:|[t5]|
+|Language:|en|
+|Size:|927.4 MB|
+
+## References
+
+- https://huggingface.co/macavaney/doc2query-t5-base-msmarco
+
+
\ No newline at end of file
diff --git a/docs/_posts/Cabir40/2023-01-30-t5_efficient_base_dl2_en.md b/docs/_posts/Cabir40/2023-01-30-t5_efficient_base_dl2_en.md
new file mode 100644
index 00000000000000..5d8e800eff7e3c
--- /dev/null
+++ b/docs/_posts/Cabir40/2023-01-30-t5_efficient_base_dl2_en.md
@@ -0,0 +1,88 @@
+---
+layout: model
+title: English T5ForConditionalGeneration Base Cased model (from google)
+author: John Snow Labs
+name: t5_efficient_base_dl2
+date: 2023-01-30
+tags: [en, open_source, t5, tensorflow]
+task: Text Generation
+language: en
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: T5Transformer
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained T5ForConditionalGeneration model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `t5-efficient-base-dl2` is an English model originally trained by `google`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/t5_efficient_base_dl2_en_4.3.0_3.0_1675109902657.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/t5_efficient_base_dl2_en_4.3.0_3.0_1675109902657.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|t5_efficient_base_dl2|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[documents]|
+|Output Labels:|[t5]|
+|Language:|en|
+|Size:|981.6 MB|
+
+## References
+
+- https://huggingface.co/google/t5-efficient-base-dl2
\ No newline at end of file
diff --git a/docs/_posts/Cabir40/2023-01-30-t5_fake_news_detector_en.md b/docs/_posts/Cabir40/2023-01-30-t5_fake_news_detector_en.md
new file mode 100644
index 00000000000000..57fab5f7c3f3da
--- /dev/null
+++ b/docs/_posts/Cabir40/2023-01-30-t5_fake_news_detector_en.md
@@ -0,0 +1,85 @@
+---
+layout: model
+title: English T5ForConditionalGeneration Cased model (from cometrain)
+author: John Snow Labs
+name: t5_fake_news_detector
+date: 2023-01-30
+tags: [en, open_source, t5, tensorflow]
+task: Text Generation
+language: en
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: T5Transformer
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained T5ForConditionalGeneration model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `fake-news-detector-t5` is an English model originally trained by `cometrain`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/t5_fake_news_detector_en_4.3.0_3.0_1675101857981.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/t5_fake_news_detector_en_4.3.0_3.0_1675101857981.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|t5_fake_news_detector|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[documents]|
+|Output Labels:|[t5]|
+|Language:|en|
+|Size:|277.3 MB|
+
+## References
+
+- https://huggingface.co/cometrain/fake-news-detector-t5
+- https://www.kaggle.com/datasets/clmentbisaillon/fake-and-real-news-dataset
\ No newline at end of file
diff --git a/docs/_posts/Cabir40/2023-01-30-t5_fine_tuned_model_en.md b/docs/_posts/Cabir40/2023-01-30-t5_fine_tuned_model_en.md
new file mode 100644
index 00000000000000..6dc0912f152ad5
--- /dev/null
+++ b/docs/_posts/Cabir40/2023-01-30-t5_fine_tuned_model_en.md
@@ -0,0 +1,85 @@
+---
+layout: model
+title: English T5ForConditionalGeneration Cased model (from marcus2000)
+author: John Snow Labs
+name: t5_fine_tuned_model
+date: 2023-01-30
+tags: [en, open_source, t5, tensorflow]
+task: Text Generation
+language: en
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: T5Transformer
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained T5ForConditionalGeneration model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `fine_tuned_t5_model` is an English model originally trained by `marcus2000`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/t5_fine_tuned_model_en_4.3.0_3.0_1675101922079.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/t5_fine_tuned_model_en_4.3.0_3.0_1675101922079.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|t5_fine_tuned_model|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[documents]|
+|Output Labels:|[t5]|
+|Language:|en|
+|Size:|287.5 MB|
+
+## References
+
+- https://huggingface.co/marcus2000/fine_tuned_t5_model
\ No newline at end of file
diff --git a/docs/_posts/Cabir40/2023-01-30-t5_gemini_small_en.md b/docs/_posts/Cabir40/2023-01-30-t5_gemini_small_en.md
new file mode 100644
index 00000000000000..ca3c0f24b4725d
--- /dev/null
+++ b/docs/_posts/Cabir40/2023-01-30-t5_gemini_small_en.md
@@ -0,0 +1,85 @@
+---
+layout: model
+title: English T5ForConditionalGeneration Small Cased model (from describeai)
+author: John Snow Labs
+name: t5_gemini_small
+date: 2023-01-30
+tags: [en, open_source, t5, tensorflow]
+task: Text Generation
+language: en
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: T5Transformer
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained T5ForConditionalGeneration model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `gemini-small` is an English model originally trained by `describeai`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/t5_gemini_small_en_4.3.0_3.0_1675102559187.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/t5_gemini_small_en_4.3.0_3.0_1675102559187.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|t5_gemini_small|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[documents]|
+|Output Labels:|[t5]|
+|Language:|en|
+|Size:|916.0 MB|
+
+## References
+
+- https://huggingface.co/describeai/gemini-small
+- https://www.describe-ai.com/gemini
\ No newline at end of file
diff --git a/docs/_posts/Cabir40/2023-01-30-t5_german_qg_e2e_quad_de.md b/docs/_posts/Cabir40/2023-01-30-t5_german_qg_e2e_quad_de.md
new file mode 100644
index 00000000000000..918a91a5b63412
--- /dev/null
+++ b/docs/_posts/Cabir40/2023-01-30-t5_german_qg_e2e_quad_de.md
@@ -0,0 +1,84 @@
+---
+layout: model
+title: German T5ForConditionalGeneration Cased model (from dehio)
+author: John Snow Labs
+name: t5_german_qg_e2e_quad
+date: 2023-01-30
+tags: [de, open_source, t5, tensorflow]
+task: Text Generation
+language: de
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: T5Transformer
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained T5ForConditionalGeneration model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `german-qg-t5-e2e-quad` is a German model originally trained by `dehio`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/t5_german_qg_e2e_quad_de_4.3.0_3.0_1675102645662.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/t5_german_qg_e2e_quad_de_4.3.0_3.0_1675102645662.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|t5_german_qg_e2e_quad|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[documents]|
+|Output Labels:|[t5]|
+|Language:|de|
+|Size:|924.3 MB|
+
+## References
+
+- https://huggingface.co/dehio/german-qg-t5-e2e-quad
\ No newline at end of file
diff --git a/docs/_posts/Cabir40/2023-01-30-t5_german_qg_quad_de.md b/docs/_posts/Cabir40/2023-01-30-t5_german_qg_quad_de.md
new file mode 100644
index 00000000000000..6d2c4e3e248a08
--- /dev/null
+++ b/docs/_posts/Cabir40/2023-01-30-t5_german_qg_quad_de.md
@@ -0,0 +1,86 @@
+---
+layout: model
+title: German T5ForConditionalGeneration Cased model (from dehio)
+author: John Snow Labs
+name: t5_german_qg_quad
+date: 2023-01-30
+tags: [de, open_source, t5, tensorflow]
+task: Text Generation
+language: de
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: T5Transformer
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained T5ForConditionalGeneration model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `german-qg-t5-quad` is a German model originally trained by `dehio`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/t5_german_qg_quad_de_4.3.0_3.0_1675102735996.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/t5_german_qg_quad_de_4.3.0_3.0_1675102735996.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|t5_german_qg_quad|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[documents]|
+|Output Labels:|[t5]|
+|Language:|de|
+|Size:|920.7 MB|
+
+## References
+
+- https://huggingface.co/dehio/german-qg-t5-quad
\ No newline at end of file
diff --git a/docs/_posts/Cabir40/2023-01-30-t5_hupd_small_en.md b/docs/_posts/Cabir40/2023-01-30-t5_hupd_small_en.md
new file mode 100644
index 00000000000000..e442218a8a226b
--- /dev/null
+++ b/docs/_posts/Cabir40/2023-01-30-t5_hupd_small_en.md
@@ -0,0 +1,86 @@
+---
+layout: model
+title: English T5ForConditionalGeneration Small Cased model (from HUPD)
+author: John Snow Labs
+name: t5_hupd_small
+date: 2023-01-30
+tags: [en, open_source, t5, tensorflow]
+task: Text Generation
+language: en
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: T5Transformer
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained T5ForConditionalGeneration model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `hupd-t5-small` is an English model originally trained by `HUPD`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/t5_hupd_small_en_4.3.0_3.0_1675102875669.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/t5_hupd_small_en_4.3.0_3.0_1675102875669.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|t5_hupd_small|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[documents]|
+|Output Labels:|[t5]|
+|Language:|en|
+|Size:|289.1 MB|
+
+## References
+
+- https://huggingface.co/HUPD/hupd-t5-small
+- https://patentdataset.org/
+- https://github.com/suzgunmirac/hupd
\ No newline at end of file
diff --git a/docs/_posts/Cabir40/2023-01-30-t5_hybrid_hbh_small_ami_sum_en.md b/docs/_posts/Cabir40/2023-01-30-t5_hybrid_hbh_small_ami_sum_en.md
new file mode 100644
index 00000000000000..a7992a7660bd33
--- /dev/null
+++ b/docs/_posts/Cabir40/2023-01-30-t5_hybrid_hbh_small_ami_sum_en.md
@@ -0,0 +1,85 @@
+---
+layout: model
+title: English T5ForConditionalGeneration Small Cased model (from rohitsroch)
+author: John Snow Labs
+name: t5_hybrid_hbh_small_ami_sum
+date: 2023-01-30
+tags: [en, open_source, t5, tensorflow]
+task: Text Generation
+language: en
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: T5Transformer
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained T5ForConditionalGeneration model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `hybrid_hbh_t5-small_ami_sum` is an English model originally trained by `rohitsroch`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/t5_hybrid_hbh_small_ami_sum_en_4.3.0_3.0_1675102906707.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/t5_hybrid_hbh_small_ami_sum_en_4.3.0_3.0_1675102906707.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|t5_hybrid_hbh_small_ami_sum|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[documents]|
+|Output Labels:|[t5]|
+|Language:|en|
+|Size:|288.9 MB|
+
+## References
+
+- https://huggingface.co/rohitsroch/hybrid_hbh_t5-small_ami_sum
+- https://doi.org/10.1145/3508546.3508640
\ No newline at end of file
diff --git a/docs/_posts/Cabir40/2023-01-30-t5_idt5_qa_qg_id.md b/docs/_posts/Cabir40/2023-01-30-t5_idt5_qa_qg_id.md
new file mode 100644
index 00000000000000..c7597801391852
--- /dev/null
+++ b/docs/_posts/Cabir40/2023-01-30-t5_idt5_qa_qg_id.md
@@ -0,0 +1,89 @@
+---
+layout: model
+title: Indonesian T5ForConditionalGeneration Cased model (from muchad)
+author: John Snow Labs
+name: t5_idt5_qa_qg
+date: 2023-01-30
+tags: [id, open_source, t5, tensorflow]
+task: Text Generation
+language: id
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: T5Transformer
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained T5ForConditionalGeneration model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `idt5-qa-qg` is an Indonesian model originally trained by `muchad`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/t5_idt5_qa_qg_id_4.3.0_3.0_1675102982556.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/t5_idt5_qa_qg_id_4.3.0_3.0_1675102982556.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|t5_idt5_qa_qg|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[documents]|
+|Output Labels:|[t5]|
+|Language:|id|
+|Size:|1.0 GB|
+
+## References
+
+- https://huggingface.co/muchad/idt5-qa-qg
\ No newline at end of file
diff --git a/docs/_posts/Cabir40/2023-01-30-t5_indot5_small_id.md b/docs/_posts/Cabir40/2023-01-30-t5_indot5_small_id.md
new file mode 100644
index 00000000000000..7192f390aba376
--- /dev/null
+++ b/docs/_posts/Cabir40/2023-01-30-t5_indot5_small_id.md
@@ -0,0 +1,85 @@
+---
+layout: model
+title: Indonesian T5ForConditionalGeneration Small Cased model (from Wikidepia)
+author: John Snow Labs
+name: t5_indot5_small
+date: 2023-01-30
+tags: [id, open_source, t5, tensorflow]
+task: Text Generation
+language: id
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: T5Transformer
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained T5ForConditionalGeneration model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `IndoT5-small` is an Indonesian model originally trained by `Wikidepia`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/t5_indot5_small_id_4.3.0_3.0_1675097879795.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/t5_indot5_small_id_4.3.0_3.0_1675097879795.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|t5_indot5_small|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[documents]|
+|Output Labels:|[t5]|
+|Language:|id|
+|Size:|179.1 MB|
+
+## References
+
+- https://huggingface.co/Wikidepia/IndoT5-small
+- https://github.com/Wikidepia/indonesian_datasets/tree/master/dump/mc4
\ No newline at end of file
diff --git a/docs/_posts/Cabir40/2023-01-30-t5_informal_formal_style_transfer_en.md b/docs/_posts/Cabir40/2023-01-30-t5_informal_formal_style_transfer_en.md
new file mode 100644
index 00000000000000..a63b9cc27138d1
--- /dev/null
+++ b/docs/_posts/Cabir40/2023-01-30-t5_informal_formal_style_transfer_en.md
@@ -0,0 +1,88 @@
+---
+layout: model
+title: English T5ForConditionalGeneration Cased model (from rajistics)
+author: John Snow Labs
+name: t5_informal_formal_style_transfer
+date: 2023-01-30
+tags: [en, open_source, t5, tensorflow]
+task: Text Generation
+language: en
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: T5Transformer
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained T5ForConditionalGeneration model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `informal_formal_style_transfer` is an English model originally trained by `rajistics`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/t5_informal_formal_style_transfer_en_4.3.0_3.0_1675103071459.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/t5_informal_formal_style_transfer_en_4.3.0_3.0_1675103071459.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|t5_informal_formal_style_transfer|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[documents]|
+|Output Labels:|[t5]|
+|Language:|en|
+|Size:|593.8 MB|
+
+## References
+
+- https://huggingface.co/rajistics/informal_formal_style_transfer
\ No newline at end of file
diff --git a/docs/_posts/Cabir40/2023-01-30-t5_it5_efficient_small_lfqa_it.md b/docs/_posts/Cabir40/2023-01-30-t5_it5_efficient_small_lfqa_it.md
new file mode 100644
index 00000000000000..466cd2c9d6dd11
--- /dev/null
+++ b/docs/_posts/Cabir40/2023-01-30-t5_it5_efficient_small_lfqa_it.md
@@ -0,0 +1,84 @@
+---
+layout: model
+title: Italian T5ForConditionalGeneration Small Cased model (from efederici)
+author: John Snow Labs
+name: t5_it5_efficient_small_lfqa
+date: 2023-01-30
+tags: [it, open_source, t5, tensorflow]
+task: Text Generation
+language: it
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: T5Transformer
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained T5ForConditionalGeneration model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `it5-efficient-small-lfqa` is an Italian model originally trained by `efederici`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/t5_it5_efficient_small_lfqa_it_4.3.0_3.0_1675103827826.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/t5_it5_efficient_small_lfqa_it_4.3.0_3.0_1675103827826.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|t5_it5_efficient_small_lfqa|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[documents]|
+|Output Labels:|[t5]|
+|Language:|it|
+|Size:|594.0 MB|
+
+## References
+
+- https://huggingface.co/efederici/it5-efficient-small-lfqa
\ No newline at end of file
diff --git a/docs/_posts/Cabir40/2023-01-30-t5_jainu_ja.md b/docs/_posts/Cabir40/2023-01-30-t5_jainu_ja.md
new file mode 100644
index 00000000000000..3ca3baa8ad5b03
--- /dev/null
+++ b/docs/_posts/Cabir40/2023-01-30-t5_jainu_ja.md
@@ -0,0 +1,86 @@
+---
+layout: model
+title: Japanese T5ForConditionalGeneration Cased model (from astremo)
+author: John Snow Labs
+name: t5_jainu
+date: 2023-01-30
+tags: [ja, open_source, t5]
+task: Text Generation
+language: ja
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+annotator: T5Transformer
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained T5ForConditionalGeneration model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `JAINU` is a Japanese model originally trained by `astremo`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/t5_jainu_ja_4.3.0_3.0_1675097938002.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/t5_jainu_ja_4.3.0_3.0_1675097938002.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|t5_jainu|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[documents]|
+|Output Labels:|[t5]|
+|Language:|ja|
+|Size:|777.2 MB|
+
+## References
+
+- https://huggingface.co/astremo/JAINU
\ No newline at end of file
diff --git a/docs/_posts/Cabir40/2023-01-30-t5_ke_base_ko.md b/docs/_posts/Cabir40/2023-01-30-t5_ke_base_ko.md
new file mode 100644
index 00000000000000..b9e63b0207fa56
--- /dev/null
+++ b/docs/_posts/Cabir40/2023-01-30-t5_ke_base_ko.md
@@ -0,0 +1,95 @@
+---
+layout: model
+title: Korean T5ForConditionalGeneration Base Cased model (from KETI-AIR)
+author: John Snow Labs
+name: t5_ke_base
+date: 2023-01-30
+tags: [ko, open_source, t5, tensorflow]
+task: Text Generation
+language: ko
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: T5Transformer
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained T5ForConditionalGeneration model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `ke-t5-base-ko` is a Korean model originally trained by `KETI-AIR`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/t5_ke_base_ko_4.3.0_3.0_1675104551769.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/t5_ke_base_ko_4.3.0_3.0_1675104551769.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|t5_ke_base|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[documents]|
+|Output Labels:|[t5]|
+|Language:|ko|
+|Size:|912.8 MB|
+
+## References
+
+- https://huggingface.co/KETI-AIR/ke-t5-base-ko
\ No newline at end of file
diff --git a/docs/_posts/Cabir40/2023-01-30-t5_keyphrase_generation_small_inspec_en.md b/docs/_posts/Cabir40/2023-01-30-t5_keyphrase_generation_small_inspec_en.md
new file mode 100644
index 00000000000000..911a9846983c9f
--- /dev/null
+++ b/docs/_posts/Cabir40/2023-01-30-t5_keyphrase_generation_small_inspec_en.md
@@ -0,0 +1,86 @@
+---
+layout: model
+title: English T5ForConditionalGeneration Small Cased model (from ml6team)
+author: John Snow Labs
+name: t5_keyphrase_generation_small_inspec
+date: 2023-01-30
+tags: [en, open_source, t5, tensorflow]
+task: Text Generation
+language: en
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: T5Transformer
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained T5ForConditionalGeneration model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `keyphrase-generation-t5-small-inspec` is an English model originally trained by `ml6team`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/t5_keyphrase_generation_small_inspec_en_4.3.0_3.0_1675104684365.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/t5_keyphrase_generation_small_inspec_en_4.3.0_3.0_1675104684365.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|t5_keyphrase_generation_small_inspec|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[documents]|
+|Output Labels:|[t5]|
+|Language:|en|
+|Size:|280.5 MB|
+
+## References
+
+- https://huggingface.co/ml6team/keyphrase-generation-t5-small-inspec
+- https://dl.acm.org/doi/10.3115/1119355.1119383
+- https://paperswithcode.com/sota?task=Keyphrase+Generation&dataset=inspec
\ No newline at end of file
diff --git a/docs/_posts/Cabir40/2023-01-30-t5_keyphrase_generation_small_openkp_en.md b/docs/_posts/Cabir40/2023-01-30-t5_keyphrase_generation_small_openkp_en.md
new file mode 100644
index 00000000000000..9f9ed7915d69f2
--- /dev/null
+++ b/docs/_posts/Cabir40/2023-01-30-t5_keyphrase_generation_small_openkp_en.md
@@ -0,0 +1,87 @@
+---
+layout: model
+title: English T5ForConditionalGeneration Small Cased model (from ml6team)
+author: John Snow Labs
+name: t5_keyphrase_generation_small_openkp
+date: 2023-01-30
+tags: [en, open_source, t5, tensorflow]
+task: Text Generation
+language: en
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: T5Transformer
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained T5ForConditionalGeneration model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `keyphrase-generation-t5-small-openkp` is an English model originally trained by `ml6team`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/t5_keyphrase_generation_small_openkp_en_4.3.0_3.0_1675104714518.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/t5_keyphrase_generation_small_openkp_en_4.3.0_3.0_1675104714518.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|t5_keyphrase_generation_small_openkp|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[documents]|
+|Output Labels:|[t5]|
+|Language:|en|
+|Size:|915.1 MB|
+
+## References
+
+- https://huggingface.co/ml6team/keyphrase-generation-t5-small-openkp
\ No newline at end of file
diff --git a/docs/_posts/Cabir40/2023-01-30-t5_legacy_sl_small_sl.md b/docs/_posts/Cabir40/2023-01-30-t5_legacy_sl_small_sl.md
new file mode 100644
index 00000000000000..1f0edae22ac893
--- /dev/null
+++ b/docs/_posts/Cabir40/2023-01-30-t5_legacy_sl_small_sl.md
@@ -0,0 +1,84 @@
+---
+layout: model
+title: Slovenian T5ForConditionalGeneration Small Cased model (from cjvt)
+author: John Snow Labs
+name: t5_legacy_sl_small
+date: 2023-01-30
+tags: [sl, open_source, t5, tensorflow]
+task: Text Generation
+language: sl
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: T5Transformer
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained T5ForConditionalGeneration model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `legacy-t5-sl-small` is a Slovenian model originally trained by `cjvt`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/t5_legacy_sl_small_sl_4.3.0_3.0_1675104880094.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/t5_legacy_sl_small_sl_4.3.0_3.0_1675104880094.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|t5_legacy_sl_small|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[documents]|
+|Output Labels:|[t5]|
+|Language:|sl|
+|Size:|178.9 MB|
+
+## References
+
+- https://huggingface.co/cjvt/legacy-t5-sl-small
\ No newline at end of file
diff --git a/docs/_posts/Cabir40/2023-01-30-t5_lewip_informal_en.md b/docs/_posts/Cabir40/2023-01-30-t5_lewip_informal_en.md
new file mode 100644
index 00000000000000..49890ce449d62d
--- /dev/null
+++ b/docs/_posts/Cabir40/2023-01-30-t5_lewip_informal_en.md
@@ -0,0 +1,83 @@
+---
+layout: model
+title: English T5ForConditionalGeneration Cased model (from SkolkovoInstitute)
+author: John Snow Labs
+name: t5_lewip_informal
+date: 2023-01-30
+tags: [en, open_source, t5]
+task: Text Generation
+language: en
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+annotator: T5Transformer
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained T5ForConditionalGeneration model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `LEWIP-informal` is an English model originally trained by `SkolkovoInstitute`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/t5_lewip_informal_en_4.3.0_3.0_1675098112375.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/t5_lewip_informal_en_4.3.0_3.0_1675098112375.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|t5_lewip_informal|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[documents]|
+|Output Labels:|[t5]|
+|Language:|en|
+|Size:|914.1 MB|
+
+## References
+
+- https://huggingface.co/SkolkovoInstitute/LEWIP-informal
\ No newline at end of file
diff --git a/docs/_posts/Cabir40/2023-01-30-t5_logisgenerator_en.md b/docs/_posts/Cabir40/2023-01-30-t5_logisgenerator_en.md
new file mode 100644
index 00000000000000..e7709c8b7881a9
--- /dev/null
+++ b/docs/_posts/Cabir40/2023-01-30-t5_logisgenerator_en.md
@@ -0,0 +1,93 @@
+---
+layout: model
+title: English T5ForConditionalGeneration Cased model (from OnsElleuch)
+author: John Snow Labs
+name: t5_logisgenerator
+date: 2023-01-30
+tags: [en, open_source, t5, tensorflow]
+task: Text Generation
+language: en
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: T5Transformer
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained T5ForConditionalGeneration model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `logisgenerator` is an English model originally trained by `OnsElleuch`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/t5_logisgenerator_en_4.3.0_3.0_1675104908400.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/t5_logisgenerator_en_4.3.0_3.0_1675104908400.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|t5_logisgenerator|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[documents]|
+|Output Labels:|[t5]|
+|Language:|en|
+|Size:|1.0 GB|
+
+## References
+
+- https://huggingface.co/OnsElleuch/logisgenerator
\ No newline at end of file
diff --git a/docs/_posts/Cabir40/2023-01-30-t5_mixqg_base_en.md b/docs/_posts/Cabir40/2023-01-30-t5_mixqg_base_en.md
new file mode 100644
index 00000000000000..a5786368165b30
--- /dev/null
+++ b/docs/_posts/Cabir40/2023-01-30-t5_mixqg_base_en.md
@@ -0,0 +1,86 @@
+---
+layout: model
+title: English T5ForConditionalGeneration Base Cased model (from Salesforce)
+author: John Snow Labs
+name: t5_mixqg_base
+date: 2023-01-30
+tags: [en, open_source, t5, tensorflow]
+task: Text Generation
+language: en
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: T5Transformer
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained T5ForConditionalGeneration model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `mixqg-base` is an English model originally trained by `Salesforce`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/t5_mixqg_base_en_4.3.0_3.0_1675105642190.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/t5_mixqg_base_en_4.3.0_3.0_1675105642190.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|t5_mixqg_base|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[documents]|
+|Output Labels:|[t5]|
+|Language:|en|
+|Size:|583.1 MB|
+
+## References
+
+- https://huggingface.co/Salesforce/mixqg-base
+- https://arxiv.org/abs/2110.08175
+- https://github.com/salesforce/QGen
\ No newline at end of file
diff --git a/docs/_posts/Cabir40/2023-01-30-t5_msmarco_base_v1_en.md b/docs/_posts/Cabir40/2023-01-30-t5_msmarco_base_v1_en.md
new file mode 100644
index 00000000000000..756f9fe59e9283
--- /dev/null
+++ b/docs/_posts/Cabir40/2023-01-30-t5_msmarco_base_v1_en.md
@@ -0,0 +1,90 @@
+---
+layout: model
+title: English T5ForConditionalGeneration Base Cased model (from doc2query)
+author: John Snow Labs
+name: t5_msmarco_base_v1
+date: 2023-01-30
+tags: [en, open_source, t5, tensorflow]
+task: Text Generation
+language: en
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: T5Transformer
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained T5ForConditionalGeneration model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `msmarco-t5-base-v1` is an English model originally trained by `doc2query`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/t5_msmarco_base_v1_en_4.3.0_3.0_1675105723928.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/t5_msmarco_base_v1_en_4.3.0_3.0_1675105723928.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|t5_msmarco_base_v1|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[documents]|
+|Output Labels:|[t5]|
+|Language:|en|
+|Size:|1.2 GB|
+
+## References
+
+- https://huggingface.co/doc2query/msmarco-t5-base-v1
\ No newline at end of file
diff --git a/docs/_posts/Cabir40/2023-01-30-t5_mt5_small_german_finetune_mlsum_de.md b/docs/_posts/Cabir40/2023-01-30-t5_mt5_small_german_finetune_mlsum_de.md
new file mode 100644
index 00000000000000..2a30b3d9cfb618
--- /dev/null
+++ b/docs/_posts/Cabir40/2023-01-30-t5_mt5_small_german_finetune_mlsum_de.md
@@ -0,0 +1,85 @@
+---
+layout: model
+title: German T5ForConditionalGeneration Small Cased model (from ml6team)
+author: John Snow Labs
+name: t5_mt5_small_german_finetune_mlsum
+date: 2023-01-30
+tags: [de, open_source, t5, tensorflow]
+task: Text Generation
+language: de
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: T5Transformer
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained T5ForConditionalGeneration model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `mt5-small-german-finetune-mlsum` is a German model originally trained by `ml6team`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/t5_mt5_small_german_finetune_mlsum_de_4.3.0_3.0_1675106313224.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/t5_mt5_small_german_finetune_mlsum_de_4.3.0_3.0_1675106313224.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|t5_mt5_small_german_finetune_mlsum|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[documents]|
+|Output Labels:|[t5]|
+|Language:|de|
+|Size:|1.3 GB|
+
+## References
+
+- https://huggingface.co/ml6team/mt5-small-german-finetune-mlsum
+- https://github.com/pltrdy/rouge
\ No newline at end of file
diff --git a/docs/_posts/Cabir40/2023-01-30-t5_ner_conll_entityreplace_en.md b/docs/_posts/Cabir40/2023-01-30-t5_ner_conll_entityreplace_en.md
new file mode 100644
index 00000000000000..ba3ad6d3715b7b
--- /dev/null
+++ b/docs/_posts/Cabir40/2023-01-30-t5_ner_conll_entityreplace_en.md
@@ -0,0 +1,86 @@
+---
+layout: model
+title: English T5ForConditionalGeneration Cased model (from pitehu)
+author: John Snow Labs
+name: t5_ner_conll_entityreplace
+date: 2023-01-30
+tags: [en, open_source, t5, tensorflow]
+task: Text Generation
+language: en
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: T5Transformer
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained T5ForConditionalGeneration model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `T5_NER_CONLL_ENTITYREPLACE` is an English model originally trained by `pitehu`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/t5_ner_conll_entityreplace_en_4.3.0_3.0_1675099568513.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/t5_ner_conll_entityreplace_en_4.3.0_3.0_1675099568513.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|t5_ner_conll_entityreplace|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[documents]|
+|Output Labels:|[t5]|
+|Language:|en|
+|Size:|272.6 MB|
+
+## References
+
+- https://huggingface.co/pitehu/T5_NER_CONLL_ENTITYREPLACE
\ No newline at end of file
diff --git a/docs/_posts/Cabir40/2023-01-30-t5_neutrally_en.md b/docs/_posts/Cabir40/2023-01-30-t5_neutrally_en.md
new file mode 100644
index 00000000000000..6c825187704ea9
--- /dev/null
+++ b/docs/_posts/Cabir40/2023-01-30-t5_neutrally_en.md
@@ -0,0 +1,88 @@
+---
+layout: model
+title: English T5ForConditionalGeneration Cased model (from erickfm)
+author: John Snow Labs
+name: t5_neutrally
+date: 2023-01-30
+tags: [en, open_source, t5, tensorflow]
+task: Text Generation
+language: en
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: T5Transformer
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained T5ForConditionalGeneration model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `neutrally` is an English model originally trained by `erickfm`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/t5_neutrally_en_4.3.0_3.0_1675106407099.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/t5_neutrally_en_4.3.0_3.0_1675106407099.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|t5_neutrally|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[documents]|
+|Output Labels:|[t5]|
+|Language:|en|
+|Size:|955.7 MB|
+
+## References
+
+- https://huggingface.co/erickfm/neutrally
\ No newline at end of file
diff --git a/docs/_posts/Cabir40/2023-01-30-t5_s2orc_base_v1_en.md b/docs/_posts/Cabir40/2023-01-30-t5_s2orc_base_v1_en.md
new file mode 100644
index 00000000000000..e11fe51446820f
--- /dev/null
+++ b/docs/_posts/Cabir40/2023-01-30-t5_s2orc_base_v1_en.md
@@ -0,0 +1,89 @@
+---
+layout: model
+title: English T5ForConditionalGeneration Base Cased model (from doc2query)
+author: John Snow Labs
+name: t5_s2orc_base_v1
+date: 2023-01-30
+tags: [en, open_source, t5]
+task: Text Generation
+language: en
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+annotator: T5Transformer
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained T5ForConditionalGeneration model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `S2ORC-t5-base-v1` is an English model originally trained by `doc2query`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/t5_s2orc_base_v1_en_4.3.0_3.0_1675098438086.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/t5_s2orc_base_v1_en_4.3.0_3.0_1675098438086.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|t5_s2orc_base_v1|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[documents]|
+|Output Labels:|[t5]|
+|Language:|en|
+|Size:|927.4 MB|
+
+## References
+
+- https://huggingface.co/doc2query/S2ORC-t5-base-v1
\ No newline at end of file
diff --git a/docs/_posts/Cabir40/2023-01-30-t5_scifive_base_pmc_en.md b/docs/_posts/Cabir40/2023-01-30-t5_scifive_base_pmc_en.md
new file mode 100644
index 00000000000000..09b0868d30c92f
--- /dev/null
+++ b/docs/_posts/Cabir40/2023-01-30-t5_scifive_base_pmc_en.md
@@ -0,0 +1,85 @@
+---
+layout: model
+title: English T5ForConditionalGeneration Base Cased model (from razent)
+author: John Snow Labs
+name: t5_scifive_base_pmc
+date: 2023-01-30
+tags: [en, open_source, t5]
+task: Text Generation
+language: en
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+annotator: T5Transformer
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained T5ForConditionalGeneration model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `SciFive-base-PMC` is an English model originally trained by `razent`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/t5_scifive_base_pmc_en_4.3.0_3.0_1675098735326.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/t5_scifive_base_pmc_en_4.3.0_3.0_1675098735326.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|t5_scifive_base_pmc|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[documents]|
+|Output Labels:|[t5]|
+|Language:|en|
+|Size:|474.3 MB|
+
+## References
+
+- https://huggingface.co/razent/SciFive-base-PMC
+- https://arxiv.org/abs/2106.03598
+- https://github.com/justinphan3110/SciFive
\ No newline at end of file
diff --git a/docs/_posts/Cabir40/2023-01-30-t5_scifive_base_pubmed_en.md b/docs/_posts/Cabir40/2023-01-30-t5_scifive_base_pubmed_en.md
new file mode 100644
index 00000000000000..5e0fd6389f9ee8
--- /dev/null
+++ b/docs/_posts/Cabir40/2023-01-30-t5_scifive_base_pubmed_en.md
@@ -0,0 +1,86 @@
+---
+layout: model
+title: English T5ForConditionalGeneration Base Cased model (from razent)
+author: John Snow Labs
+name: t5_scifive_base_pubmed
+date: 2023-01-30
+tags: [en, open_source, t5, tensorflow]
+task: Text Generation
+language: en
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: T5Transformer
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained T5ForConditionalGeneration model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `SciFive-base-Pubmed` is an English model originally trained by `razent`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/t5_scifive_base_pubmed_en_4.3.0_3.0_1675098922792.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/t5_scifive_base_pubmed_en_4.3.0_3.0_1675098922792.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|t5_scifive_base_pubmed|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[documents]|
+|Output Labels:|[t5]|
+|Language:|en|
+|Size:|474.3 MB|
+
+## References
+
+- https://huggingface.co/razent/SciFive-base-Pubmed
+- https://arxiv.org/abs/2106.03598
+- https://github.com/justinphan3110/SciFive
\ No newline at end of file
diff --git a/docs/_posts/Cabir40/2023-01-30-t5_scifive_base_pubmed_pmc_en.md b/docs/_posts/Cabir40/2023-01-30-t5_scifive_base_pubmed_pmc_en.md
new file mode 100644
index 00000000000000..a67f15dd21490c
--- /dev/null
+++ b/docs/_posts/Cabir40/2023-01-30-t5_scifive_base_pubmed_pmc_en.md
@@ -0,0 +1,86 @@
+---
+layout: model
+title: English T5ForConditionalGeneration Base Cased model (from razent)
+author: John Snow Labs
+name: t5_scifive_base_pubmed_pmc
+date: 2023-01-30
+tags: [en, open_source, t5, tensorflow]
+task: Text Generation
+language: en
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: T5Transformer
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained T5ForConditionalGeneration model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `SciFive-base-Pubmed_PMC` is an English model originally trained by `razent`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/t5_scifive_base_pubmed_pmc_en_4.3.0_3.0_1675099109583.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/t5_scifive_base_pubmed_pmc_en_4.3.0_3.0_1675099109583.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|t5_scifive_base_pubmed_pmc|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[documents]|
+|Output Labels:|[t5]|
+|Language:|en|
+|Size:|474.3 MB|
+
+## References
+
+- https://huggingface.co/razent/SciFive-base-Pubmed_PMC
+- https://arxiv.org/abs/2106.03598
+- https://github.com/justinphan3110/SciFive
\ No newline at end of file
diff --git a/docs/_posts/Cabir40/2023-01-30-t5_ssr_base_en.md b/docs/_posts/Cabir40/2023-01-30-t5_ssr_base_en.md
new file mode 100644
index 00000000000000..0fc9d549215fec
--- /dev/null
+++ b/docs/_posts/Cabir40/2023-01-30-t5_ssr_base_en.md
@@ -0,0 +1,85 @@
+---
+layout: model
+title: English T5ForConditionalGeneration Base Cased model (from microsoft)
+author: John Snow Labs
+name: t5_ssr_base
+date: 2023-01-30
+tags: [en, open_source, t5, tensorflow]
+task: Text Generation
+language: en
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: T5Transformer
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained T5ForConditionalGeneration model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `ssr-base` is an English model originally trained by `microsoft`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/t5_ssr_base_en_4.3.0_3.0_1675107262685.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/t5_ssr_base_en_4.3.0_3.0_1675107262685.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|t5_ssr_base|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[documents]|
+|Output Labels:|[t5]|
+|Language:|en|
+|Size:|926.9 MB|
+
+## References
+
+- https://huggingface.co/microsoft/ssr-base
+- https://arxiv.org/abs/2101.00416
\ No newline at end of file
diff --git a/docs/_posts/Cabir40/2023-01-30-t5_stackexchange_base_v1_en.md b/docs/_posts/Cabir40/2023-01-30-t5_stackexchange_base_v1_en.md
new file mode 100644
index 00000000000000..46b137aeb82236
--- /dev/null
+++ b/docs/_posts/Cabir40/2023-01-30-t5_stackexchange_base_v1_en.md
@@ -0,0 +1,89 @@
+---
+layout: model
+title: English T5ForConditionalGeneration Base Cased model (from doc2query)
+author: John Snow Labs
+name: t5_stackexchange_base_v1
+date: 2023-01-30
+tags: [en, open_source, t5, tensorflow]
+task: Text Generation
+language: en
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: T5Transformer
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained T5ForConditionalGeneration model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `stackexchange-t5-base-v1` is an English model originally trained by `doc2query`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/t5_stackexchange_base_v1_en_4.3.0_3.0_1675107351420.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/t5_stackexchange_base_v1_en_4.3.0_3.0_1675107351420.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|t5_stackexchange_base_v1|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[documents]|
+|Output Labels:|[t5]|
+|Language:|en|
+|Size:|277.2 MB|
+
+## References
+
+- https://huggingface.co/doc2query/stackexchange-t5-base-v1
\ No newline at end of file
diff --git a/docs/_posts/Cabir40/2023-01-30-t5_t2t_adex_prompt_en.md b/docs/_posts/Cabir40/2023-01-30-t5_t2t_adex_prompt_en.md
new file mode 100644
index 00000000000000..3431742781eb59
--- /dev/null
+++ b/docs/_posts/Cabir40/2023-01-30-t5_t2t_adex_prompt_en.md
@@ -0,0 +1,85 @@
+---
+layout: model
+title: English T5ForConditionalGeneration Cased model (from gokceuludogan)
+author: John Snow Labs
+name: t5_t2t_adex_prompt
+date: 2023-01-30
+tags: [en, open_source, t5, tensorflow]
+task: Text Generation
+language: en
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: T5Transformer
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained T5ForConditionalGeneration model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `t2t-adeX-prompt` is an English model originally trained by `gokceuludogan`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/t5_t2t_adex_prompt_en_4.3.0_3.0_1675107607187.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/t5_t2t_adex_prompt_en_4.3.0_3.0_1675107607187.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|t5_t2t_adex_prompt|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[documents]|
+|Output Labels:|[t5]|
+|Language:|en|
+|Size:|925.3 MB|
+
+## References
+
+- https://huggingface.co/gokceuludogan/t2t-adeX-prompt
+- https://github.com/gokceuludogan/boun-tabi-smm4h22
\ No newline at end of file
diff --git a/docs/_posts/Cabir40/2023-01-30-t5_t2t_assert_ade_balanced_en.md b/docs/_posts/Cabir40/2023-01-30-t5_t2t_assert_ade_balanced_en.md
new file mode 100644
index 00000000000000..4c5d1892896920
--- /dev/null
+++ b/docs/_posts/Cabir40/2023-01-30-t5_t2t_assert_ade_balanced_en.md
@@ -0,0 +1,85 @@
+---
+layout: model
+title: English T5ForConditionalGeneration Cased model (from yirmibesogluz)
+author: John Snow Labs
+name: t5_t2t_assert_ade_balanced
+date: 2023-01-30
+tags: [en, open_source, t5, tensorflow]
+task: Text Generation
+language: en
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: T5Transformer
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained T5ForConditionalGeneration model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `t2t-assert-ade-balanced` is an English model originally trained by `yirmibesogluz`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/t5_t2t_assert_ade_balanced_en_4.3.0_3.0_1675107688482.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/t5_t2t_assert_ade_balanced_en_4.3.0_3.0_1675107688482.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|t5_t2t_assert_ade_balanced|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[documents]|
+|Output Labels:|[t5]|
+|Language:|en|
+|Size:|917.2 MB|
+
+## References
+
+- https://huggingface.co/yirmibesogluz/t2t-assert-ade-balanced
+- https://github.com/gokceuludogan/boun-tabi-smm4h22
\ No newline at end of file
diff --git a/docs/_posts/Cabir40/2023-01-30-t5_t2t_ner_ade_balanced_en.md b/docs/_posts/Cabir40/2023-01-30-t5_t2t_ner_ade_balanced_en.md
new file mode 100644
index 00000000000000..555c04293dc33a
--- /dev/null
+++ b/docs/_posts/Cabir40/2023-01-30-t5_t2t_ner_ade_balanced_en.md
@@ -0,0 +1,85 @@
+---
+layout: model
+title: English T5ForConditionalGeneration Cased model (from yirmibesogluz)
+author: John Snow Labs
+name: t5_t2t_ner_ade_balanced
+date: 2023-01-30
+tags: [en, open_source, t5, tensorflow]
+task: Text Generation
+language: en
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: T5Transformer
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained T5ForConditionalGeneration model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `t2t-ner-ade-balanced` is an English model originally trained by `yirmibesogluz`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/t5_t2t_ner_ade_balanced_en_4.3.0_3.0_1675107775759.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/t5_t2t_ner_ade_balanced_en_4.3.0_3.0_1675107775759.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|t5_t2t_ner_ade_balanced|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[documents]|
+|Output Labels:|[t5]|
+|Language:|en|
+|Size:|924.7 MB|
+
+## References
+
+- https://huggingface.co/yirmibesogluz/t2t-ner-ade-balanced
+- https://github.com/gokceuludogan/boun-tabi-smm4h22
\ No newline at end of file
diff --git a/docs/_posts/Cabir40/2023-01-30-t5_totto_base_bert_score_20k_steps_en.md b/docs/_posts/Cabir40/2023-01-30-t5_totto_base_bert_score_20k_steps_en.md
new file mode 100644
index 00000000000000..dafc689e5eb829
--- /dev/null
+++ b/docs/_posts/Cabir40/2023-01-30-t5_totto_base_bert_score_20k_steps_en.md
@@ -0,0 +1,88 @@
+---
+layout: model
+title: English T5ForConditionalGeneration Base Cased model (from Tejas21)
+author: John Snow Labs
+name: t5_totto_base_bert_score_20k_steps
+date: 2023-01-30
+tags: [en, open_source, t5, tensorflow]
+task: Text Generation
+language: en
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: T5Transformer
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained T5ForConditionalGeneration model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `Totto_t5_base_BERT_Score_20k_steps` is an English model originally trained by `Tejas21`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/t5_totto_base_bert_score_20k_steps_en_4.3.0_3.0_1675099761482.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/t5_totto_base_bert_score_20k_steps_en_4.3.0_3.0_1675099761482.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|t5_totto_base_bert_score_20k_steps|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[documents]|
+|Output Labels:|[t5]|
+|Language:|en|
+|Size:|1.0 GB|
+
+## References
+
+- https://huggingface.co/Tejas21/Totto_t5_base_BERT_Score_20k_steps
\ No newline at end of file
diff --git a/docs/_posts/Cabir40/2023-01-31-t5_informal_en.md b/docs/_posts/Cabir40/2023-01-31-t5_informal_en.md
new file mode 100644
index 00000000000000..4491065a4fac1d
--- /dev/null
+++ b/docs/_posts/Cabir40/2023-01-31-t5_informal_en.md
@@ -0,0 +1,85 @@
+---
+layout: model
+title: English T5ForConditionalGeneration Cased model (from SkolkovoInstitute)
+author: John Snow Labs
+name: t5_informal
+date: 2023-01-31
+tags: [en, open_source, t5, tensorflow]
+task: Text Generation
+language: en
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: T5Transformer
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained T5ForConditionalGeneration model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `t5-informal` is an English model originally trained by `SkolkovoInstitute`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/t5_informal_en_4.3.0_3.0_1675124817401.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/t5_informal_en_4.3.0_3.0_1675124817401.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|t5_informal|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[documents]|
+|Output Labels:|[t5]|
+|Language:|en|
+|Size:|927.0 MB|
+
+## References
+
+- https://huggingface.co/SkolkovoInstitute/t5-informal
+- https://aclanthology.org/N18-1012/
\ No newline at end of file
diff --git a/docs/_posts/Cabir40/2023-01-31-t5_inshorts_en.md b/docs/_posts/Cabir40/2023-01-31-t5_inshorts_en.md
new file mode 100644
index 00000000000000..afabc46b328cb5
--- /dev/null
+++ b/docs/_posts/Cabir40/2023-01-31-t5_inshorts_en.md
@@ -0,0 +1,89 @@
+---
+layout: model
+title: English T5ForConditionalGeneration Cased model (from lordtt13)
+author: John Snow Labs
+name: t5_inshorts
+date: 2023-01-31
+tags: [en, open_source, t5, tensorflow]
+task: Text Generation
+language: en
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: T5Transformer
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained T5ForConditionalGeneration model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `t5-inshorts` is an English model originally trained by `lordtt13`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/t5_inshorts_en_4.3.0_3.0_1675124897561.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/t5_inshorts_en_4.3.0_3.0_1675124897561.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|t5_inshorts|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[documents]|
+|Output Labels:|[t5]|
+|Language:|en|
+|Size:|923.5 MB|
+
+## References
+
+- https://huggingface.co/lordtt13/t5-inshorts
\ No newline at end of file
diff --git a/docs/_posts/Cabir40/2023-01-31-t5_paraphrase_quora_paws_en.md b/docs/_posts/Cabir40/2023-01-31-t5_paraphrase_quora_paws_en.md
new file mode 100644
index 00000000000000..32c3cb8dd4e396
--- /dev/null
+++ b/docs/_posts/Cabir40/2023-01-31-t5_paraphrase_quora_paws_en.md
@@ -0,0 +1,85 @@
+---
+layout: model
+title: English T5ForConditionalGeneration Cased model (from ceshine)
+author: John Snow Labs
+name: t5_paraphrase_quora_paws
+date: 2023-01-31
+tags: [en, open_source, t5, tensorflow]
+task: Text Generation
+language: en
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: T5Transformer
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained T5ForConditionalGeneration model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `t5-paraphrase-quora-paws` is an English model originally trained by `ceshine`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/t5_paraphrase_quora_paws_en_4.3.0_3.0_1675125097390.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/t5_paraphrase_quora_paws_en_4.3.0_3.0_1675125097390.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|t5_paraphrase_quora_paws|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[documents]|
+|Output Labels:|[t5]|
+|Language:|en|
+|Size:|924.7 MB|
+
+## References
+
+- https://huggingface.co/ceshine/t5-paraphrase-quora-paws
+- https://github.com/ceshine/finetuning-t5/tree/master/paraphrase
\ No newline at end of file
diff --git a/docs/_posts/Cabir40/2023-01-31-t5_paraphraser_en.md b/docs/_posts/Cabir40/2023-01-31-t5_paraphraser_en.md
new file mode 100644
index 00000000000000..b1437c4a9c8a84
--- /dev/null
+++ b/docs/_posts/Cabir40/2023-01-31-t5_paraphraser_en.md
@@ -0,0 +1,84 @@
+---
+layout: model
+title: English T5ForConditionalGeneration Cased model (from valurank)
+author: John Snow Labs
+name: t5_paraphraser
+date: 2023-01-31
+tags: [en, open_source, t5, tensorflow]
+task: Text Generation
+language: en
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: T5Transformer
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained T5ForConditionalGeneration model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `t5-paraphraser` is an English model originally trained by `valurank`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/t5_paraphraser_en_4.3.0_3.0_1675125180653.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/t5_paraphraser_en_4.3.0_3.0_1675125180653.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|t5_paraphraser|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[documents]|
+|Output Labels:|[t5]|
+|Language:|en|
+|Size:|920.7 MB|
+
+## References
+
+- https://huggingface.co/valurank/t5-paraphraser
\ No newline at end of file
diff --git a/docs/_posts/Cabir40/2023-01-31-t5_podcast_summarisation_en.md b/docs/_posts/Cabir40/2023-01-31-t5_podcast_summarisation_en.md
new file mode 100644
index 00000000000000..ce4c7b5227a105
--- /dev/null
+++ b/docs/_posts/Cabir40/2023-01-31-t5_podcast_summarisation_en.md
@@ -0,0 +1,91 @@
+---
+layout: model
+title: English T5ForConditionalGeneration Cased model (from paulowoicho)
+author: John Snow Labs
+name: t5_podcast_summarisation
+date: 2023-01-31
+tags: [en, open_source, t5, tensorflow]
+task: Text Generation
+language: en
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: T5Transformer
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained T5ForConditionalGeneration model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `t5-podcast-summarisation` is an English model originally trained by `paulowoicho`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/t5_podcast_summarisation_en_4.3.0_3.0_1675125262437.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/t5_podcast_summarisation_en_4.3.0_3.0_1675125262437.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|t5_podcast_summarisation|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[documents]|
+|Output Labels:|[t5]|
+|Language:|en|
+|Size:|925.1 MB|
+
+## References
+
+- https://huggingface.co/paulowoicho/t5-podcast-summarisation
+- https://linkedin.com/in/rouzki
\ No newline at end of file
diff --git a/docs/_posts/Cabir40/2023-01-31-t5_qa_squad2neg_en.md b/docs/_posts/Cabir40/2023-01-31-t5_qa_squad2neg_en.md
new file mode 100644
index 00000000000000..82355157c31f58
--- /dev/null
+++ b/docs/_posts/Cabir40/2023-01-31-t5_qa_squad2neg_en.md
@@ -0,0 +1,86 @@
+---
+layout: model
+title: English T5ForConditionalGeneration Cased model (from ThomasNLG)
+author: John Snow Labs
+name: t5_qa_squad2neg
+date: 2023-01-31
+tags: [en, open_source, t5, tensorflow]
+task: Text Generation
+language: en
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: T5Transformer
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained T5ForConditionalGeneration model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `t5-qa_squad2neg-en` is an English model originally trained by `ThomasNLG`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/t5_qa_squad2neg_en_4.3.0_3.0_1675125429554.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/t5_qa_squad2neg_en_4.3.0_3.0_1675125429554.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|t5_qa_squad2neg|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[documents]|
+|Output Labels:|[t5]|
+|Language:|en|
+|Size:|923.2 MB|
+
+## References
+
+- https://huggingface.co/ThomasNLG/t5-qa_squad2neg-en
+- https://github.com/ThomasScialom/QuestEval
\ No newline at end of file
diff --git a/docs/_posts/Cabir40/2023-01-31-t5_qg_webnlg_synth_en.md b/docs/_posts/Cabir40/2023-01-31-t5_qg_webnlg_synth_en.md
new file mode 100644
index 00000000000000..8de1b7545cd04b
--- /dev/null
+++ b/docs/_posts/Cabir40/2023-01-31-t5_qg_webnlg_synth_en.md
@@ -0,0 +1,86 @@
+---
+layout: model
+title: English T5ForConditionalGeneration Cased model (from ThomasNLG)
+author: John Snow Labs
+name: t5_qg_webnlg_synth
+date: 2023-01-31
+tags: [en, open_source, t5, tensorflow]
+task: Text Generation
+language: en
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: T5Transformer
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained T5ForConditionalGeneration model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `t5-qg_webnlg_synth-en` is an English model originally trained by `ThomasNLG`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/t5_qg_webnlg_synth_en_4.3.0_3.0_1675125600977.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/t5_qg_webnlg_synth_en_4.3.0_3.0_1675125600977.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|t5_qg_webnlg_synth|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[documents]|
+|Output Labels:|[t5]|
+|Language:|en|
+|Size:|147.7 MB|
+
+## References
+
+- https://huggingface.co/ThomasNLG/t5-qg_webnlg_synth-en
+- https://github.com/huseinzol05/malaya/tree/master/pretrained-model/t5/prepare
+- https://github.com/google-research/text-to-text-transfer-transformer
+- https://github.com/huseinzol05/Malaya/tree/master/pretrained-model/t5
\ No newline at end of file
diff --git a/docs/_posts/Cabir40/2023-01-31-t5_small_bashsql_en.md b/docs/_posts/Cabir40/2023-01-31-t5_small_bashsql_en.md
new file mode 100644
index 00000000000000..e93ec80c411370
--- /dev/null
+++ b/docs/_posts/Cabir40/2023-01-31-t5_small_bashsql_en.md
@@ -0,0 +1,87 @@
+---
+layout: model
+title: English T5ForConditionalGeneration Small Cased model (from NeuML)
+author: John Snow Labs
+name: t5_small_bashsql
+date: 2023-01-31
+tags: [en, open_source, t5, tensorflow]
+task: Text Generation
+language: en
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: T5Transformer
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained T5ForConditionalGeneration model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `t5-small-bashsql` is an English model originally trained by `NeuML`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/t5_small_bashsql_en_4.3.0_3.0_1675125918159.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/t5_small_bashsql_en_4.3.0_3.0_1675125918159.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|t5_small_bashsql|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[documents]|
+|Output Labels:|[t5]|
+|Language:|en|
+|Size:|260.9 MB|
+
+## References
+
+- https://huggingface.co/NeuML/t5-small-bashsql
+- https://github.com/neuml/txtai
+- https://en.wikipedia.org/wiki/Bash_(Unix_shell)
+- https://github.com/neuml/txtai/tree/master/models/bashsql
\ No newline at end of file
diff --git a/docs/_posts/Cabir40/2023-01-31-t5_small_en.md b/docs/_posts/Cabir40/2023-01-31-t5_small_en.md
new file mode 100644
index 00000000000000..f26578807426b1
--- /dev/null
+++ b/docs/_posts/Cabir40/2023-01-31-t5_small_en.md
@@ -0,0 +1,96 @@
+---
+layout: model
+title: English T5ForConditionalGeneration Small Cased model (from khanglam7012)
+author: John Snow Labs
+name: t5_small
+date: 2023-01-31
+tags: [en, open_source, t5, tensorflow]
+task: Text Generation
+language: en
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: T5Transformer
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained T5ForConditionalGeneration model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `t5-small` is an English model originally trained by `khanglam7012`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/t5_small_en_4.3.0_3.0_1675125819094.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/t5_small_en_4.3.0_3.0_1675125819094.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|t5_small|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[documents]|
+|Output Labels:|[t5]|
+|Language:|en|
+|Size:|284.4 MB|
+
+## References
+
+- https://huggingface.co/khanglam7012/t5-small
+- https://github.com/rpryzant/neutralizing-bias
\ No newline at end of file
diff --git a/docs/_posts/Cabir40/2023-01-31-t5_small_finetuned_emotion_en.md b/docs/_posts/Cabir40/2023-01-31-t5_small_finetuned_emotion_en.md
new file mode 100644
index 00000000000000..5737f758058535
--- /dev/null
+++ b/docs/_posts/Cabir40/2023-01-31-t5_small_finetuned_emotion_en.md
@@ -0,0 +1,95 @@
+---
+layout: model
+title: English T5ForConditionalGeneration Small Cased model (from mrm8488)
+author: John Snow Labs
+name: t5_small_finetuned_emotion
+date: 2023-01-31
+tags: [en, open_source, t5, tensorflow]
+task: Text Generation
+language: en
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: T5Transformer
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained T5ForConditionalGeneration model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `t5-small-finetuned-emotion` is an English model originally trained by `mrm8488`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/t5_small_finetuned_emotion_en_4.3.0_3.0_1675125983774.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/t5_small_finetuned_emotion_en_4.3.0_3.0_1675125983774.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|t5_small_finetuned_emotion|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[documents]|
+|Output Labels:|[t5]|
+|Language:|en|
+|Size:|282.6 MB|
+
+## References
+
+- https://huggingface.co/mrm8488/t5-small-finetuned-emotion
\ No newline at end of file
diff --git a/docs/_posts/Cabir40/2023-01-31-t5_small_finetuned_squadv1_en.md b/docs/_posts/Cabir40/2023-01-31-t5_small_finetuned_squadv1_en.md
new file mode 100644
index 00000000000000..ee1fbf54c32a2e
--- /dev/null
+++ b/docs/_posts/Cabir40/2023-01-31-t5_small_finetuned_squadv1_en.md
@@ -0,0 +1,92 @@
+---
+layout: model
+title: English T5ForConditionalGeneration Small Cased model (from mrm8488)
+author: John Snow Labs
+name: t5_small_finetuned_squadv1
+date: 2023-01-31
+tags: [en, open_source, t5, tensorflow]
+task: Text Generation
+language: en
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: T5Transformer
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained T5ForConditionalGeneration model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `t5-small-finetuned-squadv1` is an English model originally trained by `mrm8488`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/t5_small_finetuned_squadv1_en_4.3.0_3.0_1675126129461.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/t5_small_finetuned_squadv1_en_4.3.0_3.0_1675126129461.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|t5_small_finetuned_squadv1|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[documents]|
+|Output Labels:|[t5]|
+|Language:|en|
+|Size:|286.3 MB|
+
+## References
+
+- https://huggingface.co/mrm8488/t5-small-finetuned-squadv1
\ No newline at end of file
diff --git a/docs/_posts/Cabir40/2023-01-31-t5_small_finetuned_wikisql_en.md b/docs/_posts/Cabir40/2023-01-31-t5_small_finetuned_wikisql_en.md
new file mode 100644
index 00000000000000..cd863b38f34b8c
--- /dev/null
+++ b/docs/_posts/Cabir40/2023-01-31-t5_small_finetuned_wikisql_en.md
@@ -0,0 +1,92 @@
+---
+layout: model
+title: English T5ForConditionalGeneration Small Cased model (from mrm8488)
+author: John Snow Labs
+name: t5_small_finetuned_wikisql
+date: 2023-01-31
+tags: [en, open_source, t5, tensorflow]
+task: Text Generation
+language: en
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: T5Transformer
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained T5ForConditionalGeneration model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `t5-small-finetuned-wikiSQL` is an English model originally trained by `mrm8488`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/t5_small_finetuned_wikisql_en_4.3.0_3.0_1675126227801.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/t5_small_finetuned_wikisql_en_4.3.0_3.0_1675126227801.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|t5_small_finetuned_wikisql|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[documents]|
+|Output Labels:|[t5]|
+|Language:|en|
+|Size:|285.5 MB|
+
+## References
+
+- https://huggingface.co/mrm8488/t5-small-finetuned-wikiSQL
+- https://paperswithcode.com/sota?task=Summarization&dataset=mlsum+de
\ No newline at end of file
diff --git a/docs/_posts/Cabir40/2023-01-31-t5_small_grammar_correction_de.md b/docs/_posts/Cabir40/2023-01-31-t5_small_grammar_correction_de.md
new file mode 100644
index 00000000000000..acd4183030da5b
--- /dev/null
+++ b/docs/_posts/Cabir40/2023-01-31-t5_small_grammar_correction_de.md
@@ -0,0 +1,87 @@
+---
+layout: model
+title: German T5ForConditionalGeneration Small Cased model (from aiassociates)
+author: John Snow Labs
+name: t5_small_grammar_correction
+date: 2023-01-31
+tags: [de, open_source, t5, tensorflow]
+task: Text Generation
+language: de
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: T5Transformer
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained T5ForConditionalGeneration model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `t5-small-grammar-correction-german` is a German model originally trained by `aiassociates`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/t5_small_grammar_correction_de_4.3.0_3.0_1675126287089.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/t5_small_grammar_correction_de_4.3.0_3.0_1675126287089.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|t5_small_grammar_correction|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[documents]|
+|Output Labels:|[t5]|
+|Language:|de|
+|Size:|288.1 MB|
+
+## References
+
+- https://huggingface.co/aiassociates/t5-small-grammar-correction-german
+- https://img.shields.io/badge/V.2-06.08.2022-brightgreen
\ No newline at end of file
diff --git a/docs/_posts/Cabir40/2023-01-31-t5_small_headline_generator_en.md b/docs/_posts/Cabir40/2023-01-31-t5_small_headline_generator_en.md
new file mode 100644
index 00000000000000..aae6495911fca8
--- /dev/null
+++ b/docs/_posts/Cabir40/2023-01-31-t5_small_headline_generator_en.md
@@ -0,0 +1,84 @@
+---
+layout: model
+title: English T5ForConditionalGeneration Small Cased model (from JulesBelveze)
+author: John Snow Labs
+name: t5_small_headline_generator
+date: 2023-01-31
+tags: [en, open_source, t5, tensorflow]
+task: Text Generation
+language: en
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: T5Transformer
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained T5ForConditionalGeneration model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `t5-small-headline-generator` is an English model originally trained by `JulesBelveze`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/t5_small_headline_generator_en_4.3.0_3.0_1675126378182.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/t5_small_headline_generator_en_4.3.0_3.0_1675126378182.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|t5_small_headline_generator|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[documents]|
+|Output Labels:|[t5]|
+|Language:|en|
+|Size:|282.9 MB|
+
+## References
+
+- https://huggingface.co/JulesBelveze/t5-small-headline-generator
\ No newline at end of file
diff --git a/docs/_posts/Cabir40/2023-01-31-t5_small_lm_adapt_en.md b/docs/_posts/Cabir40/2023-01-31-t5_small_lm_adapt_en.md
new file mode 100644
index 00000000000000..30ac1167f1becb
--- /dev/null
+++ b/docs/_posts/Cabir40/2023-01-31-t5_small_lm_adapt_en.md
@@ -0,0 +1,90 @@
+---
+layout: model
+title: English T5ForConditionalGeneration Small Cased model (from google)
+author: John Snow Labs
+name: t5_small_lm_adapt
+date: 2023-01-31
+tags: [en, open_source, t5, tensorflow]
+task: Text Generation
+language: en
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: T5Transformer
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained T5ForConditionalGeneration model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `t5-small-lm-adapt` is an English model originally trained by `google`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/t5_small_lm_adapt_en_4.3.0_3.0_1675126486527.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/t5_small_lm_adapt_en_4.3.0_3.0_1675126486527.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|t5_small_lm_adapt|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[documents]|
+|Output Labels:|[t5]|
+|Language:|en|
+|Size:|288.8 MB|
+
+## References
+
+- https://huggingface.co/google/t5-small-lm-adapt
+- https://img.shields.io/badge/V.2-17.08.2022-brightgreen
\ No newline at end of file
diff --git a/docs/_posts/Cabir40/2023-01-31-t5_small_quora_en.md b/docs/_posts/Cabir40/2023-01-31-t5_small_quora_en.md
new file mode 100644
index 00000000000000..c7edb333baeb10
--- /dev/null
+++ b/docs/_posts/Cabir40/2023-01-31-t5_small_quora_en.md
@@ -0,0 +1,86 @@
+---
+layout: model
+title: English T5ForConditionalGeneration Small Cased model (from hetpandya)
+author: John Snow Labs
+name: t5_small_quora
+date: 2023-01-31
+tags: [en, open_source, t5, tensorflow]
+task: Text Generation
+language: en
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: T5Transformer
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained T5ForConditionalGeneration model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `t5-small-quora` is an English model originally trained by `hetpandya`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/t5_small_quora_en_4.3.0_3.0_1675155570316.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/t5_small_quora_en_4.3.0_3.0_1675155570316.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|t5_small_quora|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[documents]|
+|Output Labels:|[t5]|
+|Language:|en|
+|Size:|288.7 MB|
+
+## References
+
+- https://huggingface.co/hetpandya/t5-small-quora
+- https://github.com/hetpandya
+- https://www.linkedin.com/in/het-pandya
\ No newline at end of file
diff --git a/docs/_posts/Cabir40/2023-01-31-t5_small_squad11_en.md b/docs/_posts/Cabir40/2023-01-31-t5_small_squad11_en.md
new file mode 100644
index 00000000000000..1b5b8a529088d2
--- /dev/null
+++ b/docs/_posts/Cabir40/2023-01-31-t5_small_squad11_en.md
@@ -0,0 +1,84 @@
+---
+layout: model
+title: English T5ForConditionalGeneration Small Cased model (from allenai)
+author: John Snow Labs
+name: t5_small_squad11
+date: 2023-01-31
+tags: [en, open_source, t5, tensorflow]
+task: Text Generation
+language: en
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: T5Transformer
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained T5ForConditionalGeneration model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `t5-small-squad11` is an English model originally trained by `allenai`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/t5_small_squad11_en_4.3.0_3.0_1675155640438.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/t5_small_squad11_en_4.3.0_3.0_1675155640438.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|t5_small_squad11|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[documents]|
+|Output Labels:|[t5]|
+|Language:|en|
+|Size:|148.2 MB|
+
+## References
+
+- https://huggingface.co/allenai/t5-small-squad11
\ No newline at end of file
diff --git a/docs/_posts/Cabir40/2023-01-31-t5_small_squad2_next_word_generator_squad_en.md b/docs/_posts/Cabir40/2023-01-31-t5_small_squad2_next_word_generator_squad_en.md
new file mode 100644
index 00000000000000..75df828da977ae
--- /dev/null
+++ b/docs/_posts/Cabir40/2023-01-31-t5_small_squad2_next_word_generator_squad_en.md
@@ -0,0 +1,84 @@
+---
+layout: model
+title: English T5ForConditionalGeneration Small Cased model (from allenai)
+author: John Snow Labs
+name: t5_small_squad2_next_word_generator_squad
+date: 2023-01-31
+tags: [en, open_source, t5, tensorflow]
+task: Text Generation
+language: en
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: T5Transformer
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained T5ForConditionalGeneration model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `t5-small-squad2-next-word-generator-squad` is an English model originally trained by `allenai`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/t5_small_squad2_next_word_generator_squad_en_4.3.0_3.0_1675155704406.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/t5_small_squad2_next_word_generator_squad_en_4.3.0_3.0_1675155704406.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|t5_small_squad2_next_word_generator_squad|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[documents]|
+|Output Labels:|[t5]|
+|Language:|en|
+|Size:|148.1 MB|
+
+## References
+
+- https://huggingface.co/allenai/t5-small-squad2-next-word-generator-squad
\ No newline at end of file
diff --git a/docs/_posts/Cabir40/2023-01-31-t5_small_squad2_question_generation_en.md b/docs/_posts/Cabir40/2023-01-31-t5_small_squad2_question_generation_en.md
new file mode 100644
index 00000000000000..ec8158dcccfc0d
--- /dev/null
+++ b/docs/_posts/Cabir40/2023-01-31-t5_small_squad2_question_generation_en.md
@@ -0,0 +1,84 @@
+---
+layout: model
+title: English T5ForConditionalGeneration Small Cased model (from allenai)
+author: John Snow Labs
+name: t5_small_squad2_question_generation
+date: 2023-01-31
+tags: [en, open_source, t5, tensorflow]
+task: Text Generation
+language: en
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: T5Transformer
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained T5ForConditionalGeneration model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `t5-small-squad2-question-generation` is an English model originally trained by `allenai`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/t5_small_squad2_question_generation_en_4.3.0_3.0_1675155768128.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/t5_small_squad2_question_generation_en_4.3.0_3.0_1675155768128.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|t5_small_squad2_question_generation|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[documents]|
+|Output Labels:|[t5]|
+|Language:|en|
+|Size:|148.2 MB|
+
+## References
+
+- https://huggingface.co/allenai/t5-small-squad2-question-generation
\ No newline at end of file
diff --git a/docs/_posts/Cabir40/2023-01-31-t5_small_ssm_en.md b/docs/_posts/Cabir40/2023-01-31-t5_small_ssm_en.md
new file mode 100644
index 00000000000000..0f75c5db9e6e7e
--- /dev/null
+++ b/docs/_posts/Cabir40/2023-01-31-t5_small_ssm_en.md
@@ -0,0 +1,89 @@
+---
+layout: model
+title: English T5ForConditionalGeneration Small Cased model (from google)
+author: John Snow Labs
+name: t5_small_ssm
+date: 2023-01-31
+tags: [en, open_source, t5, tensorflow]
+task: Text Generation
+language: en
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: T5Transformer
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained T5ForConditionalGeneration model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `t5-small-ssm` is an English model originally trained by `google`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/t5_small_ssm_en_4.3.0_3.0_1675155844003.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/t5_small_ssm_en_4.3.0_3.0_1675155844003.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|t5_small_ssm|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[documents]|
+|Output Labels:|[t5]|
+|Language:|en|
+|Size:|287.1 MB|
+
+## References
+
+- https://huggingface.co/google/t5-small-ssm
+- https://img.shields.io/badge/V.1-18.10.2022-brightgreen
\ No newline at end of file
diff --git a/docs/_posts/Cabir40/2023-01-31-t5_small_tapaco_en.md b/docs/_posts/Cabir40/2023-01-31-t5_small_tapaco_en.md
new file mode 100644
index 00000000000000..951b2acd3607ff
--- /dev/null
+++ b/docs/_posts/Cabir40/2023-01-31-t5_small_tapaco_en.md
@@ -0,0 +1,87 @@
+---
+layout: model
+title: English T5ForConditionalGeneration Small Cased model (from hetpandya)
+author: John Snow Labs
+name: t5_small_tapaco
+date: 2023-01-31
+tags: [en, open_source, t5, tensorflow]
+task: Text Generation
+language: en
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: T5Transformer
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained T5ForConditionalGeneration model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `t5-small-tapaco` is an English model originally trained by `hetpandya`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/t5_small_tapaco_en_4.3.0_3.0_1675155980692.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/t5_small_tapaco_en_4.3.0_3.0_1675155980692.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|t5_small_tapaco|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[documents]|
+|Output Labels:|[t5]|
+|Language:|en|
+|Size:|288.8 MB|
+
+## References
+
+- https://huggingface.co/hetpandya/t5-small-tapaco
+- https://towardsdatascience.com/training-t5-for-paraphrase-generation-ab3b5be151a2
+- https://github.com/hetpandya
+- https://www.linkedin.com/in/het-pandya
\ No newline at end of file
diff --git a/docs/_posts/Cabir40/2023-01-31-t5_small_txtsql_en.md b/docs/_posts/Cabir40/2023-01-31-t5_small_txtsql_en.md
new file mode 100644
index 00000000000000..c4ada5c24cd444
--- /dev/null
+++ b/docs/_posts/Cabir40/2023-01-31-t5_small_txtsql_en.md
@@ -0,0 +1,86 @@
+---
+layout: model
+title: English T5ForConditionalGeneration Small Cased model (from NeuML)
+author: John Snow Labs
+name: t5_small_txtsql
+date: 2023-01-31
+tags: [en, open_source, t5, tensorflow]
+task: Text Generation
+language: en
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: T5Transformer
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained T5ForConditionalGeneration model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `t5-small-txtsql` is an English model originally trained by `NeuML`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/t5_small_txtsql_en_4.3.0_3.0_1675156028568.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/t5_small_txtsql_en_4.3.0_3.0_1675156028568.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|t5_small_txtsql|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[documents]|
+|Output Labels:|[t5]|
+|Language:|en|
+|Size:|260.2 MB|
+
+## References
+
+- https://huggingface.co/NeuML/t5-small-txtsql
\ No newline at end of file
diff --git a/docs/_posts/Cabir40/2023-01-31-t5_text2sql_en.md b/docs/_posts/Cabir40/2023-01-31-t5_text2sql_en.md
new file mode 100644
index 00000000000000..2527aa66d73514
--- /dev/null
+++ b/docs/_posts/Cabir40/2023-01-31-t5_text2sql_en.md
@@ -0,0 +1,84 @@
+---
+layout: model
+title: English T5ForConditionalGeneration Cased model (from dsivakumar)
+author: John Snow Labs
+name: t5_text2sql
+date: 2023-01-31
+tags: [en, open_source, t5, tensorflow]
+task: Text Generation
+language: en
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: T5Transformer
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained T5ForConditionalGeneration model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `text2sql` is an English model originally trained by `dsivakumar`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/t5_text2sql_en_4.3.0_3.0_1675157342237.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/t5_text2sql_en_4.3.0_3.0_1675157342237.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|t5_text2sql|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[documents]|
+|Output Labels:|[t5]|
+|Language:|en|
+|Size:|914.0 MB|
+
+## References
+
+- https://huggingface.co/dsivakumar/text2sql
\ No newline at end of file
diff --git a/docs/_posts/Cabir40/2023-01-31-t5_triviaqa_base_en.md b/docs/_posts/Cabir40/2023-01-31-t5_triviaqa_base_en.md
new file mode 100644
index 00000000000000..81e57e8404683a
--- /dev/null
+++ b/docs/_posts/Cabir40/2023-01-31-t5_triviaqa_base_en.md
@@ -0,0 +1,87 @@
+---
+layout: model
+title: English T5ForConditionalGeneration Base Cased model (from deep-learning-analytics)
+author: John Snow Labs
+name: t5_triviaqa_base
+date: 2023-01-31
+tags: [en, open_source, t5, tensorflow]
+task: Text Generation
+language: en
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: T5Transformer
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained T5ForConditionalGeneration model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `triviaqa-t5-base` is an English model originally trained by `deep-learning-analytics`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/t5_triviaqa_base_en_4.3.0_3.0_1675157650979.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/t5_triviaqa_base_en_4.3.0_3.0_1675157650979.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|t5_triviaqa_base|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[documents]|
+|Output Labels:|[t5]|
+|Language:|en|
+|Size:|995.5 MB|
+
+## References
+
+- https://huggingface.co/deep-learning-analytics/triviaqa-t5-base
\ No newline at end of file
diff --git a/docs/_posts/Cabir40/2023-01-31-t5_unifiedqa_v2_base_1363200_en.md b/docs/_posts/Cabir40/2023-01-31-t5_unifiedqa_v2_base_1363200_en.md
new file mode 100644
index 00000000000000..f17483ace9a98b
--- /dev/null
+++ b/docs/_posts/Cabir40/2023-01-31-t5_unifiedqa_v2_base_1363200_en.md
@@ -0,0 +1,88 @@
+---
+layout: model
+title: English T5ForConditionalGeneration Base Cased model (from allenai)
+author: John Snow Labs
+name: t5_unifiedqa_v2_base_1363200
+date: 2023-01-31
+tags: [en, open_source, t5, tensorflow]
+task: Text Generation
+language: en
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: T5Transformer
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained T5ForConditionalGeneration model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `unifiedqa-v2-t5-base-1363200` is an English model originally trained by `allenai`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/t5_unifiedqa_v2_base_1363200_en_4.3.0_3.0_1675157943693.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/t5_unifiedqa_v2_base_1363200_en_4.3.0_3.0_1675157943693.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|t5_unifiedqa_v2_base_1363200|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[documents]|
+|Output Labels:|[t5]|
+|Language:|en|
+|Size:|277.8 MB|
+
+## References
+
+- https://huggingface.co/allenai/unifiedqa-v2-t5-base-1363200
\ No newline at end of file
diff --git a/docs/_posts/Cabir40/2023-01-31-t5_wikihow_small_en.md b/docs/_posts/Cabir40/2023-01-31-t5_wikihow_small_en.md
new file mode 100644
index 00000000000000..9b0ba9ac116fc7
--- /dev/null
+++ b/docs/_posts/Cabir40/2023-01-31-t5_wikihow_small_en.md
@@ -0,0 +1,85 @@
+---
+layout: model
+title: English T5ForConditionalGeneration Small Cased model (from deep-learning-analytics)
+author: John Snow Labs
+name: t5_wikihow_small
+date: 2023-01-31
+tags: [en, open_source, t5, tensorflow]
+task: Text Generation
+language: en
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: T5Transformer
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained T5ForConditionalGeneration model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `wikihow-t5-small` is an English model originally trained by `deep-learning-analytics`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/t5_wikihow_small_en_4.3.0_3.0_1675158602814.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/t5_wikihow_small_en_4.3.0_3.0_1675158602814.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|t5_wikihow_small|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[documents]|
+|Output Labels:|[t5]|
+|Language:|en|
+|Size:|286.9 MB|
+
+## References
+
+- https://huggingface.co/deep-learning-analytics/wikihow-t5-small
+- https://medium.com/@priya.dwivedi/fine-tuning-a-t5-transformer-for-any-summarization-task-82334c64c81
\ No newline at end of file
diff --git a/docs/_posts/Cabir40/2023-01-31-t5_wikisql_en2sql_en.md b/docs/_posts/Cabir40/2023-01-31-t5_wikisql_en2sql_en.md
new file mode 100644
index 00000000000000..099ae1b2322e3e
--- /dev/null
+++ b/docs/_posts/Cabir40/2023-01-31-t5_wikisql_en2sql_en.md
@@ -0,0 +1,87 @@
+---
+layout: model
+title: English T5ForConditionalGeneration Cased model (from dbernsohn)
+author: John Snow Labs
+name: t5_wikisql_en2sql
+date: 2023-01-31
+tags: [en, open_source, t5, tensorflow]
+task: Text Generation
+language: en
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: T5Transformer
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained T5ForConditionalGeneration model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `t5_wikisql_en2SQL` is an English model originally trained by `dbernsohn`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/t5_wikisql_en2sql_en_4.3.0_3.0_1675157192158.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/t5_wikisql_en2sql_en_4.3.0_3.0_1675157192158.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|t5_wikisql_en2sql|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[documents]|
+|Output Labels:|[t5]|
+|Language:|en|
+|Size:|288.2 MB|
+
+## References
+
+- https://huggingface.co/dbernsohn/t5_wikisql_en2SQL
+- https://ai.googleblog.com/2020/02/exploring-transfer-learning-with-t5.html
+- https://github.com/DorBernsohn/CodeLM/tree/main/SQLM
+- https://www.linkedin.com/in/dor-bernsohn-70b2b1146/
\ No newline at end of file
diff --git a/docs/_posts/Cabir40/2023-01-31-t5_yahoo_answers_base_v1_en.md b/docs/_posts/Cabir40/2023-01-31-t5_yahoo_answers_base_v1_en.md
new file mode 100644
index 00000000000000..3635d5cba01e03
--- /dev/null
+++ b/docs/_posts/Cabir40/2023-01-31-t5_yahoo_answers_base_v1_en.md
@@ -0,0 +1,89 @@
+---
+layout: model
+title: English T5ForConditionalGeneration Base Cased model (from doc2query)
+author: John Snow Labs
+name: t5_yahoo_answers_base_v1
+date: 2023-01-31
+tags: [en, open_source, t5, tensorflow]
+task: Text Generation
+language: en
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: T5Transformer
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained T5ForConditionalGeneration model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `yahoo_answers-t5-base-v1` is an English model originally trained by `doc2query`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/t5_yahoo_answers_base_v1_en_4.3.0_3.0_1675158667385.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/t5_yahoo_answers_base_v1_en_4.3.0_3.0_1675158667385.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|t5_yahoo_answers_base_v1|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[documents]|
+|Output Labels:|[t5]|
+|Language:|en|
+|Size:|1.0 GB|
+
+## References
+
+- https://huggingface.co/doc2query/yahoo_answers-t5-base-v1
+- https://arxiv.org/abs/1904.08375
+- https://cs.uwaterloo.ca/~jimmylin/publications/Nogueira_Lin_2019_docTTTTTquery-v2.pdf
+- https://arxiv.org/abs/2104.08663
+- https://github.com/UKPLab/beir
+- https://www.sbert.net/examples/unsupervised_learning/query_generation/README.html
\ No newline at end of file
diff --git a/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_01_dialdoc_en.md b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_01_dialdoc_en.md
new file mode 100644
index 00000000000000..4f5dd7e272f856
--- /dev/null
+++ b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_01_dialdoc_en.md
@@ -0,0 +1,88 @@
+---
+layout: model
+title: English RobertaForQuestionAnswering Cased model (from alistvt)
+author: John Snow Labs
+name: roberta_qa_01_dialdoc
+date: 2023-01-20
+tags: [en, open_source, roberta, question_answering, tensorflow]
+task: Question Answering
+language: en
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: RoBertaForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained RobertaForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `01-roberta-dialdoc` is an English model originally trained by `alistvt`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/roberta_qa_01_dialdoc_en_4.3.0_3.0_1674206907196.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/roberta_qa_01_dialdoc_en_4.3.0_3.0_1674206907196.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_01_dialdoc","en")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_01_dialdoc","en")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq("What's my name?","My name is Clara and I live in Berkeley.").toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|roberta_qa_01_dialdoc|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|1.3 GB|
+|Case sensitive:|true|
+|Max sentence length:|256|
+
+## References
+
+- https://huggingface.co/alistvt/01-roberta-dialdoc
\ No newline at end of file
diff --git a/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_addi_fr_xlm_r_en.md b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_addi_fr_xlm_r_en.md
new file mode 100644
index 00000000000000..9ed066eea6af33
--- /dev/null
+++ b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_addi_fr_xlm_r_en.md
@@ -0,0 +1,88 @@
+---
+layout: model
+title: English RobertaForQuestionAnswering Cased model (from Gantenbein)
+author: John Snow Labs
+name: roberta_qa_addi_fr_xlm_r
+date: 2023-01-20
+tags: [en, open_source, roberta, question_answering, tensorflow]
+task: Question Answering
+language: en
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: RoBertaForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained RobertaForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `ADDI-FR-XLM-R` is an English model originally trained by `Gantenbein`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/roberta_qa_addi_fr_xlm_r_en_4.3.0_3.0_1674207724209.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/roberta_qa_addi_fr_xlm_r_en_4.3.0_3.0_1674207724209.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_addi_fr_xlm_r","en")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_addi_fr_xlm_r","en")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq("What's my name?","My name is Clara and I live in Berkeley.").toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|roberta_qa_addi_fr_xlm_r|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|422.7 MB|
+|Case sensitive:|true|
+|Max sentence length:|256|
+
+## References
+
+- https://huggingface.co/Gantenbein/ADDI-FR-XLM-R
\ No newline at end of file
diff --git a/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_ai_club_inductions_21_nlp_base_squad_v2_en.md b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_ai_club_inductions_21_nlp_base_squad_v2_en.md
new file mode 100644
index 00000000000000..0972b0a7b3c365
--- /dev/null
+++ b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_ai_club_inductions_21_nlp_base_squad_v2_en.md
@@ -0,0 +1,88 @@
+---
+layout: model
+title: English RobertaForQuestionAnswering Base Cased model (from AyushPJ)
+author: John Snow Labs
+name: roberta_qa_ai_club_inductions_21_nlp_base_squad_v2
+date: 2023-01-20
+tags: [en, open_source, roberta, question_answering, tensorflow]
+task: Question Answering
+language: en
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: RoBertaForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained RobertaForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `ai-club-inductions-21-nlp-roBERTa-base-squad-v2` is an English model originally trained by `AyushPJ`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/roberta_qa_ai_club_inductions_21_nlp_base_squad_v2_en_4.3.0_3.0_1674209021708.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/roberta_qa_ai_club_inductions_21_nlp_base_squad_v2_en_4.3.0_3.0_1674209021708.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_ai_club_inductions_21_nlp_base_squad_v2","en")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_ai_club_inductions_21_nlp_base_squad_v2","en")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq("What's my name?","My name is Clara and I live in Berkeley.").toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|roberta_qa_ai_club_inductions_21_nlp_base_squad_v2|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|465.3 MB|
+|Case sensitive:|true|
+|Max sentence length:|256|
+
+## References
+
+- https://huggingface.co/AyushPJ/ai-club-inductions-21-nlp-roBERTa-base-squad-v2
\ No newline at end of file
diff --git a/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_ai_club_inductions_21_nlp_en.md b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_ai_club_inductions_21_nlp_en.md
new file mode 100644
index 00000000000000..79b8c1b716df96
--- /dev/null
+++ b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_ai_club_inductions_21_nlp_en.md
@@ -0,0 +1,88 @@
+---
+layout: model
+title: English RobertaForQuestionAnswering Cased model (from AyushPJ)
+author: John Snow Labs
+name: roberta_qa_ai_club_inductions_21_nlp
+date: 2023-01-20
+tags: [en, open_source, roberta, question_answering, tensorflow]
+task: Question Answering
+language: en
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: RoBertaForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained RobertaForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `ai-club-inductions-21-nlp-roBERTa` is an English model originally trained by `AyushPJ`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/roberta_qa_ai_club_inductions_21_nlp_en_4.3.0_3.0_1674208962596.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/roberta_qa_ai_club_inductions_21_nlp_en_4.3.0_3.0_1674208962596.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_ai_club_inductions_21_nlp","en")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_ai_club_inductions_21_nlp","en")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq("What's my name?","My name is Clara and I live in Berkeley.").toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|roberta_qa_ai_club_inductions_21_nlp|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|465.3 MB|
+|Case sensitive:|true|
+|Max sentence length:|256|
+
+## References
+
+- https://huggingface.co/AyushPJ/ai-club-inductions-21-nlp-roBERTa
\ No newline at end of file
diff --git a/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_amitjohn007_base_finetuned_squad_en.md b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_amitjohn007_base_finetuned_squad_en.md
new file mode 100644
index 00000000000000..f9f1d5393bc23d
--- /dev/null
+++ b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_amitjohn007_base_finetuned_squad_en.md
@@ -0,0 +1,88 @@
+---
+layout: model
+title: English RobertaForQuestionAnswering Base Cased model (from amitjohn007)
+author: John Snow Labs
+name: roberta_qa_amitjohn007_base_finetuned_squad
+date: 2023-01-20
+tags: [en, open_source, roberta, question_answering, tensorflow]
+task: Question Answering
+language: en
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: RoBertaForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained RobertaForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `roberta-base-finetuned-squad` is an English model originally trained by `amitjohn007`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/roberta_qa_amitjohn007_base_finetuned_squad_en_4.3.0_3.0_1674217120272.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/roberta_qa_amitjohn007_base_finetuned_squad_en_4.3.0_3.0_1674217120272.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_amitjohn007_base_finetuned_squad","en")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_amitjohn007_base_finetuned_squad","en")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq("What's my name?","My name is Clara and I live in Berkeley.").toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|roberta_qa_amitjohn007_base_finetuned_squad|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|464.3 MB|
+|Case sensitive:|true|
+|Max sentence length:|256|
+
+## References
+
+- https://huggingface.co/amitjohn007/roberta-base-finetuned-squad
\ No newline at end of file
diff --git a/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_autotrain_test2_756523213_en.md b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_autotrain_test2_756523213_en.md
new file mode 100644
index 00000000000000..9029ea983e5348
--- /dev/null
+++ b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_autotrain_test2_756523213_en.md
@@ -0,0 +1,88 @@
+---
+layout: model
+title: English RobertaForQuestionAnswering Cased model (from AlirezaBaneshi)
+author: John Snow Labs
+name: roberta_qa_autotrain_test2_756523213
+date: 2023-01-20
+tags: [en, open_source, roberta, question_answering, tensorflow]
+task: Question Answering
+language: en
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: RoBertaForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained RobertaForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `autotrain-test2-756523213` is an English model originally trained by `AlirezaBaneshi`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/roberta_qa_autotrain_test2_756523213_en_4.3.0_3.0_1674209108948.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/roberta_qa_autotrain_test2_756523213_en_4.3.0_3.0_1674209108948.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_autotrain_test2_756523213","en")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_autotrain_test2_756523213","en")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq("What's my name?","My name is Clara and I live in Berkeley.").toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|roberta_qa_autotrain_test2_756523213|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|415.9 MB|
+|Case sensitive:|true|
+|Max sentence length:|256|
+
+## References
+
+- https://huggingface.co/AlirezaBaneshi/autotrain-test2-756523213
\ No newline at end of file
diff --git a/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_autotrain_test2_756523214_en.md b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_autotrain_test2_756523214_en.md
new file mode 100644
index 00000000000000..8d3eb536e920c8
--- /dev/null
+++ b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_autotrain_test2_756523214_en.md
@@ -0,0 +1,88 @@
+---
+layout: model
+title: English RobertaForQuestionAnswering Cased model (from AlirezaBaneshi)
+author: John Snow Labs
+name: roberta_qa_autotrain_test2_756523214
+date: 2023-01-20
+tags: [en, open_source, roberta, question_answering, tensorflow]
+task: Question Answering
+language: en
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: RoBertaForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained RobertaForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `autotrain-test2-756523214` is an English model originally trained by `AlirezaBaneshi`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/roberta_qa_autotrain_test2_756523214_en_4.3.0_3.0_1674209197246.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/roberta_qa_autotrain_test2_756523214_en_4.3.0_3.0_1674209197246.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_autotrain_test2_756523214","en")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_autotrain_test2_756523214","en")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq("What's my name?","My name is Clara and I live in Berkeley.").toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|roberta_qa_autotrain_test2_756523214|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|415.9 MB|
+|Case sensitive:|true|
+|Max sentence length:|256|
+
+## References
+
+- https://huggingface.co/AlirezaBaneshi/autotrain-test2-756523214
\ No newline at end of file
diff --git a/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_avioo1_base_squad2_finetuned_squad_en.md b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_avioo1_base_squad2_finetuned_squad_en.md
new file mode 100644
index 00000000000000..a817ac110154d3
--- /dev/null
+++ b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_avioo1_base_squad2_finetuned_squad_en.md
@@ -0,0 +1,88 @@
+---
+layout: model
+title: English RobertaForQuestionAnswering Base Cased model (from avioo1)
+author: John Snow Labs
+name: roberta_qa_avioo1_base_squad2_finetuned_squad
+date: 2023-01-20
+tags: [en, open_source, roberta, question_answering, tensorflow]
+task: Question Answering
+language: en
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: RoBertaForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained RobertaForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `roberta-base-squad2-finetuned-squad` is an English model originally trained by `avioo1`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/roberta_qa_avioo1_base_squad2_finetuned_squad_en_4.3.0_3.0_1674219191405.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/roberta_qa_avioo1_base_squad2_finetuned_squad_en_4.3.0_3.0_1674219191405.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_avioo1_base_squad2_finetuned_squad","en")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_avioo1_base_squad2_finetuned_squad","en")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq("What's my name?","My name is Clara and I live in Berkeley.").toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|roberta_qa_avioo1_base_squad2_finetuned_squad|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|464.4 MB|
+|Case sensitive:|true|
+|Max sentence length:|256|
+
+## References
+
+- https://huggingface.co/avioo1/roberta-base-squad2-finetuned-squad
\ No newline at end of file
diff --git a/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_attribute_correction_en.md b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_attribute_correction_en.md
new file mode 100644
index 00000000000000..f6330a2ec2a421
--- /dev/null
+++ b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_attribute_correction_en.md
@@ -0,0 +1,88 @@
+---
+layout: model
+title: English RobertaForQuestionAnswering Base Cased model (from ksabeh)
+author: John Snow Labs
+name: roberta_qa_base_attribute_correction
+date: 2023-01-20
+tags: [en, open_source, roberta, question_answering, tensorflow]
+task: Question Answering
+language: en
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: RoBertaForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained RobertaForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `roberta-base-attribute-correction` is an English model originally trained by `ksabeh`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/roberta_qa_base_attribute_correction_en_4.3.0_3.0_1674212650491.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/roberta_qa_base_attribute_correction_en_4.3.0_3.0_1674212650491.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_base_attribute_correction","en")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_base_attribute_correction","en")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq("What's my name?","My name is Clara and I live in Berkeley.").toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|roberta_qa_base_attribute_correction|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|430.8 MB|
+|Case sensitive:|true|
+|Max sentence length:|256|
+
+## References
+
+- https://huggingface.co/ksabeh/roberta-base-attribute-correction
\ No newline at end of file
diff --git a/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_attribute_correction_mlm_en.md b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_attribute_correction_mlm_en.md
new file mode 100644
index 00000000000000..1e4cb337f7d72c
--- /dev/null
+++ b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_attribute_correction_mlm_en.md
@@ -0,0 +1,88 @@
+---
+layout: model
+title: English RobertaForQuestionAnswering Base Cased model (from ksabeh)
+author: John Snow Labs
+name: roberta_qa_base_attribute_correction_mlm
+date: 2023-01-20
+tags: [en, open_source, roberta, question_answering, tensorflow]
+task: Question Answering
+language: en
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: RoBertaForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained RobertaForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `roberta-base-attribute-correction-mlm` is an English model originally trained by `ksabeh`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/roberta_qa_base_attribute_correction_mlm_en_4.3.0_3.0_1674212707495.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/roberta_qa_base_attribute_correction_mlm_en_4.3.0_3.0_1674212707495.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_base_attribute_correction_mlm","en")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_base_attribute_correction_mlm","en")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq("What's my name?","My name is Clara and I live in Berkeley.").toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|roberta_qa_base_attribute_correction_mlm|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|466.8 MB|
+|Case sensitive:|true|
+|Max sentence length:|256|
+
+## References
+
+- https://huggingface.co/ksabeh/roberta-base-attribute-correction-mlm
\ No newline at end of file
diff --git a/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_attribute_correction_mlm_titles_en.md b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_attribute_correction_mlm_titles_en.md
new file mode 100644
index 00000000000000..33c1d239a47819
--- /dev/null
+++ b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_attribute_correction_mlm_titles_en.md
@@ -0,0 +1,88 @@
+---
+layout: model
+title: English RobertaForQuestionAnswering Base Cased model (from ksabeh)
+author: John Snow Labs
+name: roberta_qa_base_attribute_correction_mlm_titles
+date: 2023-01-20
+tags: [en, open_source, roberta, question_answering, tensorflow]
+task: Question Answering
+language: en
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: RoBertaForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained RobertaForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `roberta-base-attribute-correction-mlm-titles` is an English model originally trained by `ksabeh`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/roberta_qa_base_attribute_correction_mlm_titles_en_4.3.0_3.0_1674212765867.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/roberta_qa_base_attribute_correction_mlm_titles_en_4.3.0_3.0_1674212765867.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_base_attribute_correction_mlm_titles","en")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_base_attribute_correction_mlm_titles","en")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq("What's my name?","My name is Clara and I live in Berkeley.").toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|roberta_qa_base_attribute_correction_mlm_titles|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|466.8 MB|
+|Case sensitive:|true|
+|Max sentence length:|256|
+
+## References
+
+- https://huggingface.co/ksabeh/roberta-base-attribute-correction-mlm-titles
\ No newline at end of file
diff --git a/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_best_squad2_en.md b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_best_squad2_en.md
new file mode 100644
index 00000000000000..9aed0b4d745581
--- /dev/null
+++ b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_best_squad2_en.md
@@ -0,0 +1,88 @@
+---
+layout: model
+title: English RobertaForQuestionAnswering Base Cased model (from PremalMatalia)
+author: John Snow Labs
+name: roberta_qa_base_best_squad2
+date: 2023-01-20
+tags: [en, open_source, roberta, question_answering, tensorflow]
+task: Question Answering
+language: en
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: RoBertaForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained RobertaForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `roberta-base-best-squad2` is an English model originally trained by `PremalMatalia`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/roberta_qa_base_best_squad2_en_4.3.0_3.0_1674212826277.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/roberta_qa_base_best_squad2_en_4.3.0_3.0_1674212826277.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_base_best_squad2","en")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_base_best_squad2","en")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq("What's my name?","My name is Clara and I live in Berkeley.").toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|roberta_qa_base_best_squad2|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|464.3 MB|
+|Case sensitive:|true|
+|Max sentence length:|256|
+
+## References
+
+- https://huggingface.co/PremalMatalia/roberta-base-best-squad2
\ No newline at end of file
diff --git a/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_bne_becas_es.md b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_bne_becas_es.md
new file mode 100644
index 00000000000000..646cae129be7af
--- /dev/null
+++ b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_bne_becas_es.md
@@ -0,0 +1,88 @@
+---
+layout: model
+title: Spanish RobertaForQuestionAnswering Base Cased model (from Evelyn18)
+author: John Snow Labs
+name: roberta_qa_base_bne_becas
+date: 2023-01-20
+tags: [es, open_source, roberta, question_answering, tensorflow]
+task: Question Answering
+language: es
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: RoBertaForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained RobertaForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `roberta-base-bne-ROBERTaBECAS` is a Spanish model originally trained by `Evelyn18`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/roberta_qa_base_bne_becas_es_4.3.0_3.0_1674212894934.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/roberta_qa_base_bne_becas_es_4.3.0_3.0_1674212894934.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_base_bne_becas","es")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_base_bne_becas","es")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq("What's my name?","My name is Clara and I live in Berkeley.").toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|roberta_qa_base_bne_becas|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|es|
+|Size:|420.6 MB|
+|Case sensitive:|true|
+|Max sentence length:|256|
+
+## References
+
+- https://huggingface.co/Evelyn18/roberta-base-bne-ROBERTaBECAS
\ No newline at end of file
diff --git a/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_ca_v2_catalan_ca.md b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_ca_v2_catalan_ca.md
new file mode 100644
index 00000000000000..b9f8b4271307a9
--- /dev/null
+++ b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_ca_v2_catalan_ca.md
@@ -0,0 +1,88 @@
+---
+layout: model
+title: Catalan RobertaForQuestionAnswering Base Cased model (from crodri)
+author: John Snow Labs
+name: roberta_qa_base_ca_v2_catalan
+date: 2023-01-20
+tags: [ca, open_source, roberta, question_answering, tensorflow]
+task: Question Answering
+language: ca
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: RoBertaForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained RobertaForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `roberta-base-ca-v2-qa-catalanqa` is a Catalan model originally trained by `crodri`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/roberta_qa_base_ca_v2_catalan_ca_4.3.0_3.0_1674213071415.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/roberta_qa_base_ca_v2_catalan_ca_4.3.0_3.0_1674213071415.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_base_ca_v2_catalan","ca")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_base_ca_v2_catalan","ca")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq("What's my name?","My name is Clara and I live in Berkeley.").toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|roberta_qa_base_ca_v2_catalan|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|ca|
+|Size:|456.5 MB|
+|Case sensitive:|true|
+|Max sentence length:|256|
+
+## References
+
+- https://huggingface.co/crodri/roberta-base-ca-v2-qa-catalanqa
\ No newline at end of file
diff --git a/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_chaii_en.md b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_chaii_en.md
new file mode 100644
index 00000000000000..a40edd128b8122
--- /dev/null
+++ b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_chaii_en.md
@@ -0,0 +1,88 @@
+---
+layout: model
+title: English RobertaForQuestionAnswering Base Cased model (from SauravMaheshkar)
+author: John Snow Labs
+name: roberta_qa_base_chaii
+date: 2023-01-20
+tags: [en, open_source, roberta, question_answering, tensorflow]
+task: Question Answering
+language: en
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: RoBertaForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained RobertaForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `roberta-base-chaii` is an English model originally trained by `SauravMaheshkar`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/roberta_qa_base_chaii_en_4.3.0_3.0_1674213133232.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/roberta_qa_base_chaii_en_4.3.0_3.0_1674213133232.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_base_chaii","en")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_base_chaii","en")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq("What's my name?","My name is Clara and I live in Berkeley.").toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|roberta_qa_base_chaii|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document, token]|
+|Output Labels:|[class]|
+|Language:|en|
+|Size:|464.7 MB|
+|Case sensitive:|true|
+|Max sentence length:|256|
+
+## References
+
+- https://huggingface.co/SauravMaheshkar/roberta-base-chaii
\ No newline at end of file
diff --git a/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_custom_en.md b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_custom_en.md
new file mode 100644
index 00000000000000..25f8235413595b
--- /dev/null
+++ b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_custom_en.md
@@ -0,0 +1,88 @@
+---
+layout: model
+title: English RobertaForQuestionAnswering Base Cased model (from eAsyle)
+author: John Snow Labs
+name: roberta_qa_base_custom
+date: 2023-01-20
+tags: [en, open_source, roberta, question_answering, tensorflow]
+task: Question Answering
+language: en
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: RoBertaForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained RobertaForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `roberta_base_custom_QA` is an English model originally trained by `eAsyle`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/roberta_qa_base_custom_en_4.3.0_3.0_1674223065625.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/roberta_qa_base_custom_en_4.3.0_3.0_1674223065625.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_base_custom","en")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_base_custom","en")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq("What's my name?","My name is Clara and I live in Berkeley.").toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|roberta_qa_base_custom|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document, token]|
+|Output Labels:|[class]|
+|Language:|en|
+|Size:|424.1 MB|
+|Case sensitive:|true|
+|Max sentence length:|256|
+
+## References
+
+- https://huggingface.co/eAsyle/roberta_base_custom_QA
\ No newline at end of file
diff --git a/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_emr_en.md b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_emr_en.md
new file mode 100644
index 00000000000000..dffd6f0304e0a6
--- /dev/null
+++ b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_emr_en.md
@@ -0,0 +1,88 @@
+---
+layout: model
+title: English RobertaForQuestionAnswering Base Cased model (from emr-se-miniproject)
+author: John Snow Labs
+name: roberta_qa_base_emr
+date: 2023-01-20
+tags: [en, open_source, roberta, question_answering, tensorflow]
+task: Question Answering
+language: en
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: RoBertaForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained RobertaForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `roberta-base-emr` is an English model originally trained by `emr-se-miniproject`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/roberta_qa_base_emr_en_4.3.0_3.0_1674213227451.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/roberta_qa_base_emr_en_4.3.0_3.0_1674213227451.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_base_emr","en")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_base_emr","en")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq("What's my name?","My name is Clara and I live in Berkeley.").toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|roberta_qa_base_emr|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document, token]|
+|Output Labels:|[class]|
+|Language:|en|
+|Size:|464.5 MB|
+|Case sensitive:|true|
+|Max sentence length:|256|
+
+## References
+
+- https://huggingface.co/emr-se-miniproject/roberta-base-emr
\ No newline at end of file
diff --git a/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_few_shot_k_1024_finetuned_squad_seed_0_en.md b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_few_shot_k_1024_finetuned_squad_seed_0_en.md
new file mode 100644
index 00000000000000..9f017ef292fa19
--- /dev/null
+++ b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_few_shot_k_1024_finetuned_squad_seed_0_en.md
@@ -0,0 +1,88 @@
+---
+layout: model
+title: English RobertaForQuestionAnswering Base Cased model (from anas-awadalla)
+author: John Snow Labs
+name: roberta_qa_base_few_shot_k_1024_finetuned_squad_seed_0
+date: 2023-01-20
+tags: [en, open_source, roberta, question_answering, tensorflow]
+task: Question Answering
+language: en
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: RoBertaForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained RobertaForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `roberta-base-few-shot-k-1024-finetuned-squad-seed-0` is an English model originally trained by `anas-awadalla`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/roberta_qa_base_few_shot_k_1024_finetuned_squad_seed_0_en_4.3.0_3.0_1674213299788.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/roberta_qa_base_few_shot_k_1024_finetuned_squad_seed_0_en_4.3.0_3.0_1674213299788.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_base_few_shot_k_1024_finetuned_squad_seed_0","en")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_base_few_shot_k_1024_finetuned_squad_seed_0","en")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq("What's my name?","My name is Clara and I live in Berkeley.").toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|roberta_qa_base_few_shot_k_1024_finetuned_squad_seed_0|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document, token]|
+|Output Labels:|[class]|
+|Language:|en|
+|Size:|439.5 MB|
+|Case sensitive:|true|
+|Max sentence length:|256|
+
+## References
+
+- https://huggingface.co/anas-awadalla/roberta-base-few-shot-k-1024-finetuned-squad-seed-0
\ No newline at end of file
diff --git a/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_few_shot_k_1024_finetuned_squad_seed_10_en.md b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_few_shot_k_1024_finetuned_squad_seed_10_en.md
new file mode 100644
index 00000000000000..7fe6671a02cce0
--- /dev/null
+++ b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_few_shot_k_1024_finetuned_squad_seed_10_en.md
@@ -0,0 +1,88 @@
+---
+layout: model
+title: English RobertaForQuestionAnswering Base Cased model (from anas-awadalla)
+author: John Snow Labs
+name: roberta_qa_base_few_shot_k_1024_finetuned_squad_seed_10
+date: 2023-01-20
+tags: [en, open_source, roberta, question_answering, tensorflow]
+task: Question Answering
+language: en
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: RoBertaForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained RobertaForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `roberta-base-few-shot-k-1024-finetuned-squad-seed-10` is an English model originally trained by `anas-awadalla`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/roberta_qa_base_few_shot_k_1024_finetuned_squad_seed_10_en_4.3.0_3.0_1674213377790.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/roberta_qa_base_few_shot_k_1024_finetuned_squad_seed_10_en_4.3.0_3.0_1674213377790.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_base_few_shot_k_1024_finetuned_squad_seed_10","en")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_base_few_shot_k_1024_finetuned_squad_seed_10","en")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq("What's my name?","My name is Clara and I live in Berkeley.").toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|roberta_qa_base_few_shot_k_1024_finetuned_squad_seed_10|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document, token]|
+|Output Labels:|[class]|
+|Language:|en|
+|Size:|439.4 MB|
+|Case sensitive:|true|
+|Max sentence length:|256|
+
+## References
+
+- https://huggingface.co/anas-awadalla/roberta-base-few-shot-k-1024-finetuned-squad-seed-10
\ No newline at end of file
diff --git a/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_few_shot_k_1024_finetuned_squad_seed_42_en.md b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_few_shot_k_1024_finetuned_squad_seed_42_en.md
new file mode 100644
index 00000000000000..fbf4d9eda1164e
--- /dev/null
+++ b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_few_shot_k_1024_finetuned_squad_seed_42_en.md
@@ -0,0 +1,88 @@
+---
+layout: model
+title: English RobertaForQuestionAnswering Base Cased model (from anas-awadalla)
+author: John Snow Labs
+name: roberta_qa_base_few_shot_k_1024_finetuned_squad_seed_42
+date: 2023-01-20
+tags: [en, open_source, roberta, question_answering, tensorflow]
+task: Question Answering
+language: en
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: RoBertaForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained RobertaForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `roberta-base-few-shot-k-1024-finetuned-squad-seed-42` is an English model originally trained by `anas-awadalla`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/roberta_qa_base_few_shot_k_1024_finetuned_squad_seed_42_en_4.3.0_3.0_1674213511887.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/roberta_qa_base_few_shot_k_1024_finetuned_squad_seed_42_en_4.3.0_3.0_1674213511887.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_base_few_shot_k_1024_finetuned_squad_seed_42","en")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_base_few_shot_k_1024_finetuned_squad_seed_42","en")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq("What's my name?","My name is Clara and I live in Berkeley.").toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|roberta_qa_base_few_shot_k_1024_finetuned_squad_seed_42|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document, token]|
+|Output Labels:|[class]|
+|Language:|en|
+|Size:|447.1 MB|
+|Case sensitive:|true|
+|Max sentence length:|256|
+
+## References
+
+- https://huggingface.co/anas-awadalla/roberta-base-few-shot-k-1024-finetuned-squad-seed-42
\ No newline at end of file
diff --git a/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_few_shot_k_1024_finetuned_squad_seed_4_en.md b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_few_shot_k_1024_finetuned_squad_seed_4_en.md
new file mode 100644
index 00000000000000..f1aecdc7cf42e2
--- /dev/null
+++ b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_few_shot_k_1024_finetuned_squad_seed_4_en.md
@@ -0,0 +1,88 @@
+---
+layout: model
+title: English RobertaForQuestionAnswering Base Cased model (from anas-awadalla)
+author: John Snow Labs
+name: roberta_qa_base_few_shot_k_1024_finetuned_squad_seed_4
+date: 2023-01-20
+tags: [en, open_source, roberta, question_answering, tensorflow]
+task: Question Answering
+language: en
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: RoBertaForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained RobertaForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `roberta-base-few-shot-k-1024-finetuned-squad-seed-4` is an English model originally trained by `anas-awadalla`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/roberta_qa_base_few_shot_k_1024_finetuned_squad_seed_4_en_4.3.0_3.0_1674213450529.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/roberta_qa_base_few_shot_k_1024_finetuned_squad_seed_4_en_4.3.0_3.0_1674213450529.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_base_few_shot_k_1024_finetuned_squad_seed_4","en")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_base_few_shot_k_1024_finetuned_squad_seed_4","en")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq("What's my name?","My name is Clara and I live in Berkeley.").toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|roberta_qa_base_few_shot_k_1024_finetuned_squad_seed_4|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document, token]|
+|Output Labels:|[class]|
+|Language:|en|
+|Size:|439.5 MB|
+|Case sensitive:|true|
+|Max sentence length:|256|
+
+## References
+
+- https://huggingface.co/anas-awadalla/roberta-base-few-shot-k-1024-finetuned-squad-seed-4
\ No newline at end of file
diff --git a/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_few_shot_k_1024_finetuned_squad_seed_6_en.md b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_few_shot_k_1024_finetuned_squad_seed_6_en.md
new file mode 100644
index 00000000000000..888fc29b4049ea
--- /dev/null
+++ b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_few_shot_k_1024_finetuned_squad_seed_6_en.md
@@ -0,0 +1,88 @@
+---
+layout: model
+title: English RobertaForQuestionAnswering Base Cased model (from anas-awadalla)
+author: John Snow Labs
+name: roberta_qa_base_few_shot_k_1024_finetuned_squad_seed_6
+date: 2023-01-20
+tags: [en, open_source, roberta, question_answering, tensorflow]
+task: Question Answering
+language: en
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: RoBertaForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained RobertaForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `roberta-base-few-shot-k-1024-finetuned-squad-seed-6` is an English model originally trained by `anas-awadalla`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/roberta_qa_base_few_shot_k_1024_finetuned_squad_seed_6_en_4.3.0_3.0_1674213584769.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/roberta_qa_base_few_shot_k_1024_finetuned_squad_seed_6_en_4.3.0_3.0_1674213584769.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_base_few_shot_k_1024_finetuned_squad_seed_6","en")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_base_few_shot_k_1024_finetuned_squad_seed_6","en")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq("What's my name?","My name is Clara and I live in Berkeley.").toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|roberta_qa_base_few_shot_k_1024_finetuned_squad_seed_6|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document, token]|
+|Output Labels:|[class]|
+|Language:|en|
+|Size:|439.4 MB|
+|Case sensitive:|true|
+|Max sentence length:|256|
+
+## References
+
+- https://huggingface.co/anas-awadalla/roberta-base-few-shot-k-1024-finetuned-squad-seed-6
\ No newline at end of file
diff --git a/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_few_shot_k_128_finetuned_squad_seed_10_en.md b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_few_shot_k_128_finetuned_squad_seed_10_en.md
new file mode 100644
index 00000000000000..be3987e0c11291
--- /dev/null
+++ b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_few_shot_k_128_finetuned_squad_seed_10_en.md
@@ -0,0 +1,88 @@
+---
+layout: model
+title: English RobertaForQuestionAnswering Base Cased model (from anas-awadalla)
+author: John Snow Labs
+name: roberta_qa_base_few_shot_k_128_finetuned_squad_seed_10
+date: 2023-01-20
+tags: [en, open_source, roberta, question_answering, tensorflow]
+task: Question Answering
+language: en
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: RoBertaForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained RobertaForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `roberta-base-few-shot-k-128-finetuned-squad-seed-10` is an English model originally trained by `anas-awadalla`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/roberta_qa_base_few_shot_k_128_finetuned_squad_seed_10_en_4.3.0_3.0_1674213671532.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/roberta_qa_base_few_shot_k_128_finetuned_squad_seed_10_en_4.3.0_3.0_1674213671532.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_base_few_shot_k_128_finetuned_squad_seed_10","en")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_base_few_shot_k_128_finetuned_squad_seed_10","en")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq("What's my name?","My name is Clara and I live in Berkeley.").toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|roberta_qa_base_few_shot_k_128_finetuned_squad_seed_10|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document, token]|
+|Output Labels:|[class]|
+|Language:|en|
+|Size:|423.1 MB|
+|Case sensitive:|true|
+|Max sentence length:|256|
+
+## References
+
+- https://huggingface.co/anas-awadalla/roberta-base-few-shot-k-128-finetuned-squad-seed-10
\ No newline at end of file
diff --git a/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_few_shot_k_128_finetuned_squad_seed_2_en.md b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_few_shot_k_128_finetuned_squad_seed_2_en.md
new file mode 100644
index 00000000000000..c7b0fc6de86781
--- /dev/null
+++ b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_few_shot_k_128_finetuned_squad_seed_2_en.md
@@ -0,0 +1,88 @@
+---
+layout: model
+title: English RobertaForQuestionAnswering Base Cased model (from anas-awadalla)
+author: John Snow Labs
+name: roberta_qa_base_few_shot_k_128_finetuned_squad_seed_2
+date: 2023-01-20
+tags: [en, open_source, roberta, question_answering, tensorflow]
+task: Question Answering
+language: en
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: RoBertaForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained RobertaForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `roberta-base-few-shot-k-128-finetuned-squad-seed-2` is an English model originally trained by `anas-awadalla`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/roberta_qa_base_few_shot_k_128_finetuned_squad_seed_2_en_4.3.0_3.0_1674213755024.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/roberta_qa_base_few_shot_k_128_finetuned_squad_seed_2_en_4.3.0_3.0_1674213755024.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_base_few_shot_k_128_finetuned_squad_seed_2","en")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_base_few_shot_k_128_finetuned_squad_seed_2","en")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq("What's my name?","My name is Clara and I live in Berkeley.").toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|roberta_qa_base_few_shot_k_128_finetuned_squad_seed_2|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document, token]|
+|Output Labels:|[class]|
+|Language:|en|
+|Size:|423.1 MB|
+|Case sensitive:|true|
+|Max sentence length:|256|
+
+## References
+
+- https://huggingface.co/anas-awadalla/roberta-base-few-shot-k-128-finetuned-squad-seed-2
\ No newline at end of file
diff --git a/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_few_shot_k_128_finetuned_squad_seed_42_en.md b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_few_shot_k_128_finetuned_squad_seed_42_en.md
new file mode 100644
index 00000000000000..c5b01ea1ea39bc
--- /dev/null
+++ b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_few_shot_k_128_finetuned_squad_seed_42_en.md
@@ -0,0 +1,88 @@
+---
+layout: model
+title: English RobertaForQuestionAnswering Base Cased model (from anas-awadalla)
+author: John Snow Labs
+name: roberta_qa_base_few_shot_k_128_finetuned_squad_seed_42
+date: 2023-01-20
+tags: [en, open_source, roberta, question_answering, tensorflow]
+task: Question Answering
+language: en
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: RoBertaForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained RobertaForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `roberta-base-few-shot-k-128-finetuned-squad-seed-42` is an English model originally trained by `anas-awadalla`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/roberta_qa_base_few_shot_k_128_finetuned_squad_seed_42_en_4.3.0_3.0_1674213900898.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/roberta_qa_base_few_shot_k_128_finetuned_squad_seed_42_en_4.3.0_3.0_1674213900898.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_base_few_shot_k_128_finetuned_squad_seed_42","en")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_base_few_shot_k_128_finetuned_squad_seed_42","en")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq("What's my name?","My name is Clara and I live in Berkeley.").toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|roberta_qa_base_few_shot_k_128_finetuned_squad_seed_42|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|431.0 MB|
+|Case sensitive:|true|
+|Max sentence length:|256|
+
+## References
+
+- https://huggingface.co/anas-awadalla/roberta-base-few-shot-k-128-finetuned-squad-seed-42
\ No newline at end of file
diff --git a/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_few_shot_k_128_finetuned_squad_seed_4_en.md b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_few_shot_k_128_finetuned_squad_seed_4_en.md
new file mode 100644
index 00000000000000..45b1313526488f
--- /dev/null
+++ b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_few_shot_k_128_finetuned_squad_seed_4_en.md
@@ -0,0 +1,88 @@
+---
+layout: model
+title: English RobertaForQuestionAnswering Base Cased model (from anas-awadalla)
+author: John Snow Labs
+name: roberta_qa_base_few_shot_k_128_finetuned_squad_seed_4
+date: 2023-01-20
+tags: [en, open_source, roberta, question_answering, tensorflow]
+task: Question Answering
+language: en
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: RoBertaForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained RobertaForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `roberta-base-few-shot-k-128-finetuned-squad-seed-4` is an English model originally trained by `anas-awadalla`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/roberta_qa_base_few_shot_k_128_finetuned_squad_seed_4_en_4.3.0_3.0_1674213839010.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/roberta_qa_base_few_shot_k_128_finetuned_squad_seed_4_en_4.3.0_3.0_1674213839010.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_base_few_shot_k_128_finetuned_squad_seed_4","en")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_base_few_shot_k_128_finetuned_squad_seed_4","en")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq("What's my name?","My name is Clara and I live in Berkeley.").toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|roberta_qa_base_few_shot_k_128_finetuned_squad_seed_4|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|423.2 MB|
+|Case sensitive:|true|
+|Max sentence length:|256|
+
+## References
+
+- https://huggingface.co/anas-awadalla/roberta-base-few-shot-k-128-finetuned-squad-seed-4
\ No newline at end of file
diff --git a/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_few_shot_k_128_finetuned_squad_seed_6_en.md b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_few_shot_k_128_finetuned_squad_seed_6_en.md
new file mode 100644
index 00000000000000..c01c6f8871b98f
--- /dev/null
+++ b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_few_shot_k_128_finetuned_squad_seed_6_en.md
@@ -0,0 +1,88 @@
+---
+layout: model
+title: English RobertaForQuestionAnswering Base Cased model (from anas-awadalla)
+author: John Snow Labs
+name: roberta_qa_base_few_shot_k_128_finetuned_squad_seed_6
+date: 2023-01-20
+tags: [en, open_source, roberta, question_answering, tensorflow]
+task: Question Answering
+language: en
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: RoBertaForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained RobertaForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `roberta-base-few-shot-k-128-finetuned-squad-seed-6` is an English model originally trained by `anas-awadalla`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/roberta_qa_base_few_shot_k_128_finetuned_squad_seed_6_en_4.3.0_3.0_1674213983376.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/roberta_qa_base_few_shot_k_128_finetuned_squad_seed_6_en_4.3.0_3.0_1674213983376.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_base_few_shot_k_128_finetuned_squad_seed_6","en")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_base_few_shot_k_128_finetuned_squad_seed_6","en")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq("What's my name?","My name is Clara and I live in Berkeley.").toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|roberta_qa_base_few_shot_k_128_finetuned_squad_seed_6|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|422.7 MB|
+|Case sensitive:|true|
+|Max sentence length:|256|
+
+## References
+
+- https://huggingface.co/anas-awadalla/roberta-base-few-shot-k-128-finetuned-squad-seed-6
\ No newline at end of file
diff --git a/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_few_shot_k_128_finetuned_squad_seed_8_en.md b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_few_shot_k_128_finetuned_squad_seed_8_en.md
new file mode 100644
index 00000000000000..4ff5553543b735
--- /dev/null
+++ b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_few_shot_k_128_finetuned_squad_seed_8_en.md
@@ -0,0 +1,88 @@
+---
+layout: model
+title: English RobertaForQuestionAnswering Base Cased model (from anas-awadalla)
+author: John Snow Labs
+name: roberta_qa_base_few_shot_k_128_finetuned_squad_seed_8
+date: 2023-01-20
+tags: [en, open_source, roberta, question_answering, tensorflow]
+task: Question Answering
+language: en
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: RoBertaForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained RobertaForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `roberta-base-few-shot-k-128-finetuned-squad-seed-8` is an English model originally trained by `anas-awadalla`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/roberta_qa_base_few_shot_k_128_finetuned_squad_seed_8_en_4.3.0_3.0_1674214068825.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/roberta_qa_base_few_shot_k_128_finetuned_squad_seed_8_en_4.3.0_3.0_1674214068825.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_base_few_shot_k_128_finetuned_squad_seed_8","en")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_base_few_shot_k_128_finetuned_squad_seed_8","en")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq("What's my name?","My name is Clara and I live in Berkeley.").toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|roberta_qa_base_few_shot_k_128_finetuned_squad_seed_8|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|423.0 MB|
+|Case sensitive:|true|
+|Max sentence length:|256|
+
+## References
+
+- https://huggingface.co/anas-awadalla/roberta-base-few-shot-k-128-finetuned-squad-seed-8
\ No newline at end of file
diff --git a/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_few_shot_k_16_finetuned_squad_seed_0_en.md b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_few_shot_k_16_finetuned_squad_seed_0_en.md
new file mode 100644
index 00000000000000..48587f12544e1c
--- /dev/null
+++ b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_few_shot_k_16_finetuned_squad_seed_0_en.md
@@ -0,0 +1,88 @@
+---
+layout: model
+title: English RobertaForQuestionAnswering Base Cased model (from anas-awadalla)
+author: John Snow Labs
+name: roberta_qa_base_few_shot_k_16_finetuned_squad_seed_0
+date: 2023-01-20
+tags: [en, open_source, roberta, question_answering, tensorflow]
+task: Question Answering
+language: en
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: RoBertaForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained RobertaForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `roberta-base-few-shot-k-16-finetuned-squad-seed-0` is an English model originally trained by `anas-awadalla`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/roberta_qa_base_few_shot_k_16_finetuned_squad_seed_0_en_4.3.0_3.0_1674214155549.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/roberta_qa_base_few_shot_k_16_finetuned_squad_seed_0_en_4.3.0_3.0_1674214155549.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_base_few_shot_k_16_finetuned_squad_seed_0","en")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_base_few_shot_k_16_finetuned_squad_seed_0","en")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq("What's my name?","My name is Clara and I live in Berkeley.").toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|roberta_qa_base_few_shot_k_16_finetuned_squad_seed_0|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|416.0 MB|
+|Case sensitive:|true|
+|Max sentence length:|256|
+
+## References
+
+- https://huggingface.co/anas-awadalla/roberta-base-few-shot-k-16-finetuned-squad-seed-0
\ No newline at end of file
diff --git a/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_few_shot_k_16_finetuned_squad_seed_10_en.md b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_few_shot_k_16_finetuned_squad_seed_10_en.md
new file mode 100644
index 00000000000000..de3f1ad937c5b8
--- /dev/null
+++ b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_few_shot_k_16_finetuned_squad_seed_10_en.md
@@ -0,0 +1,88 @@
+---
+layout: model
+title: English RobertaForQuestionAnswering Base Cased model (from anas-awadalla)
+author: John Snow Labs
+name: roberta_qa_base_few_shot_k_16_finetuned_squad_seed_10
+date: 2023-01-20
+tags: [en, open_source, roberta, question_answering, tensorflow]
+task: Question Answering
+language: en
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: RoBertaForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained RobertaForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `roberta-base-few-shot-k-16-finetuned-squad-seed-10` is an English model originally trained by `anas-awadalla`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/roberta_qa_base_few_shot_k_16_finetuned_squad_seed_10_en_4.3.0_3.0_1674214242487.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/roberta_qa_base_few_shot_k_16_finetuned_squad_seed_10_en_4.3.0_3.0_1674214242487.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_base_few_shot_k_16_finetuned_squad_seed_10","en")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_base_few_shot_k_16_finetuned_squad_seed_10","en")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq("What's my name?","My name is Clara and I live in Berkeley.").toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|roberta_qa_base_few_shot_k_16_finetuned_squad_seed_10|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|416.2 MB|
+|Case sensitive:|true|
+|Max sentence length:|256|
+
+## References
+
+- https://huggingface.co/anas-awadalla/roberta-base-few-shot-k-16-finetuned-squad-seed-10
\ No newline at end of file
diff --git a/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_few_shot_k_16_finetuned_squad_seed_2_en.md b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_few_shot_k_16_finetuned_squad_seed_2_en.md
new file mode 100644
index 00000000000000..0c8173ae18249f
--- /dev/null
+++ b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_few_shot_k_16_finetuned_squad_seed_2_en.md
@@ -0,0 +1,88 @@
+---
+layout: model
+title: English RobertaForQuestionAnswering Base Cased model (from anas-awadalla)
+author: John Snow Labs
+name: roberta_qa_base_few_shot_k_16_finetuned_squad_seed_2
+date: 2023-01-20
+tags: [en, open_source, roberta, question_answering, tensorflow]
+task: Question Answering
+language: en
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: RoBertaForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained RobertaForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `roberta-base-few-shot-k-16-finetuned-squad-seed-2` is an English model originally trained by `anas-awadalla`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/roberta_qa_base_few_shot_k_16_finetuned_squad_seed_2_en_4.3.0_3.0_1674214328836.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/roberta_qa_base_few_shot_k_16_finetuned_squad_seed_2_en_4.3.0_3.0_1674214328836.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_base_few_shot_k_16_finetuned_squad_seed_2","en")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_base_few_shot_k_16_finetuned_squad_seed_2","en")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq("What's my name?","My name is Clara and I live in Berkeley.").toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|roberta_qa_base_few_shot_k_16_finetuned_squad_seed_2|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|416.2 MB|
+|Case sensitive:|true|
+|Max sentence length:|256|
+
+## References
+
+- https://huggingface.co/anas-awadalla/roberta-base-few-shot-k-16-finetuned-squad-seed-2
\ No newline at end of file
diff --git a/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_few_shot_k_16_finetuned_squad_seed_42_en.md b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_few_shot_k_16_finetuned_squad_seed_42_en.md
new file mode 100644
index 00000000000000..18304b62464fed
--- /dev/null
+++ b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_few_shot_k_16_finetuned_squad_seed_42_en.md
@@ -0,0 +1,88 @@
+---
+layout: model
+title: English RobertaForQuestionAnswering Base Cased model (from anas-awadalla)
+author: John Snow Labs
+name: roberta_qa_base_few_shot_k_16_finetuned_squad_seed_42
+date: 2023-01-20
+tags: [en, open_source, roberta, question_answering, tensorflow]
+task: Question Answering
+language: en
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: RoBertaForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained RobertaForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `roberta-base-few-shot-k-16-finetuned-squad-seed-42` is an English model originally trained by `anas-awadalla`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/roberta_qa_base_few_shot_k_16_finetuned_squad_seed_42_en_4.3.0_3.0_1674214395082.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/roberta_qa_base_few_shot_k_16_finetuned_squad_seed_42_en_4.3.0_3.0_1674214395082.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_base_few_shot_k_16_finetuned_squad_seed_42","en")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_base_few_shot_k_16_finetuned_squad_seed_42","en")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq("What's my name?","My name is Clara and I live in Berkeley.").toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|roberta_qa_base_few_shot_k_16_finetuned_squad_seed_42|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|425.0 MB|
+|Case sensitive:|true|
+|Max sentence length:|256|
+
+## References
+
+- https://huggingface.co/anas-awadalla/roberta-base-few-shot-k-16-finetuned-squad-seed-42
\ No newline at end of file
diff --git a/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_few_shot_k_16_finetuned_squad_seed_6_en.md b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_few_shot_k_16_finetuned_squad_seed_6_en.md
new file mode 100644
index 00000000000000..6783c3a8106fb3
--- /dev/null
+++ b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_few_shot_k_16_finetuned_squad_seed_6_en.md
@@ -0,0 +1,88 @@
+---
+layout: model
+title: English RobertaForQuestionAnswering Base Cased model (from anas-awadalla)
+author: John Snow Labs
+name: roberta_qa_base_few_shot_k_16_finetuned_squad_seed_6
+date: 2023-01-20
+tags: [en, open_source, roberta, question_answering, tensorflow]
+task: Question Answering
+language: en
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: RoBertaForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained RobertaForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `roberta-base-few-shot-k-16-finetuned-squad-seed-6` is an English model originally trained by `anas-awadalla`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/roberta_qa_base_few_shot_k_16_finetuned_squad_seed_6_en_4.3.0_3.0_1674214482569.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/roberta_qa_base_few_shot_k_16_finetuned_squad_seed_6_en_4.3.0_3.0_1674214482569.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_base_few_shot_k_16_finetuned_squad_seed_6","en")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_base_few_shot_k_16_finetuned_squad_seed_6","en")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq("What's my name?","My name is Clara and I live in Berkeley.").toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|roberta_qa_base_few_shot_k_16_finetuned_squad_seed_6|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|416.1 MB|
+|Case sensitive:|true|
+|Max sentence length:|256|
+
+## References
+
+- https://huggingface.co/anas-awadalla/roberta-base-few-shot-k-16-finetuned-squad-seed-6
\ No newline at end of file
diff --git a/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_few_shot_k_16_finetuned_squad_seed_8_en.md b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_few_shot_k_16_finetuned_squad_seed_8_en.md
new file mode 100644
index 00000000000000..a554c02bb62dd1
--- /dev/null
+++ b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_few_shot_k_16_finetuned_squad_seed_8_en.md
@@ -0,0 +1,88 @@
+---
+layout: model
+title: English RobertaForQuestionAnswering Base Cased model (from anas-awadalla)
+author: John Snow Labs
+name: roberta_qa_base_few_shot_k_16_finetuned_squad_seed_8
+date: 2023-01-20
+tags: [en, open_source, roberta, question_answering, tensorflow]
+task: Question Answering
+language: en
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: RoBertaForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained RobertaForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `roberta-base-few-shot-k-16-finetuned-squad-seed-8` is an English model originally trained by `anas-awadalla`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/roberta_qa_base_few_shot_k_16_finetuned_squad_seed_8_en_4.3.0_3.0_1674214569437.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/roberta_qa_base_few_shot_k_16_finetuned_squad_seed_8_en_4.3.0_3.0_1674214569437.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_base_few_shot_k_16_finetuned_squad_seed_8","en")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_base_few_shot_k_16_finetuned_squad_seed_8","en")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq("What's my name?","My name is Clara and I live in Berkeley.").toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|roberta_qa_base_few_shot_k_16_finetuned_squad_seed_8|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|415.9 MB|
+|Case sensitive:|true|
+|Max sentence length:|256|
+
+## References
+
+- https://huggingface.co/anas-awadalla/roberta-base-few-shot-k-16-finetuned-squad-seed-8
\ No newline at end of file
diff --git a/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_few_shot_k_256_finetuned_squad_seed_0_en.md b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_few_shot_k_256_finetuned_squad_seed_0_en.md
new file mode 100644
index 00000000000000..3e8cf20de9718e
--- /dev/null
+++ b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_few_shot_k_256_finetuned_squad_seed_0_en.md
@@ -0,0 +1,88 @@
+---
+layout: model
+title: English RobertaForQuestionAnswering Base Cased model (from anas-awadalla)
+author: John Snow Labs
+name: roberta_qa_base_few_shot_k_256_finetuned_squad_seed_0
+date: 2023-01-20
+tags: [en, open_source, roberta, question_answering, tensorflow]
+task: Question Answering
+language: en
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: RoBertaForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained RobertaForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `roberta-base-few-shot-k-256-finetuned-squad-seed-0` is an English model originally trained by `anas-awadalla`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/roberta_qa_base_few_shot_k_256_finetuned_squad_seed_0_en_4.3.0_3.0_1674214651872.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/roberta_qa_base_few_shot_k_256_finetuned_squad_seed_0_en_4.3.0_3.0_1674214651872.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_base_few_shot_k_256_finetuned_squad_seed_0","en")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_base_few_shot_k_256_finetuned_squad_seed_0","en")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq("What's my name?","My name is Clara and I live in Berkeley.").toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|roberta_qa_base_few_shot_k_256_finetuned_squad_seed_0|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|427.2 MB|
+|Case sensitive:|true|
+|Max sentence length:|256|
+
+## References
+
+- https://huggingface.co/anas-awadalla/roberta-base-few-shot-k-256-finetuned-squad-seed-0
\ No newline at end of file
diff --git a/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_few_shot_k_256_finetuned_squad_seed_10_en.md b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_few_shot_k_256_finetuned_squad_seed_10_en.md
new file mode 100644
index 00000000000000..e4f08e1de6b865
--- /dev/null
+++ b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_few_shot_k_256_finetuned_squad_seed_10_en.md
@@ -0,0 +1,88 @@
+---
+layout: model
+title: English RobertaForQuestionAnswering Base Cased model (from anas-awadalla)
+author: John Snow Labs
+name: roberta_qa_base_few_shot_k_256_finetuned_squad_seed_10
+date: 2023-01-20
+tags: [en, open_source, roberta, question_answering, tensorflow]
+task: Question Answering
+language: en
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: RoBertaForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained RobertaForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `roberta-base-few-shot-k-256-finetuned-squad-seed-10` is an English model originally trained by `anas-awadalla`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/roberta_qa_base_few_shot_k_256_finetuned_squad_seed_10_en_4.3.0_3.0_1674214731317.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/roberta_qa_base_few_shot_k_256_finetuned_squad_seed_10_en_4.3.0_3.0_1674214731317.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_base_few_shot_k_256_finetuned_squad_seed_10","en")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_base_few_shot_k_256_finetuned_squad_seed_10","en")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq("What's my name?","My name is Clara and I live in Berkeley.").toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|roberta_qa_base_few_shot_k_256_finetuned_squad_seed_10|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|427.3 MB|
+|Case sensitive:|true|
+|Max sentence length:|256|
+
+## References
+
+- https://huggingface.co/anas-awadalla/roberta-base-few-shot-k-256-finetuned-squad-seed-10
\ No newline at end of file
diff --git a/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_few_shot_k_256_finetuned_squad_seed_2_en.md b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_few_shot_k_256_finetuned_squad_seed_2_en.md
new file mode 100644
index 00000000000000..8c862f1c57f2d8
--- /dev/null
+++ b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_few_shot_k_256_finetuned_squad_seed_2_en.md
@@ -0,0 +1,88 @@
+---
+layout: model
+title: English RobertaForQuestionAnswering Base Cased model (from anas-awadalla)
+author: John Snow Labs
+name: roberta_qa_base_few_shot_k_256_finetuned_squad_seed_2
+date: 2023-01-20
+tags: [en, open_source, roberta, question_answering, tensorflow]
+task: Question Answering
+language: en
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: RoBertaForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained RobertaForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `roberta-base-few-shot-k-256-finetuned-squad-seed-2` is an English model originally trained by `anas-awadalla`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/roberta_qa_base_few_shot_k_256_finetuned_squad_seed_2_en_4.3.0_3.0_1674214836583.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/roberta_qa_base_few_shot_k_256_finetuned_squad_seed_2_en_4.3.0_3.0_1674214836583.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_base_few_shot_k_256_finetuned_squad_seed_2","en")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_base_few_shot_k_256_finetuned_squad_seed_2","en")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq("What's my name?","My name is Clara and I live in Berkeley.").toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|roberta_qa_base_few_shot_k_256_finetuned_squad_seed_2|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|427.7 MB|
+|Case sensitive:|true|
+|Max sentence length:|256|
+
+## References
+
+- https://huggingface.co/anas-awadalla/roberta-base-few-shot-k-256-finetuned-squad-seed-2
\ No newline at end of file
diff --git a/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_few_shot_k_256_finetuned_squad_seed_4_en.md b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_few_shot_k_256_finetuned_squad_seed_4_en.md
new file mode 100644
index 00000000000000..1cac84468cf56e
--- /dev/null
+++ b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_few_shot_k_256_finetuned_squad_seed_4_en.md
@@ -0,0 +1,88 @@
+---
+layout: model
+title: English RobertaForQuestionAnswering Base Cased model (from anas-awadalla)
+author: John Snow Labs
+name: roberta_qa_base_few_shot_k_256_finetuned_squad_seed_4
+date: 2023-01-20
+tags: [en, open_source, roberta, question_answering, tensorflow]
+task: Question Answering
+language: en
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: RoBertaForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained RobertaForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `roberta-base-few-shot-k-256-finetuned-squad-seed-4` is an English model originally trained by `anas-awadalla`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/roberta_qa_base_few_shot_k_256_finetuned_squad_seed_4_en_4.3.0_3.0_1674214918693.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/roberta_qa_base_few_shot_k_256_finetuned_squad_seed_4_en_4.3.0_3.0_1674214918693.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_base_few_shot_k_256_finetuned_squad_seed_4","en")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_base_few_shot_k_256_finetuned_squad_seed_4","en")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq("What's my name?","My name is Clara and I live in Berkeley.").toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|roberta_qa_base_few_shot_k_256_finetuned_squad_seed_4|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|427.6 MB|
+|Case sensitive:|true|
+|Max sentence length:|256|
+
+## References
+
+- https://huggingface.co/anas-awadalla/roberta-base-few-shot-k-256-finetuned-squad-seed-4
\ No newline at end of file
diff --git a/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_few_shot_k_256_finetuned_squad_seed_6_en.md b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_few_shot_k_256_finetuned_squad_seed_6_en.md
new file mode 100644
index 00000000000000..a2fcd76d560bbd
--- /dev/null
+++ b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_few_shot_k_256_finetuned_squad_seed_6_en.md
@@ -0,0 +1,88 @@
+---
+layout: model
+title: English RobertaForQuestionAnswering Base Cased model (from anas-awadalla)
+author: John Snow Labs
+name: roberta_qa_base_few_shot_k_256_finetuned_squad_seed_6
+date: 2023-01-20
+tags: [en, open_source, roberta, question_answering, tensorflow]
+task: Question Answering
+language: en
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: RoBertaForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained RobertaForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `roberta-base-few-shot-k-256-finetuned-squad-seed-6` is an English model originally trained by `anas-awadalla`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/roberta_qa_base_few_shot_k_256_finetuned_squad_seed_6_en_4.3.0_3.0_1674214999722.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/roberta_qa_base_few_shot_k_256_finetuned_squad_seed_6_en_4.3.0_3.0_1674214999722.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_base_few_shot_k_256_finetuned_squad_seed_6","en")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_base_few_shot_k_256_finetuned_squad_seed_6","en")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq("What's my name?","My name is Clara and I live in Berkeley.").toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|roberta_qa_base_few_shot_k_256_finetuned_squad_seed_6|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|426.9 MB|
+|Case sensitive:|true|
+|Max sentence length:|256|
+
+## References
+
+- https://huggingface.co/anas-awadalla/roberta-base-few-shot-k-256-finetuned-squad-seed-6
\ No newline at end of file
diff --git a/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_few_shot_k_256_finetuned_squad_seed_8_en.md b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_few_shot_k_256_finetuned_squad_seed_8_en.md
new file mode 100644
index 00000000000000..9cfbb8e9d38225
--- /dev/null
+++ b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_few_shot_k_256_finetuned_squad_seed_8_en.md
@@ -0,0 +1,88 @@
+---
+layout: model
+title: English RobertaForQuestionAnswering Base Cased model (from anas-awadalla)
+author: John Snow Labs
+name: roberta_qa_base_few_shot_k_256_finetuned_squad_seed_8
+date: 2023-01-20
+tags: [en, open_source, roberta, question_answering, tensorflow]
+task: Question Answering
+language: en
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: RoBertaForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained RobertaForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `roberta-base-few-shot-k-256-finetuned-squad-seed-8` is an English model originally trained by `anas-awadalla`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/roberta_qa_base_few_shot_k_256_finetuned_squad_seed_8_en_4.3.0_3.0_1674215082127.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/roberta_qa_base_few_shot_k_256_finetuned_squad_seed_8_en_4.3.0_3.0_1674215082127.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_base_few_shot_k_256_finetuned_squad_seed_8","en")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_base_few_shot_k_256_finetuned_squad_seed_8","en")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq("What's my name?","My name is Clara and I live in Berkeley.").toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|roberta_qa_base_few_shot_k_256_finetuned_squad_seed_8|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|427.1 MB|
+|Case sensitive:|true|
+|Max sentence length:|256|
+
+## References
+
+- https://huggingface.co/anas-awadalla/roberta-base-few-shot-k-256-finetuned-squad-seed-8
\ No newline at end of file
diff --git a/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_few_shot_k_32_finetuned_squad_seed_0_en.md b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_few_shot_k_32_finetuned_squad_seed_0_en.md
new file mode 100644
index 00000000000000..298d8c0da52a52
--- /dev/null
+++ b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_few_shot_k_32_finetuned_squad_seed_0_en.md
@@ -0,0 +1,88 @@
+---
+layout: model
+title: English RobertaForQuestionAnswering Base Cased model (from anas-awadalla)
+author: John Snow Labs
+name: roberta_qa_base_few_shot_k_32_finetuned_squad_seed_0
+date: 2023-01-20
+tags: [en, open_source, roberta, question_answering, tensorflow]
+task: Question Answering
+language: en
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: RoBertaForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained RobertaForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `roberta-base-few-shot-k-32-finetuned-squad-seed-0` is an English model originally trained by `anas-awadalla`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/roberta_qa_base_few_shot_k_32_finetuned_squad_seed_0_en_4.3.0_3.0_1674215168842.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/roberta_qa_base_few_shot_k_32_finetuned_squad_seed_0_en_4.3.0_3.0_1674215168842.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_base_few_shot_k_32_finetuned_squad_seed_0","en")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_base_few_shot_k_32_finetuned_squad_seed_0","en")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq("What's my name?","My name is Clara and I live in Berkeley.").toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|roberta_qa_base_few_shot_k_32_finetuned_squad_seed_0|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|417.4 MB|
+|Case sensitive:|true|
+|Max sentence length:|256|
+
+## References
+
+- https://huggingface.co/anas-awadalla/roberta-base-few-shot-k-32-finetuned-squad-seed-0
\ No newline at end of file
diff --git a/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_few_shot_k_32_finetuned_squad_seed_10_en.md b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_few_shot_k_32_finetuned_squad_seed_10_en.md
new file mode 100644
index 00000000000000..14d74d6afc289c
--- /dev/null
+++ b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_few_shot_k_32_finetuned_squad_seed_10_en.md
@@ -0,0 +1,88 @@
+---
+layout: model
+title: English RobertaForQuestionAnswering Base Cased model (from anas-awadalla)
+author: John Snow Labs
+name: roberta_qa_base_few_shot_k_32_finetuned_squad_seed_10
+date: 2023-01-20
+tags: [en, open_source, roberta, question_answering, tensorflow]
+task: Question Answering
+language: en
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: RoBertaForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained RobertaForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `roberta-base-few-shot-k-32-finetuned-squad-seed-10` is an English model originally trained by `anas-awadalla`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/roberta_qa_base_few_shot_k_32_finetuned_squad_seed_10_en_4.3.0_3.0_1674215255291.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/roberta_qa_base_few_shot_k_32_finetuned_squad_seed_10_en_4.3.0_3.0_1674215255291.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_base_few_shot_k_32_finetuned_squad_seed_10","en")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_base_few_shot_k_32_finetuned_squad_seed_10","en")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq("What's my name?","My name is Clara and I live in Berkeley.").toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|roberta_qa_base_few_shot_k_32_finetuned_squad_seed_10|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|417.6 MB|
+|Case sensitive:|true|
+|Max sentence length:|256|
+
+## References
+
+- https://huggingface.co/anas-awadalla/roberta-base-few-shot-k-32-finetuned-squad-seed-10
\ No newline at end of file
diff --git a/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_few_shot_k_512_finetuned_squad_seed_0_en.md b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_few_shot_k_512_finetuned_squad_seed_0_en.md
new file mode 100644
index 00000000000000..d584d0f603529f
--- /dev/null
+++ b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_few_shot_k_512_finetuned_squad_seed_0_en.md
@@ -0,0 +1,88 @@
+---
+layout: model
+title: English RobertaForQuestionAnswering Base Cased model (from anas-awadalla)
+author: John Snow Labs
+name: roberta_qa_base_few_shot_k_512_finetuned_squad_seed_0
+date: 2023-01-20
+tags: [en, open_source, roberta, question_answering, tensorflow]
+task: Question Answering
+language: en
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: RoBertaForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained RobertaForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `roberta-base-few-shot-k-512-finetuned-squad-seed-0` is an English model originally trained by `anas-awadalla`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/roberta_qa_base_few_shot_k_512_finetuned_squad_seed_0_en_4.3.0_3.0_1674215332011.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/roberta_qa_base_few_shot_k_512_finetuned_squad_seed_0_en_4.3.0_3.0_1674215332011.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_base_few_shot_k_512_finetuned_squad_seed_0","en")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_base_few_shot_k_512_finetuned_squad_seed_0","en")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq("What's my name?","My name is Clara and I live in Berkeley.").toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|roberta_qa_base_few_shot_k_512_finetuned_squad_seed_0|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|433.1 MB|
+|Case sensitive:|true|
+|Max sentence length:|256|
+
+## References
+
+- https://huggingface.co/anas-awadalla/roberta-base-few-shot-k-512-finetuned-squad-seed-0
\ No newline at end of file
diff --git a/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_few_shot_k_512_finetuned_squad_seed_10_en.md b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_few_shot_k_512_finetuned_squad_seed_10_en.md
new file mode 100644
index 00000000000000..a683906ceae2bb
--- /dev/null
+++ b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_few_shot_k_512_finetuned_squad_seed_10_en.md
@@ -0,0 +1,88 @@
+---
+layout: model
+title: English RobertaForQuestionAnswering Base Cased model (from anas-awadalla)
+author: John Snow Labs
+name: roberta_qa_base_few_shot_k_512_finetuned_squad_seed_10
+date: 2023-01-20
+tags: [en, open_source, roberta, question_answering, tensorflow]
+task: Question Answering
+language: en
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: RoBertaForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained RobertaForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `roberta-base-few-shot-k-512-finetuned-squad-seed-10` is an English model originally trained by `anas-awadalla`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/roberta_qa_base_few_shot_k_512_finetuned_squad_seed_10_en_4.3.0_3.0_1674215413181.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/roberta_qa_base_few_shot_k_512_finetuned_squad_seed_10_en_4.3.0_3.0_1674215413181.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_base_few_shot_k_512_finetuned_squad_seed_10","en")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_base_few_shot_k_512_finetuned_squad_seed_10","en")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq("What's my name?","My name is Clara and I live in Berkeley.").toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|roberta_qa_base_few_shot_k_512_finetuned_squad_seed_10|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|433.1 MB|
+|Case sensitive:|true|
+|Max sentence length:|256|
+
+## References
+
+- https://huggingface.co/anas-awadalla/roberta-base-few-shot-k-512-finetuned-squad-seed-10
\ No newline at end of file
diff --git a/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_few_shot_k_512_finetuned_squad_seed_2_en.md b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_few_shot_k_512_finetuned_squad_seed_2_en.md
new file mode 100644
index 00000000000000..da311da527c173
--- /dev/null
+++ b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_few_shot_k_512_finetuned_squad_seed_2_en.md
@@ -0,0 +1,88 @@
+---
+layout: model
+title: English RobertaForQuestionAnswering Base Cased model (from anas-awadalla)
+author: John Snow Labs
+name: roberta_qa_base_few_shot_k_512_finetuned_squad_seed_2
+date: 2023-01-20
+tags: [en, open_source, roberta, question_answering, tensorflow]
+task: Question Answering
+language: en
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: RoBertaForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained RobertaForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `roberta-base-few-shot-k-512-finetuned-squad-seed-2` is an English model originally trained by `anas-awadalla`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/roberta_qa_base_few_shot_k_512_finetuned_squad_seed_2_en_4.3.0_3.0_1674215491724.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/roberta_qa_base_few_shot_k_512_finetuned_squad_seed_2_en_4.3.0_3.0_1674215491724.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_base_few_shot_k_512_finetuned_squad_seed_2","en")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_base_few_shot_k_512_finetuned_squad_seed_2","en")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq("What's my name?","My name is Clara and I live in Berkeley.").toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|roberta_qa_base_few_shot_k_512_finetuned_squad_seed_2|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|432.9 MB|
+|Case sensitive:|true|
+|Max sentence length:|256|
+
+## References
+
+- https://huggingface.co/anas-awadalla/roberta-base-few-shot-k-512-finetuned-squad-seed-2
\ No newline at end of file
diff --git a/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_few_shot_k_512_finetuned_squad_seed_4_en.md b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_few_shot_k_512_finetuned_squad_seed_4_en.md
new file mode 100644
index 00000000000000..dd0d2388a9089a
--- /dev/null
+++ b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_few_shot_k_512_finetuned_squad_seed_4_en.md
@@ -0,0 +1,88 @@
+---
+layout: model
+title: English RobertaForQuestionAnswering Base Cased model (from anas-awadalla)
+author: John Snow Labs
+name: roberta_qa_base_few_shot_k_512_finetuned_squad_seed_4
+date: 2023-01-20
+tags: [en, open_source, roberta, question_answering, tensorflow]
+task: Question Answering
+language: en
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: RoBertaForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained RobertaForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `roberta-base-few-shot-k-512-finetuned-squad-seed-4` is an English model originally trained by `anas-awadalla`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/roberta_qa_base_few_shot_k_512_finetuned_squad_seed_4_en_4.3.0_3.0_1674215570077.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/roberta_qa_base_few_shot_k_512_finetuned_squad_seed_4_en_4.3.0_3.0_1674215570077.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_base_few_shot_k_512_finetuned_squad_seed_4","en")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_base_few_shot_k_512_finetuned_squad_seed_4","en")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq("What's my name?","My name is Clara and I live in Berkeley.").toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|roberta_qa_base_few_shot_k_512_finetuned_squad_seed_4|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|432.9 MB|
+|Case sensitive:|true|
+|Max sentence length:|256|
+
+## References
+
+- https://huggingface.co/anas-awadalla/roberta-base-few-shot-k-512-finetuned-squad-seed-4
\ No newline at end of file
diff --git a/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_few_shot_k_512_finetuned_squad_seed_6_en.md b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_few_shot_k_512_finetuned_squad_seed_6_en.md
new file mode 100644
index 00000000000000..47339ad99dc2ed
--- /dev/null
+++ b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_few_shot_k_512_finetuned_squad_seed_6_en.md
@@ -0,0 +1,88 @@
+---
+layout: model
+title: English RobertaForQuestionAnswering Base Cased model (from anas-awadalla)
+author: John Snow Labs
+name: roberta_qa_base_few_shot_k_512_finetuned_squad_seed_6
+date: 2023-01-20
+tags: [en, open_source, roberta, question_answering, tensorflow]
+task: Question Answering
+language: en
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: RoBertaForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained RobertaForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `roberta-base-few-shot-k-512-finetuned-squad-seed-6` is an English model originally trained by `anas-awadalla`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/roberta_qa_base_few_shot_k_512_finetuned_squad_seed_6_en_4.3.0_3.0_1674215648700.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/roberta_qa_base_few_shot_k_512_finetuned_squad_seed_6_en_4.3.0_3.0_1674215648700.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_base_few_shot_k_512_finetuned_squad_seed_6","en")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_base_few_shot_k_512_finetuned_squad_seed_6","en")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq("What's my name?","My name is Clara and I live in Berkeley.").toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|roberta_qa_base_few_shot_k_512_finetuned_squad_seed_6|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|432.9 MB|
+|Case sensitive:|true|
+|Max sentence length:|256|
+
+## References
+
+- https://huggingface.co/anas-awadalla/roberta-base-few-shot-k-512-finetuned-squad-seed-6
\ No newline at end of file
diff --git a/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_few_shot_k_512_finetuned_squad_seed_8_en.md b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_few_shot_k_512_finetuned_squad_seed_8_en.md
new file mode 100644
index 00000000000000..28d4609f57abae
--- /dev/null
+++ b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_few_shot_k_512_finetuned_squad_seed_8_en.md
@@ -0,0 +1,88 @@
+---
+layout: model
+title: English RobertaForQuestionAnswering Base Cased model (from anas-awadalla)
+author: John Snow Labs
+name: roberta_qa_base_few_shot_k_512_finetuned_squad_seed_8
+date: 2023-01-20
+tags: [en, open_source, roberta, question_answering, tensorflow]
+task: Question Answering
+language: en
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: RoBertaForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained RobertaForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `roberta-base-few-shot-k-512-finetuned-squad-seed-8` is an English model originally trained by `anas-awadalla`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/roberta_qa_base_few_shot_k_512_finetuned_squad_seed_8_en_4.3.0_3.0_1674215724845.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/roberta_qa_base_few_shot_k_512_finetuned_squad_seed_8_en_4.3.0_3.0_1674215724845.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_base_few_shot_k_512_finetuned_squad_seed_8","en")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_base_few_shot_k_512_finetuned_squad_seed_8","en")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq("What's my name?","My name is Clara and I live in Berkeley.").toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|roberta_qa_base_few_shot_k_512_finetuned_squad_seed_8|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|432.8 MB|
+|Case sensitive:|true|
+|Max sentence length:|256|
+
+## References
+
+- https://huggingface.co/anas-awadalla/roberta-base-few-shot-k-512-finetuned-squad-seed-8
\ No newline at end of file
diff --git a/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_few_shot_k_64_finetuned_squad_seed_0_en.md b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_few_shot_k_64_finetuned_squad_seed_0_en.md
new file mode 100644
index 00000000000000..bea53ab90d5d9b
--- /dev/null
+++ b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_few_shot_k_64_finetuned_squad_seed_0_en.md
@@ -0,0 +1,88 @@
+---
+layout: model
+title: English RobertaForQuestionAnswering Base Cased model (from anas-awadalla)
+author: John Snow Labs
+name: roberta_qa_base_few_shot_k_64_finetuned_squad_seed_0
+date: 2023-01-20
+tags: [en, open_source, roberta, question_answering, tensorflow]
+task: Question Answering
+language: en
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: RoBertaForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained RobertaForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `roberta-base-few-shot-k-64-finetuned-squad-seed-0` is an English model originally trained by `anas-awadalla`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/roberta_qa_base_few_shot_k_64_finetuned_squad_seed_0_en_4.3.0_3.0_1674215810378.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/roberta_qa_base_few_shot_k_64_finetuned_squad_seed_0_en_4.3.0_3.0_1674215810378.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_base_few_shot_k_64_finetuned_squad_seed_0","en")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_base_few_shot_k_64_finetuned_squad_seed_0","en")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq("What's my name?","My name is Clara and I live in Berkeley.").toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|roberta_qa_base_few_shot_k_64_finetuned_squad_seed_0|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|419.4 MB|
+|Case sensitive:|true|
+|Max sentence length:|256|
+
+## References
+
+- https://huggingface.co/anas-awadalla/roberta-base-few-shot-k-64-finetuned-squad-seed-0
\ No newline at end of file
diff --git a/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_few_shot_k_64_finetuned_squad_seed_10_en.md b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_few_shot_k_64_finetuned_squad_seed_10_en.md
new file mode 100644
index 00000000000000..37ddba6d018b63
--- /dev/null
+++ b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_few_shot_k_64_finetuned_squad_seed_10_en.md
@@ -0,0 +1,88 @@
+---
+layout: model
+title: English RobertaForQuestionAnswering Base Cased model (from anas-awadalla)
+author: John Snow Labs
+name: roberta_qa_base_few_shot_k_64_finetuned_squad_seed_10
+date: 2023-01-20
+tags: [en, open_source, roberta, question_answering, tensorflow]
+task: Question Answering
+language: en
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: RoBertaForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained RobertaForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `roberta-base-few-shot-k-64-finetuned-squad-seed-10` is an English model originally trained by `anas-awadalla`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/roberta_qa_base_few_shot_k_64_finetuned_squad_seed_10_en_4.3.0_3.0_1674215893984.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/roberta_qa_base_few_shot_k_64_finetuned_squad_seed_10_en_4.3.0_3.0_1674215893984.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_base_few_shot_k_64_finetuned_squad_seed_10","en")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_base_few_shot_k_64_finetuned_squad_seed_10","en")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq("What's my name?","My name is Clara and I live in Berkeley.").toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|roberta_qa_base_few_shot_k_64_finetuned_squad_seed_10|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|419.8 MB|
+|Case sensitive:|true|
+|Max sentence length:|256|
+
+## References
+
+- https://huggingface.co/anas-awadalla/roberta-base-few-shot-k-64-finetuned-squad-seed-10
\ No newline at end of file
diff --git a/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_few_shot_k_64_finetuned_squad_seed_2_en.md b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_few_shot_k_64_finetuned_squad_seed_2_en.md
new file mode 100644
index 00000000000000..18dd0343abdb31
--- /dev/null
+++ b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_few_shot_k_64_finetuned_squad_seed_2_en.md
@@ -0,0 +1,88 @@
+---
+layout: model
+title: English RobertaForQuestionAnswering Base Cased model (from anas-awadalla)
+author: John Snow Labs
+name: roberta_qa_base_few_shot_k_64_finetuned_squad_seed_2
+date: 2023-01-20
+tags: [en, open_source, roberta, question_answering, tensorflow]
+task: Question Answering
+language: en
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: RoBertaForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained RobertaForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `roberta-base-few-shot-k-64-finetuned-squad-seed-2` is an English model originally trained by `anas-awadalla`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/roberta_qa_base_few_shot_k_64_finetuned_squad_seed_2_en_4.3.0_3.0_1674215981311.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/roberta_qa_base_few_shot_k_64_finetuned_squad_seed_2_en_4.3.0_3.0_1674215981311.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_base_few_shot_k_64_finetuned_squad_seed_2","en")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_base_few_shot_k_64_finetuned_squad_seed_2","en")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq("What's my name?","My name is Clara and I live in Berkeley.").toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|roberta_qa_base_few_shot_k_64_finetuned_squad_seed_2|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|419.9 MB|
+|Case sensitive:|true|
+|Max sentence length:|256|
+
+## References
+
+- https://huggingface.co/anas-awadalla/roberta-base-few-shot-k-64-finetuned-squad-seed-2
\ No newline at end of file
diff --git a/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_few_shot_k_64_finetuned_squad_seed_4_en.md b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_few_shot_k_64_finetuned_squad_seed_4_en.md
new file mode 100644
index 00000000000000..12dba3f0711c06
--- /dev/null
+++ b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_few_shot_k_64_finetuned_squad_seed_4_en.md
@@ -0,0 +1,88 @@
+---
+layout: model
+title: English RobertaForQuestionAnswering Base Cased model (from anas-awadalla)
+author: John Snow Labs
+name: roberta_qa_base_few_shot_k_64_finetuned_squad_seed_4
+date: 2023-01-20
+tags: [en, open_source, roberta, question_answering, tensorflow]
+task: Question Answering
+language: en
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: RoBertaForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained RobertaForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `roberta-base-few-shot-k-64-finetuned-squad-seed-4` is an English model originally trained by `anas-awadalla`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/roberta_qa_base_few_shot_k_64_finetuned_squad_seed_4_en_4.3.0_3.0_1674216065319.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/roberta_qa_base_few_shot_k_64_finetuned_squad_seed_4_en_4.3.0_3.0_1674216065319.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_base_few_shot_k_64_finetuned_squad_seed_4","en")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_base_few_shot_k_64_finetuned_squad_seed_4","en")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq("What's my name?","My name is Clara and I live in Berkeley.").toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|roberta_qa_base_few_shot_k_64_finetuned_squad_seed_4|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|419.7 MB|
+|Case sensitive:|true|
+|Max sentence length:|256|
+
+## References
+
+- https://huggingface.co/anas-awadalla/roberta-base-few-shot-k-64-finetuned-squad-seed-4
\ No newline at end of file
diff --git a/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_few_shot_k_64_finetuned_squad_seed_6_en.md b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_few_shot_k_64_finetuned_squad_seed_6_en.md
new file mode 100644
index 00000000000000..e5d0696f0e2d95
--- /dev/null
+++ b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_few_shot_k_64_finetuned_squad_seed_6_en.md
@@ -0,0 +1,88 @@
+---
+layout: model
+title: English RobertaForQuestionAnswering Base Cased model (from anas-awadalla)
+author: John Snow Labs
+name: roberta_qa_base_few_shot_k_64_finetuned_squad_seed_6
+date: 2023-01-20
+tags: [en, open_source, roberta, question_answering, tensorflow]
+task: Question Answering
+language: en
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: RoBertaForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained RobertaForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `roberta-base-few-shot-k-64-finetuned-squad-seed-6` is an English model originally trained by `anas-awadalla`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/roberta_qa_base_few_shot_k_64_finetuned_squad_seed_6_en_4.3.0_3.0_1674216152630.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/roberta_qa_base_few_shot_k_64_finetuned_squad_seed_6_en_4.3.0_3.0_1674216152630.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_base_few_shot_k_64_finetuned_squad_seed_6","en")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_base_few_shot_k_64_finetuned_squad_seed_6","en")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq("What's my name?","My name is Clara and I live in Berkeley.").toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|roberta_qa_base_few_shot_k_64_finetuned_squad_seed_6|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|419.5 MB|
+|Case sensitive:|true|
+|Max sentence length:|256|
+
+## References
+
+- https://huggingface.co/anas-awadalla/roberta-base-few-shot-k-64-finetuned-squad-seed-6
\ No newline at end of file
diff --git a/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_few_shot_k_64_finetuned_squad_seed_8_en.md b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_few_shot_k_64_finetuned_squad_seed_8_en.md
new file mode 100644
index 00000000000000..9594ee2a3b042b
--- /dev/null
+++ b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_few_shot_k_64_finetuned_squad_seed_8_en.md
@@ -0,0 +1,88 @@
+---
+layout: model
+title: English RobertaForQuestionAnswering Base Cased model (from anas-awadalla)
+author: John Snow Labs
+name: roberta_qa_base_few_shot_k_64_finetuned_squad_seed_8
+date: 2023-01-20
+tags: [en, open_source, roberta, question_answering, tensorflow]
+task: Question Answering
+language: en
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: RoBertaForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained RobertaForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `roberta-base-few-shot-k-64-finetuned-squad-seed-8` is an English model originally trained by `anas-awadalla`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/roberta_qa_base_few_shot_k_64_finetuned_squad_seed_8_en_4.3.0_3.0_1674216234526.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/roberta_qa_base_few_shot_k_64_finetuned_squad_seed_8_en_4.3.0_3.0_1674216234526.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_base_few_shot_k_64_finetuned_squad_seed_8","en")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_base_few_shot_k_64_finetuned_squad_seed_8","en")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq("What's my name?","My name is Clara and I live in Berkeley.").toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|roberta_qa_base_few_shot_k_64_finetuned_squad_seed_8|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|419.6 MB|
+|Case sensitive:|true|
+|Max sentence length:|256|
+
+## References
+
+- https://huggingface.co/anas-awadalla/roberta-base-few-shot-k-64-finetuned-squad-seed-8
\ No newline at end of file
diff --git a/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_filtered_cuad_en.md b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_filtered_cuad_en.md
new file mode 100644
index 00000000000000..7a8c6ae593ca0e
--- /dev/null
+++ b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_filtered_cuad_en.md
@@ -0,0 +1,88 @@
+---
+layout: model
+title: English RobertaForQuestionAnswering Base Cased model (from alex-apostolo)
+author: John Snow Labs
+name: roberta_qa_base_filtered_cuad
+date: 2023-01-20
+tags: [en, open_source, roberta, question_answering, tensorflow]
+task: Question Answering
+language: en
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: RoBertaForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained RobertaForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `roberta-base-filtered-cuad` is an English model originally trained by `alex-apostolo`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/roberta_qa_base_filtered_cuad_en_4.3.0_3.0_1674216293189.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/roberta_qa_base_filtered_cuad_en_4.3.0_3.0_1674216293189.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_base_filtered_cuad","en")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_base_filtered_cuad","en")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq("What's my name?","My name is Clara and I live in Berkeley.").toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|roberta_qa_base_filtered_cuad|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|454.4 MB|
+|Case sensitive:|true|
+|Max sentence length:|256|
+
+## References
+
+- https://huggingface.co/alex-apostolo/roberta-base-filtered-cuad
\ No newline at end of file
diff --git a/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_finetuned_cuad_en.md b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_finetuned_cuad_en.md
new file mode 100644
index 00000000000000..829fb65ecdbdc0
--- /dev/null
+++ b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_finetuned_cuad_en.md
@@ -0,0 +1,88 @@
+---
+layout: model
+title: English RobertaForQuestionAnswering Base Cased model (from Gam)
+author: John Snow Labs
+name: roberta_qa_base_finetuned_cuad
+date: 2023-01-20
+tags: [en, open_source, roberta, question_answering, tensorflow]
+task: Question Answering
+language: en
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: RoBertaForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained RobertaForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `roberta-base-finetuned-cuad` is an English model originally trained by `Gam`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/roberta_qa_base_finetuned_cuad_en_4.3.0_3.0_1674216413698.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/roberta_qa_base_finetuned_cuad_en_4.3.0_3.0_1674216413698.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_base_finetuned_cuad","en")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_base_finetuned_cuad","en")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq("What's my name?","My name is Clara and I live in Berkeley.").toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|roberta_qa_base_finetuned_cuad|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|451.7 MB|
+|Case sensitive:|true|
+|Max sentence length:|256|
+
+## References
+
+- https://huggingface.co/Gam/roberta-base-finetuned-cuad
\ No newline at end of file
diff --git a/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_finetuned_cuad_gam_en.md b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_finetuned_cuad_gam_en.md
new file mode 100644
index 00000000000000..5f4985f174ee0d
--- /dev/null
+++ b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_finetuned_cuad_gam_en.md
@@ -0,0 +1,88 @@
+---
+layout: model
+title: English RobertaForQuestionAnswering Base Cased model (from Gam)
+author: John Snow Labs
+name: roberta_qa_base_finetuned_cuad_gam
+date: 2023-01-20
+tags: [en, open_source, roberta, question_answering, tensorflow]
+task: Question Answering
+language: en
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: RoBertaForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained RobertaForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `roberta-base-finetuned-cuad-gam` is an English model originally trained by `Gam`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/roberta_qa_base_finetuned_cuad_gam_en_4.3.0_3.0_1674216483026.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/roberta_qa_base_finetuned_cuad_gam_en_4.3.0_3.0_1674216483026.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_base_finetuned_cuad_gam","en")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_base_finetuned_cuad_gam","en")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq("What's my name?","My name is Clara and I live in Berkeley.").toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|roberta_qa_base_finetuned_cuad_gam|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|450.3 MB|
+|Case sensitive:|true|
+|Max sentence length:|256|
+
+## References
+
+- https://huggingface.co/Gam/roberta-base-finetuned-cuad-gam
\ No newline at end of file
diff --git a/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_finetuned_deletion_squad_10_en.md b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_finetuned_deletion_squad_10_en.md
new file mode 100644
index 00000000000000..2778a02dd3f7c3
--- /dev/null
+++ b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_finetuned_deletion_squad_10_en.md
@@ -0,0 +1,88 @@
+---
+layout: model
+title: English RobertaForQuestionAnswering Base Cased model (from huxxx657)
+author: John Snow Labs
+name: roberta_qa_base_finetuned_deletion_squad_10
+date: 2023-01-20
+tags: [en, open_source, roberta, question_answering, tensorflow]
+task: Question Answering
+language: en
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: RoBertaForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained RobertaForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `roberta-base-finetuned-deletion-squad-10` is an English model originally trained by `huxxx657`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/roberta_qa_base_finetuned_deletion_squad_10_en_4.3.0_3.0_1674216541297.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/roberta_qa_base_finetuned_deletion_squad_10_en_4.3.0_3.0_1674216541297.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_base_finetuned_deletion_squad_10","en")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_base_finetuned_deletion_squad_10","en")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq("What's my name?","My name is Clara and I live in Berkeley.").toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|roberta_qa_base_finetuned_deletion_squad_10|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|464.2 MB|
+|Case sensitive:|true|
+|Max sentence length:|256|
+
+## References
+
+- https://huggingface.co/huxxx657/roberta-base-finetuned-deletion-squad-10
\ No newline at end of file
diff --git a/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_finetuned_deletion_squad_15_en.md b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_finetuned_deletion_squad_15_en.md
new file mode 100644
index 00000000000000..90038bccc8316e
--- /dev/null
+++ b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_finetuned_deletion_squad_15_en.md
@@ -0,0 +1,88 @@
+---
+layout: model
+title: English RobertaForQuestionAnswering Base Cased model (from huxxx657)
+author: John Snow Labs
+name: roberta_qa_base_finetuned_deletion_squad_15
+date: 2023-01-20
+tags: [en, open_source, roberta, question_answering, tensorflow]
+task: Question Answering
+language: en
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: RoBertaForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained RobertaForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `roberta-base-finetuned-deletion-squad-15` is an English model originally trained by `huxxx657`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/roberta_qa_base_finetuned_deletion_squad_15_en_4.3.0_3.0_1674216599443.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/roberta_qa_base_finetuned_deletion_squad_15_en_4.3.0_3.0_1674216599443.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_base_finetuned_deletion_squad_15","en")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_base_finetuned_deletion_squad_15","en")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq("What's my name?","My name is Clara and I live in Berkeley.").toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|roberta_qa_base_finetuned_deletion_squad_15|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|464.3 MB|
+|Case sensitive:|true|
+|Max sentence length:|256|
+
+## References
+
+- https://huggingface.co/huxxx657/roberta-base-finetuned-deletion-squad-15
\ No newline at end of file
diff --git a/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_finetuned_en.md b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_finetuned_en.md
new file mode 100644
index 00000000000000..60993820c4082c
--- /dev/null
+++ b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_finetuned_en.md
@@ -0,0 +1,88 @@
+---
+layout: model
+title: English RobertaForQuestionAnswering Base Cased model (from 123tarunanand)
+author: John Snow Labs
+name: roberta_qa_base_finetuned
+date: 2023-01-20
+tags: [en, open_source, roberta, question_answering, tensorflow]
+task: Question Answering
+language: en
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: RoBertaForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained RobertaForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `roberta-base-finetuned` is an English model originally trained by `123tarunanand`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/roberta_qa_base_finetuned_en_4.3.0_3.0_1674216346492.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/roberta_qa_base_finetuned_en_4.3.0_3.0_1674216346492.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_base_finetuned","en")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_base_finetuned","en")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq("What's my name?","My name is Clara and I live in Berkeley.").toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|roberta_qa_base_finetuned|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|464.4 MB|
+|Case sensitive:|true|
+|Max sentence length:|256|
+
+## References
+
+- https://huggingface.co/123tarunanand/roberta-base-finetuned
\ No newline at end of file
diff --git a/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_finetuned_hotpot_en.md b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_finetuned_hotpot_en.md
new file mode 100644
index 00000000000000..469d5d33532165
--- /dev/null
+++ b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_finetuned_hotpot_en.md
@@ -0,0 +1,88 @@
+---
+layout: model
+title: English RobertaForQuestionAnswering Base Cased model (from clevrly)
+author: John Snow Labs
+name: roberta_qa_base_finetuned_hotpot
+date: 2023-01-20
+tags: [en, open_source, roberta, question_answering, tensorflow]
+task: Question Answering
+language: en
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: RoBertaForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained RobertaForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `roberta-base-finetuned-hotpot_qa` is an English model originally trained by `clevrly`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/roberta_qa_base_finetuned_hotpot_en_4.3.0_3.0_1674216656084.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/roberta_qa_base_finetuned_hotpot_en_4.3.0_3.0_1674216656084.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_base_finetuned_hotpot","en")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_base_finetuned_hotpot","en")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq("What's my name?","My name is Clara and I live in Berkeley.").toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|roberta_qa_base_finetuned_hotpot|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|464.6 MB|
+|Case sensitive:|true|
+|Max sentence length:|256|
+
+## References
+
+- https://huggingface.co/clevrly/roberta-base-finetuned-hotpot_qa
\ No newline at end of file
diff --git a/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_finetuned_scrambled_squad_10_en.md b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_finetuned_scrambled_squad_10_en.md
new file mode 100644
index 00000000000000..afd9c5cd8f9204
--- /dev/null
+++ b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_finetuned_scrambled_squad_10_en.md
@@ -0,0 +1,88 @@
+---
+layout: model
+title: English RobertaForQuestionAnswering Base Cased model (from huxxx657)
+author: John Snow Labs
+name: roberta_qa_base_finetuned_scrambled_squad_10
+date: 2023-01-20
+tags: [en, open_source, roberta, question_answering, tensorflow]
+task: Question Answering
+language: en
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: RoBertaForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained RobertaForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `roberta-base-finetuned-scrambled-squad-10` is an English model originally trained by `huxxx657`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/roberta_qa_base_finetuned_scrambled_squad_10_en_4.3.0_3.0_1674216712953.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/roberta_qa_base_finetuned_scrambled_squad_10_en_4.3.0_3.0_1674216712953.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_base_finetuned_scrambled_squad_10","en")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_base_finetuned_scrambled_squad_10","en")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq("What's my name?","My name is Clara and I live in Berkeley.").toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|roberta_qa_base_finetuned_scrambled_squad_10|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|464.0 MB|
+|Case sensitive:|true|
+|Max sentence length:|256|
+
+## References
+
+- https://huggingface.co/huxxx657/roberta-base-finetuned-scrambled-squad-10
\ No newline at end of file
diff --git a/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_finetuned_scrambled_squad_10_new_en.md b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_finetuned_scrambled_squad_10_new_en.md
new file mode 100644
index 00000000000000..ab82c9ef59f9c2
--- /dev/null
+++ b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_finetuned_scrambled_squad_10_new_en.md
@@ -0,0 +1,88 @@
+---
+layout: model
+title: English RobertaForQuestionAnswering Base Cased model (from huxxx657)
+author: John Snow Labs
+name: roberta_qa_base_finetuned_scrambled_squad_10_new
+date: 2023-01-20
+tags: [en, open_source, roberta, question_answering, tensorflow]
+task: Question Answering
+language: en
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: RoBertaForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained RobertaForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `roberta-base-finetuned-scrambled-squad-10-new` is an English model originally trained by `huxxx657`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/roberta_qa_base_finetuned_scrambled_squad_10_new_en_4.3.0_3.0_1674216770257.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/roberta_qa_base_finetuned_scrambled_squad_10_new_en_4.3.0_3.0_1674216770257.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_base_finetuned_scrambled_squad_10_new","en")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_base_finetuned_scrambled_squad_10_new","en")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq("What's my name?","My name is Clara and I live in Berkeley.").toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|roberta_qa_base_finetuned_scrambled_squad_10_new|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|464.4 MB|
+|Case sensitive:|true|
+|Max sentence length:|256|
+
+## References
+
+- https://huggingface.co/huxxx657/roberta-base-finetuned-scrambled-squad-10-new
\ No newline at end of file
diff --git a/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_finetuned_scrambled_squad_15_en.md b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_finetuned_scrambled_squad_15_en.md
new file mode 100644
index 00000000000000..dffae3a3e4eb2c
--- /dev/null
+++ b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_finetuned_scrambled_squad_15_en.md
@@ -0,0 +1,88 @@
+---
+layout: model
+title: English RobertaForQuestionAnswering Base Cased model (from huxxx657)
+author: John Snow Labs
+name: roberta_qa_base_finetuned_scrambled_squad_15
+date: 2023-01-20
+tags: [en, open_source, roberta, question_answering, tensorflow]
+task: Question Answering
+language: en
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: RoBertaForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained RobertaForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `roberta-base-finetuned-scrambled-squad-15` is an English model originally trained by `huxxx657`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/roberta_qa_base_finetuned_scrambled_squad_15_en_4.3.0_3.0_1674216826395.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/roberta_qa_base_finetuned_scrambled_squad_15_en_4.3.0_3.0_1674216826395.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_base_finetuned_scrambled_squad_15","en")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_base_finetuned_scrambled_squad_15","en")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq("What's my name?","My name is Clara and I live in Berkeley.").toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|roberta_qa_base_finetuned_scrambled_squad_15|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|464.3 MB|
+|Case sensitive:|true|
+|Max sentence length:|256|
+
+## References
+
+- https://huggingface.co/huxxx657/roberta-base-finetuned-scrambled-squad-15
\ No newline at end of file
diff --git a/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_finetuned_scrambled_squad_15_new_en.md b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_finetuned_scrambled_squad_15_new_en.md
new file mode 100644
index 00000000000000..304b87a1c8ae32
--- /dev/null
+++ b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_finetuned_scrambled_squad_15_new_en.md
@@ -0,0 +1,88 @@
+---
+layout: model
+title: English RobertaForQuestionAnswering Base Cased model (from huxxx657)
+author: John Snow Labs
+name: roberta_qa_base_finetuned_scrambled_squad_15_new
+date: 2023-01-20
+tags: [en, open_source, roberta, question_answering, tensorflow]
+task: Question Answering
+language: en
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: RoBertaForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained RobertaForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `roberta-base-finetuned-scrambled-squad-15-new` is an English model originally trained by `huxxx657`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/roberta_qa_base_finetuned_scrambled_squad_15_new_en_4.3.0_3.0_1674216883682.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/roberta_qa_base_finetuned_scrambled_squad_15_new_en_4.3.0_3.0_1674216883682.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_base_finetuned_scrambled_squad_15_new","en")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_base_finetuned_scrambled_squad_15_new","en")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq("What's my name?","My name is Clara and I live in Berkeley.").toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|roberta_qa_base_finetuned_scrambled_squad_15_new|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|464.5 MB|
+|Case sensitive:|true|
+|Max sentence length:|256|
+
+## References
+
+- https://huggingface.co/huxxx657/roberta-base-finetuned-scrambled-squad-15-new
\ No newline at end of file
diff --git a/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_finetuned_scrambled_squad_5_en.md b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_finetuned_scrambled_squad_5_en.md
new file mode 100644
index 00000000000000..13b4dc9572037a
--- /dev/null
+++ b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_finetuned_scrambled_squad_5_en.md
@@ -0,0 +1,88 @@
+---
+layout: model
+title: English RobertaForQuestionAnswering Base Cased model (from huxxx657)
+author: John Snow Labs
+name: roberta_qa_base_finetuned_scrambled_squad_5
+date: 2023-01-20
+tags: [en, open_source, roberta, question_answering, tensorflow]
+task: Question Answering
+language: en
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: RoBertaForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained RobertaForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `roberta-base-finetuned-scrambled-squad-5` is an English model originally trained by `huxxx657`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/roberta_qa_base_finetuned_scrambled_squad_5_en_4.3.0_3.0_1674216944002.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/roberta_qa_base_finetuned_scrambled_squad_5_en_4.3.0_3.0_1674216944002.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_base_finetuned_scrambled_squad_5","en")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_base_finetuned_scrambled_squad_5","en")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq("What's my name?","My name is Clara and I live in Berkeley.").toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|roberta_qa_base_finetuned_scrambled_squad_5|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|464.0 MB|
+|Case sensitive:|true|
+|Max sentence length:|256|
+
+## References
+
+- https://huggingface.co/huxxx657/roberta-base-finetuned-scrambled-squad-5
\ No newline at end of file
diff --git a/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_finetuned_scrambled_squad_5_new_en.md b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_finetuned_scrambled_squad_5_new_en.md
new file mode 100644
index 00000000000000..29edb79e7dbcc1
--- /dev/null
+++ b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_finetuned_scrambled_squad_5_new_en.md
@@ -0,0 +1,88 @@
+---
+layout: model
+title: English RobertaForQuestionAnswering Base Cased model (from huxxx657)
+author: John Snow Labs
+name: roberta_qa_base_finetuned_scrambled_squad_5_new
+date: 2023-01-20
+tags: [en, open_source, roberta, question_answering, tensorflow]
+task: Question Answering
+language: en
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: RoBertaForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained RobertaForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `roberta-base-finetuned-scrambled-squad-5-new` is an English model originally trained by `huxxx657`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/roberta_qa_base_finetuned_scrambled_squad_5_new_en_4.3.0_3.0_1674216999446.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/roberta_qa_base_finetuned_scrambled_squad_5_new_en_4.3.0_3.0_1674216999446.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_base_finetuned_scrambled_squad_5_new","en")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_base_finetuned_scrambled_squad_5_new","en")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq("What's my name?","My name is Clara and I live in Berkeley.").toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|roberta_qa_base_finetuned_scrambled_squad_5_new|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|464.4 MB|
+|Case sensitive:|true|
+|Max sentence length:|256|
+
+## References
+
+- https://huggingface.co/huxxx657/roberta-base-finetuned-scrambled-squad-5-new
\ No newline at end of file
diff --git a/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_finetuned_squad2_en.md b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_finetuned_squad2_en.md
new file mode 100644
index 00000000000000..09d9f6c0fcb12e
--- /dev/null
+++ b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_finetuned_squad2_en.md
@@ -0,0 +1,88 @@
+---
+layout: model
+title: English RobertaForQuestionAnswering Base Cased model (from mvonwyl)
+author: John Snow Labs
+name: roberta_qa_base_finetuned_squad2
+date: 2023-01-20
+tags: [en, open_source, roberta, question_answering, tensorflow]
+task: Question Answering
+language: en
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: RoBertaForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained RobertaForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `roberta-base-finetuned-squad2` is an English model originally trained by `mvonwyl`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/roberta_qa_base_finetuned_squad2_en_4.3.0_3.0_1674217768093.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/roberta_qa_base_finetuned_squad2_en_4.3.0_3.0_1674217768093.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_base_finetuned_squad2","en")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_base_finetuned_squad2","en")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq("What's my name?","My name is Clara and I live in Berkeley.").toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|roberta_qa_base_finetuned_squad2|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|464.4 MB|
+|Case sensitive:|true|
+|Max sentence length:|256|
+
+## References
+
+- https://huggingface.co/mvonwyl/roberta-base-finetuned-squad2
\ No newline at end of file
diff --git a/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_finetuned_squad2_lwt_en.md b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_finetuned_squad2_lwt_en.md
new file mode 100644
index 00000000000000..650856949fae73
--- /dev/null
+++ b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_finetuned_squad2_lwt_en.md
@@ -0,0 +1,88 @@
+---
+layout: model
+title: English RobertaForQuestionAnswering Base Cased model (from 21iridescent)
+author: John Snow Labs
+name: roberta_qa_base_finetuned_squad2_lwt
+date: 2023-01-20
+tags: [en, open_source, roberta, question_answering, tensorflow]
+task: Question Answering
+language: en
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: RoBertaForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained RobertaForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `distilroberta-base-finetuned-squad2-lwt` is an English model originally trained by `21iridescent`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/roberta_qa_base_finetuned_squad2_lwt_en_4.3.0_3.0_1674210398377.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/roberta_qa_base_finetuned_squad2_lwt_en_4.3.0_3.0_1674210398377.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_base_finetuned_squad2_lwt","en")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_base_finetuned_squad2_lwt","en")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq("What's my name?","My name is Clara and I live in Berkeley.").toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|roberta_qa_base_finetuned_squad2_lwt|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|307.1 MB|
+|Case sensitive:|true|
+|Max sentence length:|256|
+
+## References
+
+- https://huggingface.co/21iridescent/distilroberta-base-finetuned-squad2-lwt
\ No newline at end of file
diff --git a/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_finetuned_squad_1_en.md b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_finetuned_squad_1_en.md
new file mode 100644
index 00000000000000..0e49fe372451ba
--- /dev/null
+++ b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_finetuned_squad_1_en.md
@@ -0,0 +1,88 @@
+---
+layout: model
+title: English RobertaForQuestionAnswering Base Cased model (from huxxx657)
+author: John Snow Labs
+name: roberta_qa_base_finetuned_squad_1
+date: 2023-01-20
+tags: [en, open_source, roberta, question_answering, tensorflow]
+task: Question Answering
+language: en
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: RoBertaForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained RobertaForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `roberta-base-finetuned-squad-1` is an English model originally trained by `huxxx657`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/roberta_qa_base_finetuned_squad_1_en_4.3.0_3.0_1674217536049.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/roberta_qa_base_finetuned_squad_1_en_4.3.0_3.0_1674217536049.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_base_finetuned_squad_1","en")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_base_finetuned_squad_1","en")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq("What's my name?","My name is Clara and I live in Berkeley.").toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|roberta_qa_base_finetuned_squad_1|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document, token]|
+|Output Labels:|[class]|
+|Language:|en|
+|Size:|464.1 MB|
+|Case sensitive:|true|
+|Max sentence length:|256|
+
+## References
+
+- https://huggingface.co/huxxx657/roberta-base-finetuned-squad-1
\ No newline at end of file
diff --git a/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_finetuned_squad_2_en.md b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_finetuned_squad_2_en.md
new file mode 100644
index 00000000000000..30e388f2f7ab86
--- /dev/null
+++ b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_finetuned_squad_2_en.md
@@ -0,0 +1,88 @@
+---
+layout: model
+title: English RobertaForQuestionAnswering Base Cased model (from huxxx657)
+author: John Snow Labs
+name: roberta_qa_base_finetuned_squad_2
+date: 2023-01-20
+tags: [en, open_source, roberta, question_answering, tensorflow]
+task: Question Answering
+language: en
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: RoBertaForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained RobertaForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `roberta-base-finetuned-squad-2` is an English model originally trained by `huxxx657`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/roberta_qa_base_finetuned_squad_2_en_4.3.0_3.0_1674217595105.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/roberta_qa_base_finetuned_squad_2_en_4.3.0_3.0_1674217595105.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_base_finetuned_squad_2","en")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_base_finetuned_squad_2","en")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq("What's my name?","My name is Clara and I live in Berkeley.").toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|roberta_qa_base_finetuned_squad_2|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document, token]|
+|Output Labels:|[class]|
+|Language:|en|
+|Size:|438.0 MB|
+|Case sensitive:|true|
+|Max sentence length:|256|
+
+## References
+
+- https://huggingface.co/huxxx657/roberta-base-finetuned-squad-2
\ No newline at end of file
diff --git a/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_finetuned_squad_3_en.md b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_finetuned_squad_3_en.md
new file mode 100644
index 00000000000000..2f30eaa3decb08
--- /dev/null
+++ b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_finetuned_squad_3_en.md
@@ -0,0 +1,88 @@
+---
+layout: model
+title: English RobertaForQuestionAnswering Base Cased model (from huxxx657)
+author: John Snow Labs
+name: roberta_qa_base_finetuned_squad_3
+date: 2023-01-20
+tags: [en, open_source, roberta, question_answering, tensorflow]
+task: Question Answering
+language: en
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: RoBertaForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained RobertaForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `roberta-base-finetuned-squad-3` is an English model originally trained by `huxxx657`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/roberta_qa_base_finetuned_squad_3_en_4.3.0_3.0_1674217654479.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/roberta_qa_base_finetuned_squad_3_en_4.3.0_3.0_1674217654479.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_base_finetuned_squad_3","en")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_base_finetuned_squad_3","en")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq("What's my name?","My name is Clara and I live in Berkeley.").toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|roberta_qa_base_finetuned_squad_3|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document, token]|
+|Output Labels:|[class]|
+|Language:|en|
+|Size:|464.2 MB|
+|Case sensitive:|true|
+|Max sentence length:|256|
+
+## References
+
+- https://huggingface.co/huxxx657/roberta-base-finetuned-squad-3
\ No newline at end of file
diff --git a/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_finetuned_squad_en.md b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_finetuned_squad_en.md
new file mode 100644
index 00000000000000..e23debf2901bb6
--- /dev/null
+++ b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_finetuned_squad_en.md
@@ -0,0 +1,88 @@
+---
+layout: model
+title: English RobertaForQuestionAnswering Base Cased model (from victorlee071200)
+author: John Snow Labs
+name: roberta_qa_base_finetuned_squad
+date: 2023-01-20
+tags: [en, open_source, roberta, question_answering, tensorflow]
+task: Question Answering
+language: en
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: RoBertaForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained RobertaForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `distilroberta-base-finetuned-squad` is an English model originally trained by `victorlee071200`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/roberta_qa_base_finetuned_squad_en_4.3.0_3.0_1674210358228.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/roberta_qa_base_finetuned_squad_en_4.3.0_3.0_1674210358228.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_base_finetuned_squad","en")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_base_finetuned_squad","en")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq("What's my name?","My name is Clara and I live in Berkeley.").toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|roberta_qa_base_finetuned_squad|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document, token]|
+|Output Labels:|[class]|
+|Language:|en|
+|Size:|307.1 MB|
+|Case sensitive:|true|
+|Max sentence length:|256|
+
+## References
+
+- https://huggingface.co/victorlee071200/distilroberta-base-finetuned-squad
\ No newline at end of file
diff --git a/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_finetuned_squad_r3f_en.md b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_finetuned_squad_r3f_en.md
new file mode 100644
index 00000000000000..bb875c0b682d4b
--- /dev/null
+++ b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_finetuned_squad_r3f_en.md
@@ -0,0 +1,88 @@
+---
+layout: model
+title: English RobertaForQuestionAnswering Base Cased model (from anas-awadalla)
+author: John Snow Labs
+name: roberta_qa_base_finetuned_squad_r3f
+date: 2023-01-20
+tags: [en, open_source, roberta, question_answering, tensorflow]
+task: Question Answering
+language: en
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: RoBertaForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained RobertaForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `roberta-base-finetuned-squad-r3f` is an English model originally trained by `anas-awadalla`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/roberta_qa_base_finetuned_squad_r3f_en_4.3.0_3.0_1674217712089.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/roberta_qa_base_finetuned_squad_r3f_en_4.3.0_3.0_1674217712089.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_base_finetuned_squad_r3f","en")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_base_finetuned_squad_r3f","en")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq("What's my name?","My name is Clara and I live in Berkeley.").toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|roberta_qa_base_finetuned_squad_r3f|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document, token]|
+|Output Labels:|[class]|
+|Language:|en|
+|Size:|464.2 MB|
+|Case sensitive:|true|
+|Max sentence length:|256|
+
+## References
+
+- https://huggingface.co/anas-awadalla/roberta-base-finetuned-squad-r3f
\ No newline at end of file
diff --git a/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_finetuned_squad_v2_en.md b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_finetuned_squad_v2_en.md
new file mode 100644
index 00000000000000..ce227b6e13c3a5
--- /dev/null
+++ b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_finetuned_squad_v2_en.md
@@ -0,0 +1,88 @@
+---
+layout: model
+title: English RobertaForQuestionAnswering Base Cased model (from victorlee071200)
+author: John Snow Labs
+name: roberta_qa_base_finetuned_squad_v2
+date: 2023-01-20
+tags: [en, open_source, roberta, question_answering, tensorflow]
+task: Question Answering
+language: en
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: RoBertaForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained RobertaForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `distilroberta-base-finetuned-squad_v2` is an English model originally trained by `victorlee071200`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/roberta_qa_base_finetuned_squad_v2_en_4.3.0_3.0_1674210438571.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/roberta_qa_base_finetuned_squad_v2_en_4.3.0_3.0_1674210438571.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_base_finetuned_squad_v2","en")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_base_finetuned_squad_v2","en")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq("What's my name?","My name is Clara and I live in Berkeley.").toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|roberta_qa_base_finetuned_squad_v2|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document, token]|
+|Output Labels:|[class]|
+|Language:|en|
+|Size:|307.1 MB|
+|Case sensitive:|true|
+|Max sentence length:|256|
+
+## References
+
+- https://huggingface.co/victorlee071200/distilroberta-base-finetuned-squad_v2
\ No newline at end of file
diff --git a/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_ft_esg_en.md b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_ft_esg_en.md
new file mode 100644
index 00000000000000..d4339e41fc63c1
--- /dev/null
+++ b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_ft_esg_en.md
@@ -0,0 +1,89 @@
+---
+layout: model
+title: English RobertaForQuestionAnswering Base Cased model (from Ayushb)
+author: John Snow Labs
+name: roberta_qa_base_ft_esg
+date: 2023-01-20
+tags: [en, open_source, roberta, question_answering, tensorflow]
+task: Question Answering
+language: en
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: RoBertaForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained RobertaForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `roberta-base-ft-esg` is an English model originally trained by `Ayushb`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/roberta_qa_base_ft_esg_en_4.3.0_3.0_1674217855538.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/roberta_qa_base_ft_esg_en_4.3.0_3.0_1674217855538.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_base_ft_esg","en")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_base_ft_esg","en")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq("What's my name?","My name is Clara and I live in Berkeley.").toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|roberta_qa_base_ft_esg|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document, token]|
+|Output Labels:|[class]|
+|Language:|en|
+|Size:|416.4 MB|
+|Case sensitive:|true|
+|Max sentence length:|256|
+
+## References
+
+- https://huggingface.co/Ayushb/roberta-base-ft-esg
+- https://www.github.com/Ayush1702
\ No newline at end of file
diff --git a/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_qna_squad2_trained_en.md b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_qna_squad2_trained_en.md
new file mode 100644
index 00000000000000..7e4f3bc91a6c88
--- /dev/null
+++ b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_qna_squad2_trained_en.md
@@ -0,0 +1,88 @@
+---
+layout: model
+title: English RobertaForQuestionAnswering Base Cased model (from Shappey)
+author: John Snow Labs
+name: roberta_qa_base_qna_squad2_trained
+date: 2023-01-20
+tags: [en, open_source, roberta, question_answering, tensorflow]
+task: Question Answering
+language: en
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: RoBertaForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained RobertaForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `roberta-base-QnA-squad2-trained` is an English model originally trained by `Shappey`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/roberta_qa_base_qna_squad2_trained_en_4.3.0_3.0_1674212572106.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/roberta_qa_base_qna_squad2_trained_en_4.3.0_3.0_1674212572106.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_base_qna_squad2_trained","en")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_base_qna_squad2_trained","en")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq("What's my name?","My name is Clara and I live in Berkeley.").toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|roberta_qa_base_qna_squad2_trained|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document, token]|
+|Output Labels:|[class]|
+|Language:|en|
+|Size:|456.8 MB|
+|Case sensitive:|true|
+|Max sentence length:|256|
+
+## References
+
+- https://huggingface.co/Shappey/roberta-base-QnA-squad2-trained
\ No newline at end of file
diff --git a/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_spanish_squades_becas1_es.md b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_spanish_squades_becas1_es.md
new file mode 100644
index 00000000000000..47c7a4614068ae
--- /dev/null
+++ b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_spanish_squades_becas1_es.md
@@ -0,0 +1,88 @@
+---
+layout: model
+title: Spanish RobertaForQuestionAnswering Base Cased model (from Evelyn18)
+author: John Snow Labs
+name: roberta_qa_base_spanish_squades_becas1
+date: 2023-01-20
+tags: [es, open_source, roberta, question_answering, tensorflow]
+task: Question Answering
+language: es
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: RoBertaForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained RobertaForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `roberta-base-spanish-squades-becas1` is a Spanish model originally trained by `Evelyn18`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/roberta_qa_base_spanish_squades_becas1_es_4.3.0_3.0_1674217912605.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/roberta_qa_base_spanish_squades_becas1_es_4.3.0_3.0_1674217912605.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_base_spanish_squades_becas1","es")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_base_spanish_squades_becas1","es")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq("What's my name?","My name is Clara and I live in Berkeley.").toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|roberta_qa_base_spanish_squades_becas1|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document, token]|
+|Output Labels:|[class]|
+|Language:|es|
+|Size:|460.0 MB|
+|Case sensitive:|true|
+|Max sentence length:|256|
+
+## References
+
+- https://huggingface.co/Evelyn18/roberta-base-spanish-squades-becas1
\ No newline at end of file
diff --git a/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_spanish_squades_becasincentivos1_es.md b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_spanish_squades_becasincentivos1_es.md
new file mode 100644
index 00000000000000..2e12e116c2e79b
--- /dev/null
+++ b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_spanish_squades_becasincentivos1_es.md
@@ -0,0 +1,88 @@
+---
+layout: model
+title: Spanish RobertaForQuestionAnswering Base Cased model (from Evelyn18)
+author: John Snow Labs
+name: roberta_qa_base_spanish_squades_becasincentivos1
+date: 2023-01-20
+tags: [es, open_source, roberta, question_answering, tensorflow]
+task: Question Answering
+language: es
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: RoBertaForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained RobertaForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `roberta-base-spanish-squades-becasIncentivos1` is a Spanish model originally trained by `Evelyn18`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/roberta_qa_base_spanish_squades_becasincentivos1_es_4.3.0_3.0_1674217969589.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/roberta_qa_base_spanish_squades_becasincentivos1_es_4.3.0_3.0_1674217969589.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_base_spanish_squades_becasincentivos1","es")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_base_spanish_squades_becasincentivos1","es")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq("What's my name?","My name is Clara and I live in Berkeley.").toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|roberta_qa_base_spanish_squades_becasincentivos1|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document, token]|
+|Output Labels:|[class]|
+|Language:|es|
+|Size:|459.9 MB|
+|Case sensitive:|true|
+|Max sentence length:|256|
+
+## References
+
+- https://huggingface.co/Evelyn18/roberta-base-spanish-squades-becasIncentivos1
\ No newline at end of file
diff --git a/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_spanish_squades_becasincentivos2_es.md b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_spanish_squades_becasincentivos2_es.md
new file mode 100644
index 00000000000000..a757d3d6954b9c
--- /dev/null
+++ b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_spanish_squades_becasincentivos2_es.md
@@ -0,0 +1,88 @@
+---
+layout: model
+title: Spanish RobertaForQuestionAnswering Base Cased model (from Evelyn18)
+author: John Snow Labs
+name: roberta_qa_base_spanish_squades_becasincentivos2
+date: 2023-01-20
+tags: [es, open_source, roberta, question_answering, tensorflow]
+task: Question Answering
+language: es
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: RoBertaForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained RobertaForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `roberta-base-spanish-squades-becasIncentivos2` is a Spanish model originally trained by `Evelyn18`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/roberta_qa_base_spanish_squades_becasincentivos2_es_4.3.0_3.0_1674218030841.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/roberta_qa_base_spanish_squades_becasincentivos2_es_4.3.0_3.0_1674218030841.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_base_spanish_squades_becasincentivos2","es")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_base_spanish_squades_becasincentivos2","es")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq("What's my name?","My name is Clara and I live in Berkeley.").toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|roberta_qa_base_spanish_squades_becasincentivos2|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|es|
+|Size:|459.8 MB|
+|Case sensitive:|true|
+|Max sentence length:|256|
+
+## References
+
+- https://huggingface.co/Evelyn18/roberta-base-spanish-squades-becasIncentivos2
\ No newline at end of file
diff --git a/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_spanish_squades_becasincentivos3_es.md b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_spanish_squades_becasincentivos3_es.md
new file mode 100644
index 00000000000000..bebf584cffc06a
--- /dev/null
+++ b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_spanish_squades_becasincentivos3_es.md
@@ -0,0 +1,88 @@
+---
+layout: model
+title: Spanish RobertaForQuestionAnswering Base Cased model (from Evelyn18)
+author: John Snow Labs
+name: roberta_qa_base_spanish_squades_becasincentivos3
+date: 2023-01-20
+tags: [es, open_source, roberta, question_answering, tensorflow]
+task: Question Answering
+language: es
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: RoBertaForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained RobertaForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `roberta-base-spanish-squades-becasIncentivos3` is a Spanish model originally trained by `Evelyn18`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/roberta_qa_base_spanish_squades_becasincentivos3_es_4.3.0_3.0_1674218087235.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/roberta_qa_base_spanish_squades_becasincentivos3_es_4.3.0_3.0_1674218087235.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_base_spanish_squades_becasincentivos3","es")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_base_spanish_squades_becasincentivos3","es")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq("What's my name?","My name is Clara and I live in Berkeley.").toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|roberta_qa_base_spanish_squades_becasincentivos3|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|es|
+|Size:|459.8 MB|
+|Case sensitive:|true|
+|Max sentence length:|256|
+
+## References
+
+- https://huggingface.co/Evelyn18/roberta-base-spanish-squades-becasIncentivos3
\ No newline at end of file
diff --git a/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_spanish_squades_becasincentivos4_es.md b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_spanish_squades_becasincentivos4_es.md
new file mode 100644
index 00000000000000..cf4e8d32272caa
--- /dev/null
+++ b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_spanish_squades_becasincentivos4_es.md
@@ -0,0 +1,88 @@
+---
+layout: model
+title: Spanish RobertaForQuestionAnswering Base Cased model (from Evelyn18)
+author: John Snow Labs
+name: roberta_qa_base_spanish_squades_becasincentivos4
+date: 2023-01-20
+tags: [es, open_source, roberta, question_answering, tensorflow]
+task: Question Answering
+language: es
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: RoBertaForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained RobertaForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `roberta-base-spanish-squades-becasIncentivos4` is a Spanish model originally trained by `Evelyn18`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/roberta_qa_base_spanish_squades_becasincentivos4_es_4.3.0_3.0_1674218146985.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/roberta_qa_base_spanish_squades_becasincentivos4_es_4.3.0_3.0_1674218146985.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_base_spanish_squades_becasincentivos4","es")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_base_spanish_squades_becasincentivos4","es")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq("What's my name?","My name is Clara and I live in Berkeley.").toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|roberta_qa_base_spanish_squades_becasincentivos4|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|es|
+|Size:|459.8 MB|
+|Case sensitive:|true|
+|Max sentence length:|256|
+
+## References
+
+- https://huggingface.co/Evelyn18/roberta-base-spanish-squades-becasIncentivos4
\ No newline at end of file
diff --git a/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_spanish_squades_becasincentivos6_es.md b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_spanish_squades_becasincentivos6_es.md
new file mode 100644
index 00000000000000..8c094f4baf3bbd
--- /dev/null
+++ b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_spanish_squades_becasincentivos6_es.md
@@ -0,0 +1,88 @@
+---
+layout: model
+title: Spanish RobertaForQuestionAnswering Base Cased model (from Evelyn18)
+author: John Snow Labs
+name: roberta_qa_base_spanish_squades_becasincentivos6
+date: 2023-01-20
+tags: [es, open_source, roberta, question_answering, tensorflow]
+task: Question Answering
+language: es
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: RoBertaForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained RobertaForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `roberta-base-spanish-squades-becasIncentivos6` is a Spanish model originally trained by `Evelyn18`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/roberta_qa_base_spanish_squades_becasincentivos6_es_4.3.0_3.0_1674218206378.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/roberta_qa_base_spanish_squades_becasincentivos6_es_4.3.0_3.0_1674218206378.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_base_spanish_squades_becasincentivos6","es")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_base_spanish_squades_becasincentivos6","es")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq("What's my name?","My name is Clara and I live in Berkeley.").toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|roberta_qa_base_spanish_squades_becasincentivos6|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|es|
+|Size:|459.9 MB|
+|Case sensitive:|true|
+|Max sentence length:|256|
+
+## References
+
+- https://huggingface.co/Evelyn18/roberta-base-spanish-squades-becasIncentivos6
\ No newline at end of file
diff --git a/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_spanish_squades_becasv3_es.md b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_spanish_squades_becasv3_es.md
new file mode 100644
index 00000000000000..abb58eb926e47c
--- /dev/null
+++ b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_spanish_squades_becasv3_es.md
@@ -0,0 +1,88 @@
+---
+layout: model
+title: Spanish RobertaForQuestionAnswering Base Cased model (from Evelyn18)
+author: John Snow Labs
+name: roberta_qa_base_spanish_squades_becasv3
+date: 2023-01-20
+tags: [es, open_source, roberta, question_answering, tensorflow]
+task: Question Answering
+language: es
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: RoBertaForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained RobertaForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `roberta-base-spanish-squades-becasv3` is a Spanish model originally trained by `Evelyn18`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/roberta_qa_base_spanish_squades_becasv3_es_4.3.0_3.0_1674218266592.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/roberta_qa_base_spanish_squades_becasv3_es_4.3.0_3.0_1674218266592.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_base_spanish_squades_becasv3","es")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_base_spanish_squades_becasv3","es")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq("What's my name?","My name is Clara and I live in Berkeley.").toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|roberta_qa_base_spanish_squades_becasv3|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|es|
+|Size:|459.9 MB|
+|Case sensitive:|true|
+|Max sentence length:|256|
+
+## References
+
+- https://huggingface.co/Evelyn18/roberta-base-spanish-squades-becasv3
\ No newline at end of file
diff --git a/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_spanish_squades_modelo1_es.md b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_spanish_squades_modelo1_es.md
new file mode 100644
index 00000000000000..a506561a8c671e
--- /dev/null
+++ b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_spanish_squades_modelo1_es.md
@@ -0,0 +1,88 @@
+---
+layout: model
+title: Spanish RobertaForQuestionAnswering Base Cased model (from Evelyn18)
+author: John Snow Labs
+name: roberta_qa_base_spanish_squades_modelo1
+date: 2023-01-20
+tags: [es, open_source, roberta, question_answering, tensorflow]
+task: Question Answering
+language: es
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: RoBertaForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained RobertaForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `roberta-base-spanish-squades-modelo1` is a Spanish model originally trained by `Evelyn18`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/roberta_qa_base_spanish_squades_modelo1_es_4.3.0_3.0_1674218442505.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/roberta_qa_base_spanish_squades_modelo1_es_4.3.0_3.0_1674218442505.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_base_spanish_squades_modelo1","es")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_base_spanish_squades_modelo1","es")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq("What's my name?","My name is Clara and I live in Berkeley.").toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|roberta_qa_base_spanish_squades_modelo1|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|es|
+|Size:|460.0 MB|
+|Case sensitive:|true|
+|Max sentence length:|256|
+
+## References
+
+- https://huggingface.co/Evelyn18/roberta-base-spanish-squades-modelo1
\ No newline at end of file
diff --git a/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_spanish_squades_modelo2_es.md b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_spanish_squades_modelo2_es.md
new file mode 100644
index 00000000000000..7d5336018fd34b
--- /dev/null
+++ b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_spanish_squades_modelo2_es.md
@@ -0,0 +1,88 @@
+---
+layout: model
+title: Spanish RobertaForQuestionAnswering Base Cased model (from Evelyn18)
+author: John Snow Labs
+name: roberta_qa_base_spanish_squades_modelo2
+date: 2023-01-20
+tags: [es, open_source, roberta, question_answering, tensorflow]
+task: Question Answering
+language: es
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: RoBertaForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained RobertaForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `roberta-base-spanish-squades-modelo2` is a Spanish model originally trained by `Evelyn18`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/roberta_qa_base_spanish_squades_modelo2_es_4.3.0_3.0_1674218501375.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/roberta_qa_base_spanish_squades_modelo2_es_4.3.0_3.0_1674218501375.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_base_spanish_squades_modelo2","es")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_base_spanish_squades_modelo2","es")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq("What's my name?","My name is Clara and I live in Berkeley.").toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|roberta_qa_base_spanish_squades_modelo2|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|es|
+|Size:|459.9 MB|
+|Case sensitive:|true|
+|Max sentence length:|256|
+
+## References
+
+- https://huggingface.co/Evelyn18/roberta-base-spanish-squades-modelo2
\ No newline at end of file
diff --git a/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_spanish_squades_modelo_v1_es.md b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_spanish_squades_modelo_v1_es.md
new file mode 100644
index 00000000000000..eff5a29b02c9a6
--- /dev/null
+++ b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_spanish_squades_modelo_v1_es.md
@@ -0,0 +1,88 @@
+---
+layout: model
+title: Spanish RobertaForQuestionAnswering Base Cased model (from Evelyn18)
+author: John Snow Labs
+name: roberta_qa_base_spanish_squades_modelo_v1
+date: 2023-01-20
+tags: [es, open_source, roberta, question_answering, tensorflow]
+task: Question Answering
+language: es
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: RoBertaForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained RobertaForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `roberta-base-spanish-squades-modelo-robertav1` is a Spanish model originally trained by `Evelyn18`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/roberta_qa_base_spanish_squades_modelo_v1_es_4.3.0_3.0_1674218326080.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/roberta_qa_base_spanish_squades_modelo_v1_es_4.3.0_3.0_1674218326080.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_base_spanish_squades_modelo_v1","es")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_base_spanish_squades_modelo_v1","es")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq("What's my name?","My name is Clara and I live in Berkeley.").toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|roberta_qa_base_spanish_squades_modelo_v1|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|es|
+|Size:|460.0 MB|
+|Case sensitive:|true|
+|Max sentence length:|256|
+
+## References
+
+- https://huggingface.co/Evelyn18/roberta-base-spanish-squades-modelo-robertav1
\ No newline at end of file
diff --git a/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_spanish_squades_modelo_v1b3_es.md b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_spanish_squades_modelo_v1b3_es.md
new file mode 100644
index 00000000000000..11f324402fbc44
--- /dev/null
+++ b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_spanish_squades_modelo_v1b3_es.md
@@ -0,0 +1,88 @@
+---
+layout: model
+title: Spanish RobertaForQuestionAnswering Base Cased model (from Evelyn18)
+author: John Snow Labs
+name: roberta_qa_base_spanish_squades_modelo_v1b3
+date: 2023-01-20
+tags: [es, open_source, roberta, question_answering, tensorflow]
+task: Question Answering
+language: es
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: RoBertaForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained RobertaForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `roberta-base-spanish-squades-modelo-robertav1b3` is a Spanish model originally trained by `Evelyn18`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/roberta_qa_base_spanish_squades_modelo_v1b3_es_4.3.0_3.0_1674218384530.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/roberta_qa_base_spanish_squades_modelo_v1b3_es_4.3.0_3.0_1674218384530.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_base_spanish_squades_modelo_v1b3","es")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_base_spanish_squades_modelo_v1b3","es")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq("What's my name?","My name is Clara and I live in Berkeley.").toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|roberta_qa_base_spanish_squades_modelo_v1b3|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|es|
+|Size:|460.0 MB|
+|Case sensitive:|true|
+|Max sentence length:|256|
+
+## References
+
+- https://huggingface.co/Evelyn18/roberta-base-spanish-squades-modelo-robertav1b3
\ No newline at end of file
diff --git a/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_spanish_squades_v2_es.md b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_spanish_squades_v2_es.md
new file mode 100644
index 00000000000000..b3e80c953fb62b
--- /dev/null
+++ b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_spanish_squades_v2_es.md
@@ -0,0 +1,88 @@
+---
+layout: model
+title: Spanish RobertaForQuestionAnswering Base Cased model (from Evelyn18)
+author: John Snow Labs
+name: roberta_qa_base_spanish_squades_v2
+date: 2023-01-20
+tags: [es, open_source, roberta, question_answering, tensorflow]
+task: Question Answering
+language: es
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: RoBertaForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained RobertaForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `roberta-base-spanish-squades-robertav2` is a Spanish model originally trained by `Evelyn18`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/roberta_qa_base_spanish_squades_v2_es_4.3.0_3.0_1674218559260.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/roberta_qa_base_spanish_squades_v2_es_4.3.0_3.0_1674218559260.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_base_spanish_squades_v2","es")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_base_spanish_squades_v2","es")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq("What's my name?","My name is Clara and I live in Berkeley.").toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|roberta_qa_base_spanish_squades_v2|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|es|
+|Size:|460.0 MB|
+|Case sensitive:|true|
+|Max sentence length:|256|
+
+## References
+
+- https://huggingface.co/Evelyn18/roberta-base-spanish-squades-robertav2
\ No newline at end of file
diff --git a/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_squad2.0_en.md b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_squad2.0_en.md
new file mode 100644
index 00000000000000..4d14c8d54b221d
--- /dev/null
+++ b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_squad2.0_en.md
@@ -0,0 +1,88 @@
+---
+layout: model
+title: English RobertaForQuestionAnswering Base Cased model (from AnonymousSub)
+author: John Snow Labs
+name: roberta_qa_base_squad2.0
+date: 2023-01-20
+tags: [en, open_source, roberta, question_answering, tensorflow]
+task: Question Answering
+language: en
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: RoBertaForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained RobertaForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `roberta-base_squad2.0` is an English model originally trained by `AnonymousSub`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/roberta_qa_base_squad2.0_en_4.3.0_3.0_1674219848563.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/roberta_qa_base_squad2.0_en_4.3.0_3.0_1674219848563.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_base_squad2.0","en")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_base_squad2.0","en")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq("What's my name?","My name is Clara and I live in Berkeley.").toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|roberta_qa_base_squad2.0|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|460.4 MB|
+|Case sensitive:|true|
+|Max sentence length:|256|
+
+## References
+
+- https://huggingface.co/AnonymousSub/roberta-base_squad2.0
\ No newline at end of file
diff --git a/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_squad2_boolq_baseline_en.md b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_squad2_boolq_baseline_en.md
new file mode 100644
index 00000000000000..57374a46dd6717
--- /dev/null
+++ b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_squad2_boolq_baseline_en.md
@@ -0,0 +1,88 @@
+---
+layout: model
+title: English RobertaForQuestionAnswering Base Cased model (from shahrukhx01)
+author: John Snow Labs
+name: roberta_qa_base_squad2_boolq_baseline
+date: 2023-01-20
+tags: [en, open_source, roberta, question_answering, tensorflow]
+task: Question Answering
+language: en
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: RoBertaForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained RobertaForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `roberta-base-squad2-boolq-baseline` is an English model originally trained by `shahrukhx01`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/roberta_qa_base_squad2_boolq_baseline_en_4.3.0_3.0_1674219076102.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/roberta_qa_base_squad2_boolq_baseline_en_4.3.0_3.0_1674219076102.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_base_squad2_boolq_baseline","en")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_base_squad2_boolq_baseline","en")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq("What's my name?","My name is Clara and I live in Berkeley.").toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|roberta_qa_base_squad2_boolq_baseline|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|464.3 MB|
+|Case sensitive:|true|
+|Max sentence length:|256|
+
+## References
+
+- https://huggingface.co/shahrukhx01/roberta-base-squad2-boolq-baseline
\ No newline at end of file
diff --git a/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_squad2_en.md b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_squad2_en.md
new file mode 100644
index 00000000000000..c756103d017385
--- /dev/null
+++ b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_squad2_en.md
@@ -0,0 +1,88 @@
+---
+layout: model
+title: English RobertaForQuestionAnswering Base Cased model (from twmkn9)
+author: John Snow Labs
+name: roberta_qa_base_squad2
+date: 2023-01-20
+tags: [en, open_source, roberta, question_answering, tensorflow]
+task: Question Answering
+language: en
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: RoBertaForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained RobertaForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `distilroberta-base-squad2` is an English model originally trained by `twmkn9`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/roberta_qa_base_squad2_en_4.3.0_3.0_1674210478798.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/roberta_qa_base_squad2_en_4.3.0_3.0_1674210478798.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_base_squad2","en")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_base_squad2","en")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq("What's my name?","My name is Clara and I live in Berkeley.").toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|roberta_qa_base_squad2|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|307.1 MB|
+|Case sensitive:|true|
+|Max sentence length:|256|
+
+## References
+
+- https://huggingface.co/twmkn9/distilroberta-base-squad2
\ No newline at end of file
diff --git a/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_squad2_finetuned_sel_en.md b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_squad2_finetuned_sel_en.md
new file mode 100644
index 00000000000000..2a26d236171a8c
--- /dev/null
+++ b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_squad2_finetuned_sel_en.md
@@ -0,0 +1,88 @@
+---
+layout: model
+title: English RobertaForQuestionAnswering Base Cased model (from Teepika)
+author: John Snow Labs
+name: roberta_qa_base_squad2_finetuned_sel
+date: 2023-01-20
+tags: [en, open_source, roberta, question_answering, tensorflow]
+task: Question Answering
+language: en
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: RoBertaForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained RobertaForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `roberta-base-squad2-finetuned-selqa` is an English model originally trained by `Teepika`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/roberta_qa_base_squad2_finetuned_sel_en_4.3.0_3.0_1674219133811.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/roberta_qa_base_squad2_finetuned_sel_en_4.3.0_3.0_1674219133811.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_base_squad2_finetuned_sel","en")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_base_squad2_finetuned_sel","en")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq("What's my name?","My name is Clara and I live in Berkeley.").toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|roberta_qa_base_squad2_finetuned_sel|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|464.8 MB|
+|Case sensitive:|true|
+|Max sentence length:|256|
+
+## References
+
+- https://huggingface.co/Teepika/roberta-base-squad2-finetuned-selqa
\ No newline at end of file
diff --git a/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_squad2_finetuned_squad2_en.md b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_squad2_finetuned_squad2_en.md
new file mode 100644
index 00000000000000..8e298a6909fd4d
--- /dev/null
+++ b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_squad2_finetuned_squad2_en.md
@@ -0,0 +1,88 @@
+---
+layout: model
+title: English RobertaForQuestionAnswering Base Cased model (from Marscen)
+author: John Snow Labs
+name: roberta_qa_base_squad2_finetuned_squad2
+date: 2023-01-20
+tags: [en, open_source, roberta, question_answering, tensorflow]
+task: Question Answering
+language: en
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: RoBertaForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained RobertaForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `roberta-base-squad2-finetuned-squad2` is an English model originally trained by `Marscen`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/roberta_qa_base_squad2_finetuned_squad2_en_4.3.0_3.0_1674219552956.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/roberta_qa_base_squad2_finetuned_squad2_en_4.3.0_3.0_1674219552956.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_base_squad2_finetuned_squad2","en")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_base_squad2_finetuned_squad2","en")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq("What's my name?","My name is Clara and I live in Berkeley.").toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|roberta_qa_base_squad2_finetuned_squad2|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|464.3 MB|
+|Case sensitive:|true|
+|Max sentence length:|256|
+
+## References
+
+- https://huggingface.co/Marscen/roberta-base-squad2-finetuned-squad2
\ No newline at end of file
diff --git a/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_squad2_finetuned_visquad_en.md b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_squad2_finetuned_visquad_en.md
new file mode 100644
index 00000000000000..03824fb45b7ebc
--- /dev/null
+++ b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_squad2_finetuned_visquad_en.md
@@ -0,0 +1,88 @@
+---
+layout: model
+title: English RobertaForQuestionAnswering Base Cased model (from khoanvm)
+author: John Snow Labs
+name: roberta_qa_base_squad2_finetuned_visquad
+date: 2023-01-20
+tags: [en, open_source, roberta, question_answering, tensorflow]
+task: Question Answering
+language: en
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: RoBertaForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained RobertaForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `roberta-base-squad2-finetuned-visquad` is an English model originally trained by `khoanvm`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/roberta_qa_base_squad2_finetuned_visquad_en_4.3.0_3.0_1674219613502.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/roberta_qa_base_squad2_finetuned_visquad_en_4.3.0_3.0_1674219613502.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_base_squad2_finetuned_visquad","en")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_base_squad2_finetuned_visquad","en")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq("What's my name?","My name is Clara and I live in Berkeley.").toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|roberta_qa_base_squad2_finetuned_visquad|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|464.3 MB|
+|Case sensitive:|true|
+|Max sentence length:|256|
+
+## References
+
+- https://huggingface.co/khoanvm/roberta-base-squad2-finetuned-visquad
\ No newline at end of file
diff --git a/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_squad2_nq_en.md b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_squad2_nq_en.md
new file mode 100644
index 00000000000000..c869fed5d040a9
--- /dev/null
+++ b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_squad2_nq_en.md
@@ -0,0 +1,89 @@
+---
+layout: model
+title: English RobertaForQuestionAnswering Base Cased model (from nlpconnect)
+author: John Snow Labs
+name: roberta_qa_base_squad2_nq
+date: 2023-01-20
+tags: [en, open_source, roberta, question_answering, tensorflow]
+task: Question Answering
+language: en
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: RoBertaForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained RobertaForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `roberta-base-squad2-nq` is an English model originally trained by `nlpconnect`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/roberta_qa_base_squad2_nq_en_4.3.0_3.0_1674219670141.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/roberta_qa_base_squad2_nq_en_4.3.0_3.0_1674219670141.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_base_squad2_nq","en")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_base_squad2_nq","en")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq("What's my name?","My name is Clara and I live in Berkeley.").toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|roberta_qa_base_squad2_nq|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|464.3 MB|
+|Case sensitive:|true|
+|Max sentence length:|256|
+
+## References
+
+- https://huggingface.co/nlpconnect/roberta-base-squad2-nq
+- https://paperswithcode.com/sota?task=Question+Answering&dataset=squad_v2
\ No newline at end of file
diff --git a/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_squad2_ta_qna_10e_en.md b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_squad2_ta_qna_10e_en.md
new file mode 100644
index 00000000000000..dc56b97a8f652c
--- /dev/null
+++ b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_squad2_ta_qna_10e_en.md
@@ -0,0 +1,88 @@
+---
+layout: model
+title: English RobertaForQuestionAnswering Base Cased model (from venkateshdas)
+author: John Snow Labs
+name: roberta_qa_base_squad2_ta_qna_10e
+date: 2023-01-20
+tags: [en, open_source, roberta, question_answering, tensorflow]
+task: Question Answering
+language: en
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: RoBertaForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained RobertaForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `roberta-base-squad2-ta-qna-roberta10e` is an English model originally trained by `venkateshdas`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/roberta_qa_base_squad2_ta_qna_10e_en_4.3.0_3.0_1674219728754.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/roberta_qa_base_squad2_ta_qna_10e_en_4.3.0_3.0_1674219728754.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_base_squad2_ta_qna_10e","en")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_base_squad2_ta_qna_10e","en")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq("What's my name?","My name is Clara and I live in Berkeley.").toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|roberta_qa_base_squad2_ta_qna_10e|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|464.3 MB|
+|Case sensitive:|true|
+|Max sentence length:|256|
+
+## References
+
+- https://huggingface.co/venkateshdas/roberta-base-squad2-ta-qna-roberta10e
\ No newline at end of file
diff --git a/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_squad2_ta_qna_3e_en.md b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_squad2_ta_qna_3e_en.md
new file mode 100644
index 00000000000000..4370a4407c6a77
--- /dev/null
+++ b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_squad2_ta_qna_3e_en.md
@@ -0,0 +1,88 @@
+---
+layout: model
+title: English RobertaForQuestionAnswering Base Cased model (from venkateshdas)
+author: John Snow Labs
+name: roberta_qa_base_squad2_ta_qna_3e
+date: 2023-01-20
+tags: [en, open_source, roberta, question_answering, tensorflow]
+task: Question Answering
+language: en
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: RoBertaForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained RobertaForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `roberta-base-squad2-ta-qna-roberta3e` is an English model originally trained by `venkateshdas`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/roberta_qa_base_squad2_ta_qna_3e_en_4.3.0_3.0_1674219788427.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/roberta_qa_base_squad2_ta_qna_3e_en_4.3.0_3.0_1674219788427.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_base_squad2_ta_qna_3e","en")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_base_squad2_ta_qna_3e","en")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq("What's my name?","My name is Clara and I live in Berkeley.").toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|roberta_qa_base_squad2_ta_qna_3e|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|464.3 MB|
+|Case sensitive:|true|
+|Max sentence length:|256|
+
+## References
+
+- https://huggingface.co/venkateshdas/roberta-base-squad2-ta-qna-roberta3e
\ No newline at end of file
diff --git a/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_squad_en.md b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_squad_en.md
new file mode 100644
index 00000000000000..b94aa125e14fab
--- /dev/null
+++ b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_squad_en.md
@@ -0,0 +1,88 @@
+---
+layout: model
+title: English RobertaForQuestionAnswering Base Cased model (from Palak)
+author: John Snow Labs
+name: roberta_qa_base_squad
+date: 2023-01-20
+tags: [en, open_source, roberta, question_answering, tensorflow]
+task: Question Answering
+language: en
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: RoBertaForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained RobertaForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `distilroberta-base_squad` is an English model originally trained by `Palak`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/roberta_qa_base_squad_en_4.3.0_3.0_1674210601142.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/roberta_qa_base_squad_en_4.3.0_3.0_1674210601142.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_base_squad","en")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_base_squad","en")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq("What's my name?","My name is Clara and I live in Berkeley.").toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|roberta_qa_base_squad|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document, token]|
+|Output Labels:|[class]|
+|Language:|en|
+|Size:|307.1 MB|
+|Case sensitive:|true|
+|Max sentence length:|256|
+
+## References
+
+- https://huggingface.co/Palak/distilroberta-base_squad
\ No newline at end of file
diff --git a/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_squad_finetuned_on_runaways_en.md b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_squad_finetuned_on_runaways_en.md
new file mode 100644
index 00000000000000..99d0c69bb25e18
--- /dev/null
+++ b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_squad_finetuned_on_runaways_en.md
@@ -0,0 +1,88 @@
+---
+layout: model
+title: English RobertaForQuestionAnswering Base Cased model (from Nadav)
+author: John Snow Labs
+name: roberta_qa_base_squad_finetuned_on_runaways
+date: 2023-01-20
+tags: [en, open_source, roberta, question_answering, tensorflow]
+task: Question Answering
+language: en
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: RoBertaForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained RobertaForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `roberta-base-squad-finetuned-on-runaways-en` is an English model originally trained by `Nadav`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/roberta_qa_base_squad_finetuned_on_runaways_en_4.3.0_3.0_1674218728000.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/roberta_qa_base_squad_finetuned_on_runaways_en_4.3.0_3.0_1674218728000.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_base_squad_finetuned_on_runaways","en")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_base_squad_finetuned_on_runaways","en")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq("What's my name?","My name is Clara and I live in Berkeley.").toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|roberta_qa_base_squad_finetuned_on_runaways|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document, token]|
+|Output Labels:|[class]|
+|Language:|en|
+|Size:|467.0 MB|
+|Case sensitive:|true|
+|Max sentence length:|256|
+
+## References
+
+- https://huggingface.co/Nadav/roberta-base-squad-finetuned-on-runaways-en
\ No newline at end of file
diff --git a/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_squad_finetuned_squad_en.md b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_squad_finetuned_squad_en.md
new file mode 100644
index 00000000000000..2e9f1b9b509add
--- /dev/null
+++ b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_squad_finetuned_squad_en.md
@@ -0,0 +1,88 @@
+---
+layout: model
+title: English RobertaForQuestionAnswering Base Cased model (from Swty)
+author: John Snow Labs
+name: roberta_qa_base_squad_finetuned_squad
+date: 2023-01-20
+tags: [en, open_source, roberta, question_answering, tensorflow]
+task: Question Answering
+language: en
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: RoBertaForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained RobertaForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `roberta-base-squad-finetuned-squad` is an English model originally trained by `Swty`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/roberta_qa_base_squad_finetuned_squad_en_4.3.0_3.0_1674218785890.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/roberta_qa_base_squad_finetuned_squad_en_4.3.0_3.0_1674218785890.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_base_squad_finetuned_squad","en")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_base_squad_finetuned_squad","en")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq("What's my name?","My name is Clara and I live in Berkeley.").toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|roberta_qa_base_squad_finetuned_squad|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document, token]|
+|Output Labels:|[class]|
+|Language:|en|
+|Size:|464.3 MB|
+|Case sensitive:|true|
+|Max sentence length:|256|
+
+## References
+
+- https://huggingface.co/Swty/roberta-base-squad-finetuned-squad
\ No newline at end of file
diff --git a/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_squad_nl.md b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_squad_nl.md
new file mode 100644
index 00000000000000..2674babc920c13
--- /dev/null
+++ b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_squad_nl.md
@@ -0,0 +1,88 @@
+---
+layout: model
+title: Dutch RobertaForQuestionAnswering Base Cased model (from Nadav)
+author: John Snow Labs
+name: roberta_qa_base_squad
+date: 2023-01-20
+tags: [nl, open_source, roberta, question_answering, tensorflow]
+task: Question Answering
+language: nl
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: RoBertaForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained RobertaForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `roberta-base-squad-nl` is a Dutch model originally trained by `Nadav`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/roberta_qa_base_squad_nl_4.3.0_3.0_1674218840970.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/roberta_qa_base_squad_nl_4.3.0_3.0_1674218840970.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_base_squad","nl")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_base_squad","nl")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq("What's my name?","My name is Clara and I live in Berkeley.").toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|roberta_qa_base_squad|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document, token]|
+|Output Labels:|[class]|
+|Language:|nl|
+|Size:|436.5 MB|
+|Case sensitive:|true|
+|Max sentence length:|256|
+
+## References
+
+- https://huggingface.co/Nadav/roberta-base-squad-nl
\ No newline at end of file
diff --git a/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_super_1_en.md b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_super_1_en.md
new file mode 100644
index 00000000000000..5cdc94c62932e3
--- /dev/null
+++ b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_super_1_en.md
@@ -0,0 +1,89 @@
+---
+layout: model
+title: English RobertaForQuestionAnswering Base Cased model (from nbroad)
+author: John Snow Labs
+name: roberta_qa_base_super_1
+date: 2023-01-20
+tags: [en, open_source, roberta, question_answering, tensorflow]
+task: Question Answering
+language: en
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: RoBertaForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained RobertaForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `rob-base-superqa1` is an English model originally trained by `nbroad`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/roberta_qa_base_super_1_en_4.3.0_3.0_1674212339330.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/roberta_qa_base_super_1_en_4.3.0_3.0_1674212339330.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_base_super_1","en")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_base_super_1","en")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq("What's my name?","My name is Clara and I live in Berkeley.").toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|roberta_qa_base_super_1|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document, token]|
+|Output Labels:|[class]|
+|Language:|en|
+|Size:|464.1 MB|
+|Case sensitive:|true|
+|Max sentence length:|256|
+
+## References
+
+- https://huggingface.co/nbroad/rob-base-superqa1
+- https://paperswithcode.com/sota?task=Question+Answering&dataset=adversarial_qa
\ No newline at end of file
diff --git a/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_super_2_en.md b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_super_2_en.md
new file mode 100644
index 00000000000000..b573d0ea37758b
--- /dev/null
+++ b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_super_2_en.md
@@ -0,0 +1,89 @@
+---
+layout: model
+title: English RobertaForQuestionAnswering Base Cased model (from nbroad)
+author: John Snow Labs
+name: roberta_qa_base_super_2
+date: 2023-01-20
+tags: [en, open_source, roberta, question_answering, tensorflow]
+task: Question Answering
+language: en
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: RoBertaForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained RobertaForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `rob-base-superqa2` is an English model originally trained by `nbroad`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/roberta_qa_base_super_2_en_4.3.0_3.0_1674212395438.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/roberta_qa_base_super_2_en_4.3.0_3.0_1674212395438.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_base_super_2","en")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_base_super_2","en")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq("What's my name?","My name is Clara and I live in Berkeley.").toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|roberta_qa_base_super_2|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document, token]|
+|Output Labels:|[class]|
+|Language:|en|
+|Size:|464.1 MB|
+|Case sensitive:|true|
+|Max sentence length:|256|
+
+## References
+
+- https://huggingface.co/nbroad/rob-base-superqa2
+- https://paperswithcode.com/sota?task=Question+Answering&dataset=squad_v2
\ No newline at end of file
diff --git a/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_task_specific_distilation_on_squad_en.md b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_task_specific_distilation_on_squad_en.md
new file mode 100644
index 00000000000000..f6ff4b920e5f26
--- /dev/null
+++ b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_task_specific_distilation_on_squad_en.md
@@ -0,0 +1,88 @@
+---
+layout: model
+title: English RobertaForQuestionAnswering Base Cased model (from anas-awadalla)
+author: John Snow Labs
+name: roberta_qa_base_task_specific_distilation_on_squad
+date: 2023-01-20
+tags: [en, open_source, roberta, question_answering, tensorflow]
+task: Question Answering
+language: en
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: RoBertaForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained RobertaForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `distilroberta-base-task-specific-distilation-on-squad` is an English model originally trained by `anas-awadalla`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/roberta_qa_base_task_specific_distilation_on_squad_en_4.3.0_3.0_1674210519978.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/roberta_qa_base_task_specific_distilation_on_squad_en_4.3.0_3.0_1674210519978.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_base_task_specific_distilation_on_squad","en")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_base_task_specific_distilation_on_squad","en")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq("What's my name?","My name is Clara and I live in Berkeley.").toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|roberta_qa_base_task_specific_distilation_on_squad|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document, token]|
+|Output Labels:|[class]|
+|Language:|en|
+|Size:|307.1 MB|
+|Case sensitive:|true|
+|Max sentence length:|256|
+
+## References
+
+- https://huggingface.co/anas-awadalla/distilroberta-base-task-specific-distilation-on-squad
\ No newline at end of file
diff --git a/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_tweet_model_en.md b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_tweet_model_en.md
new file mode 100644
index 00000000000000..4ce45184023538
--- /dev/null
+++ b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_tweet_model_en.md
@@ -0,0 +1,88 @@
+---
+layout: model
+title: English RobertaForQuestionAnswering Base Cased model (from saburbutt)
+author: John Snow Labs
+name: roberta_qa_base_tweet_model
+date: 2023-01-20
+tags: [en, open_source, roberta, question_answering, tensorflow]
+task: Question Answering
+language: en
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: RoBertaForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained RobertaForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `roberta_base_tweetqa_model` is an English model originally trained by `saburbutt`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/roberta_qa_base_tweet_model_en_4.3.0_3.0_1674223144072.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/roberta_qa_base_tweet_model_en_4.3.0_3.0_1674223144072.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_base_tweet_model","en")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_base_tweet_model","en")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq("What's my name?","My name is Clara and I live in Berkeley.").toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|roberta_qa_base_tweet_model|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document, token]|
+|Output Labels:|[class]|
+|Language:|en|
+|Size:|432.7 MB|
+|Case sensitive:|true|
+|Max sentence length:|256|
+
+## References
+
+- https://huggingface.co/saburbutt/roberta_base_tweetqa_model
\ No newline at end of file
diff --git a/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_uncased_squad_en.md b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_uncased_squad_en.md
new file mode 100644
index 00000000000000..862892b86a046b
--- /dev/null
+++ b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_base_uncased_squad_en.md
@@ -0,0 +1,88 @@
+---
+layout: model
+title: English RobertaForQuestionAnswering Base Uncased model (from t15)
+author: John Snow Labs
+name: roberta_qa_base_uncased_squad
+date: 2023-01-20
+tags: [en, open_source, roberta, question_answering, tensorflow]
+task: Question Answering
+language: en
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: RoBertaForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained RobertaForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `distilroberta-base-uncased-squad` is an English model originally trained by `t15`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/roberta_qa_base_uncased_squad_en_4.3.0_3.0_1674210559838.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/roberta_qa_base_uncased_squad_en_4.3.0_3.0_1674210559838.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_base_uncased_squad","en")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_base_uncased_squad","en")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq("What's my name?","My name is Clara and I live in Berkeley.").toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|roberta_qa_base_uncased_squad|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document, token]|
+|Output Labels:|[class]|
+|Language:|en|
+|Size:|307.1 MB|
+|Case sensitive:|false|
+|Max sentence length:|256|
+
+## References
+
+- https://huggingface.co/t15/distilroberta-base-uncased-squad
\ No newline at end of file
diff --git a/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_becasv4.1_es.md b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_becasv4.1_es.md
new file mode 100644
index 00000000000000..9205d78fc7ee18
--- /dev/null
+++ b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_becasv4.1_es.md
@@ -0,0 +1,88 @@
+---
+layout: model
+title: Spanish RobertaForQuestionAnswering Cased model (from Evelyn18)
+author: John Snow Labs
+name: roberta_qa_becasv4.1
+date: 2023-01-20
+tags: [es, open_source, roberta, question_answering, tensorflow]
+task: Question Answering
+language: es
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: RoBertaForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained RobertaForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `BECASV4.1` is a Spanish model originally trained by `Evelyn18`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/roberta_qa_becasv4.1_es_4.3.0_3.0_1674207847912.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/roberta_qa_becasv4.1_es_4.3.0_3.0_1674207847912.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_becasv4.1","es")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_becasv4.1","es")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq("What's my name?","My name is Clara and I live in Berkeley.").toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|roberta_qa_becasv4.1|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document, token]|
+|Output Labels:|[class]|
+|Language:|es|
+|Size:|459.9 MB|
+|Case sensitive:|true|
+|Max sentence length:|256|
+
+## References
+
+- https://huggingface.co/Evelyn18/BECASV4.1
\ No newline at end of file
diff --git a/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_bertserini_base_en.md b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_bertserini_base_en.md
new file mode 100644
index 00000000000000..6f21982ef19df4
--- /dev/null
+++ b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_bertserini_base_en.md
@@ -0,0 +1,88 @@
+---
+layout: model
+title: English RobertaForQuestionAnswering Base Cased model (from rsvp-ai)
+author: John Snow Labs
+name: roberta_qa_bertserini_base
+date: 2023-01-20
+tags: [en, open_source, roberta, question_answering, tensorflow]
+task: Question Answering
+language: en
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: RoBertaForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained RobertaForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `bertserini-roberta-base` is an English model originally trained by `rsvp-ai`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/roberta_qa_bertserini_base_en_4.3.0_3.0_1674209264464.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/roberta_qa_bertserini_base_en_4.3.0_3.0_1674209264464.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_bertserini_base","en")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_bertserini_base","en")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq("What's my name?","My name is Clara and I live in Berkeley.").toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|roberta_qa_bertserini_base|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|449.3 MB|
+|Case sensitive:|true|
+|Max sentence length:|256|
+
+## References
+
+- https://huggingface.co/rsvp-ai/bertserini-roberta-base
\ No newline at end of file
diff --git a/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_ca_v2_squac_ca_catalan_ca.md b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_ca_v2_squac_ca_catalan_ca.md
new file mode 100644
index 00000000000000..05e0e778e077bc
--- /dev/null
+++ b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_ca_v2_squac_ca_catalan_ca.md
@@ -0,0 +1,88 @@
+---
+layout: model
+title: Catalan RobertaForQuestionAnswering Cased model (from crodri)
+author: John Snow Labs
+name: roberta_qa_ca_v2_squac_ca_catalan
+date: 2023-01-20
+tags: [ca, open_source, roberta, question_answering, tensorflow]
+task: Question Answering
+language: ca
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: RoBertaForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained RobertaForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `roberta-ca-v2-qa-squac-ca-catalanqa` is a Catalan model originally trained by `crodri`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/roberta_qa_ca_v2_squac_ca_catalan_ca_4.3.0_3.0_1674219909583.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/roberta_qa_ca_v2_squac_ca_catalan_ca_4.3.0_3.0_1674219909583.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_ca_v2_squac_ca_catalan","ca")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_ca_v2_squac_ca_catalan","ca")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq("What's my name?","My name is Clara and I live in Berkeley.").toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|roberta_qa_ca_v2_squac_ca_catalan|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|ca|
+|Size:|461.2 MB|
+|Case sensitive:|true|
+|Max sentence length:|256|
+
+## References
+
+- https://huggingface.co/crodri/roberta-ca-v2-qa-squac-ca-catalanqa
\ No newline at end of file
diff --git a/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_canard_en.md b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_canard_en.md
new file mode 100644
index 00000000000000..5dbb2e78b76ac2
--- /dev/null
+++ b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_canard_en.md
@@ -0,0 +1,88 @@
+---
+layout: model
+title: English RobertaForQuestionAnswering Cased model (from peggyhuang)
+author: John Snow Labs
+name: roberta_qa_canard
+date: 2023-01-20
+tags: [en, open_source, roberta, question_answering, tensorflow]
+task: Question Answering
+language: en
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: RoBertaForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained RobertaForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `roberta-canard` is an English model originally trained by `peggyhuang`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/roberta_qa_canard_en_4.3.0_3.0_1674219967411.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/roberta_qa_canard_en_4.3.0_3.0_1674219967411.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_canard","en")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_canard","en")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq("What's my name?","My name is Clara and I live in Berkeley.").toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|roberta_qa_canard|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|465.1 MB|
+|Case sensitive:|true|
+|Max sentence length:|256|
+
+## References
+
+- https://huggingface.co/peggyhuang/roberta-canard
\ No newline at end of file
diff --git a/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_checkpoint_finetuned_squad_en.md b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_checkpoint_finetuned_squad_en.md
new file mode 100644
index 00000000000000..edaae508b23fdf
--- /dev/null
+++ b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_checkpoint_finetuned_squad_en.md
@@ -0,0 +1,88 @@
+---
+layout: model
+title: English RobertaForQuestionAnswering Cased model (from xinranyyyy)
+author: John Snow Labs
+name: roberta_qa_checkpoint_finetuned_squad
+date: 2023-01-20
+tags: [en, open_source, roberta, question_answering, tensorflow]
+task: Question Answering
+language: en
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: RoBertaForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained RobertaForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `roberta_checkpoint-finetuned-squad` is an English model originally trained by `xinranyyyy`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/roberta_qa_checkpoint_finetuned_squad_en_4.3.0_3.0_1674223200816.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/roberta_qa_checkpoint_finetuned_squad_en_4.3.0_3.0_1674223200816.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_checkpoint_finetuned_squad","en")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_checkpoint_finetuned_squad","en")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq("What's my name?","My name is Clara and I live in Berkeley.").toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|roberta_qa_checkpoint_finetuned_squad|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|465.5 MB|
+|Case sensitive:|true|
+|Max sentence length:|256|
+
+## References
+
+- https://huggingface.co/xinranyyyy/roberta_checkpoint-finetuned-squad
\ No newline at end of file
diff --git a/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_chiendvhust_base_finetuned_squad_en.md b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_chiendvhust_base_finetuned_squad_en.md
new file mode 100644
index 00000000000000..dbd376bf09489f
--- /dev/null
+++ b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_chiendvhust_base_finetuned_squad_en.md
@@ -0,0 +1,88 @@
+---
+layout: model
+title: English RobertaForQuestionAnswering Base Cased model (from chiendvhust)
+author: John Snow Labs
+name: roberta_qa_chiendvhust_base_finetuned_squad
+date: 2023-01-20
+tags: [en, open_source, roberta, question_answering, tensorflow]
+task: Question Answering
+language: en
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: RoBertaForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained RobertaForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `roberta-base-finetuned-squad` is an English model originally trained by `chiendvhust`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/roberta_qa_chiendvhust_base_finetuned_squad_en_4.3.0_3.0_1674217182008.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/roberta_qa_chiendvhust_base_finetuned_squad_en_4.3.0_3.0_1674217182008.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_chiendvhust_base_finetuned_squad","en")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_chiendvhust_base_finetuned_squad","en")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq("What's my name?","My name is Clara and I live in Berkeley.").toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|roberta_qa_chiendvhust_base_finetuned_squad|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|457.5 MB|
+|Case sensitive:|true|
+|Max sentence length:|256|
+
+## References
+
+- https://huggingface.co/chiendvhust/roberta-base-finetuned-squad
\ No newline at end of file
diff --git a/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_cline_emanuals_tech_en.md b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_cline_emanuals_tech_en.md
new file mode 100644
index 00000000000000..4aa81b68b0003d
--- /dev/null
+++ b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_cline_emanuals_tech_en.md
@@ -0,0 +1,88 @@
+---
+layout: model
+title: English RobertaForQuestionAnswering Cased model (from AnonymousSub)
+author: John Snow Labs
+name: roberta_qa_cline_emanuals_tech
+date: 2023-01-20
+tags: [en, open_source, roberta, question_answering, tensorflow]
+task: Question Answering
+language: en
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: RoBertaForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained RobertaForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `cline-emanuals-techqa` is an English model originally trained by `AnonymousSub`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/roberta_qa_cline_emanuals_tech_en_4.3.0_3.0_1674209326690.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/roberta_qa_cline_emanuals_tech_en_4.3.0_3.0_1674209326690.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_cline_emanuals_tech","en")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_cline_emanuals_tech","en")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq("What's my name?","My name is Clara and I live in Berkeley.").toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|roberta_qa_cline_emanuals_tech|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|466.4 MB|
+|Case sensitive:|true|
+|Max sentence length:|256|
+
+## References
+
+- https://huggingface.co/AnonymousSub/cline-emanuals-techqa
\ No newline at end of file
diff --git a/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_custom_squad_ds_en.md b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_custom_squad_ds_en.md
new file mode 100644
index 00000000000000..4f2ae8246caf05
--- /dev/null
+++ b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_custom_squad_ds_en.md
@@ -0,0 +1,88 @@
+---
+layout: model
+title: English RobertaForQuestionAnswering Cased model (from sunitha)
+author: John Snow Labs
+name: roberta_qa_custom_squad_ds
+date: 2023-01-20
+tags: [en, open_source, roberta, question_answering, tensorflow]
+task: Question Answering
+language: en
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: RoBertaForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained RobertaForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `Roberta_Custom_Squad_DS` is an English model originally trained by `sunitha`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/roberta_qa_custom_squad_ds_en_4.3.0_3.0_1674208785165.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/roberta_qa_custom_squad_ds_en_4.3.0_3.0_1674208785165.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_custom_squad_ds","en")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_custom_squad_ds","en")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq("What's my name?","My name is Clara and I live in Berkeley.").toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|roberta_qa_custom_squad_ds|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|464.4 MB|
+|Case sensitive:|true|
+|Max sentence length:|256|
+
+## References
+
+- https://huggingface.co/sunitha/Roberta_Custom_Squad_DS
\ No newline at end of file
diff --git a/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_customds_finetune_en.md b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_customds_finetune_en.md
new file mode 100644
index 00000000000000..7eb207028a6d34
--- /dev/null
+++ b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_customds_finetune_en.md
@@ -0,0 +1,88 @@
+---
+layout: model
+title: English RobertaForQuestionAnswering Cased model (from sunitha)
+author: John Snow Labs
+name: roberta_qa_customds_finetune
+date: 2023-01-20
+tags: [en, open_source, roberta, question_answering, tensorflow]
+task: Question Answering
+language: en
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: RoBertaForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained RobertaForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `roberta-customds-finetune` is an English model originally trained by `sunitha`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/roberta_qa_customds_finetune_en_4.3.0_3.0_1674220035505.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/roberta_qa_customds_finetune_en_4.3.0_3.0_1674220035505.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_customds_finetune","en")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_customds_finetune","en")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq("What's my name?","My name is Clara and I live in Berkeley.").toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|roberta_qa_customds_finetune|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|464.3 MB|
+|Case sensitive:|true|
+|Max sentence length:|256|
+
+## References
+
+- https://huggingface.co/sunitha/roberta-customds-finetune
\ No newline at end of file
diff --git a/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_cv_custom_ds_en.md b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_cv_custom_ds_en.md
new file mode 100644
index 00000000000000..9a12fbe60e2d95
--- /dev/null
+++ b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_cv_custom_ds_en.md
@@ -0,0 +1,88 @@
+---
+layout: model
+title: English RobertaForQuestionAnswering Cased model (from sunitha)
+author: John Snow Labs
+name: roberta_qa_cv_custom_ds
+date: 2023-01-20
+tags: [en, open_source, roberta, question_answering, tensorflow]
+task: Question Answering
+language: en
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: RoBertaForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained RobertaForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `CV_Custom_DS` is an English model originally trained by `sunitha`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/roberta_qa_cv_custom_ds_en_4.3.0_3.0_1674207905368.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/roberta_qa_cv_custom_ds_en_4.3.0_3.0_1674207905368.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_cv_custom_ds","en")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_cv_custom_ds","en")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq("What's my name?","My name is Clara and I live in Berkeley.").toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|roberta_qa_cv_custom_ds|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|464.3 MB|
+|Case sensitive:|true|
+|Max sentence length:|256|
+
+## References
+
+- https://huggingface.co/sunitha/CV_Custom_DS
\ No newline at end of file
diff --git a/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_cv_merge_ds_en.md b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_cv_merge_ds_en.md
new file mode 100644
index 00000000000000..a3f2746b6b2cae
--- /dev/null
+++ b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_cv_merge_ds_en.md
@@ -0,0 +1,88 @@
+---
+layout: model
+title: English RobertaForQuestionAnswering Cased model (from sunitha)
+author: John Snow Labs
+name: roberta_qa_cv_merge_ds
+date: 2023-01-20
+tags: [en, open_source, roberta, question_answering, tensorflow]
+task: Question Answering
+language: en
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: RoBertaForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained RobertaForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `CV_Merge_DS` is an English model originally trained by `sunitha`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/roberta_qa_cv_merge_ds_en_4.3.0_3.0_1674207962257.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/roberta_qa_cv_merge_ds_en_4.3.0_3.0_1674207962257.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_cv_merge_ds","en")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_cv_merge_ds","en")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq("What's my name?","My name is Clara and I live in Berkeley.").toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|roberta_qa_cv_merge_ds|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|464.3 MB|
+|Case sensitive:|true|
+|Max sentence length:|256|
+
+## References
+
+- https://huggingface.co/sunitha/CV_Merge_DS
\ No newline at end of file
diff --git a/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_cyberlandr_door_en.md b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_cyberlandr_door_en.md
new file mode 100644
index 00000000000000..4f40fdfa7b8962
--- /dev/null
+++ b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_cyberlandr_door_en.md
@@ -0,0 +1,88 @@
+---
+layout: model
+title: English RobertaForQuestionAnswering Cased model (from billfrench)
+author: John Snow Labs
+name: roberta_qa_cyberlandr_door
+date: 2023-01-20
+tags: [en, open_source, roberta, question_answering, tensorflow]
+task: Question Answering
+language: en
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: RoBertaForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained RobertaForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `cyberlandr-door` is an English model originally trained by `billfrench`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/roberta_qa_cyberlandr_door_en_4.3.0_3.0_1674209416902.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/roberta_qa_cyberlandr_door_en_4.3.0_3.0_1674209416902.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_cyberlandr_door","en")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_cyberlandr_door","en")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq("What's my name?","My name is Clara and I live in Berkeley.").toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|roberta_qa_cyberlandr_door|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document, token]|
+|Output Labels:|[class]|
+|Language:|en|
+|Size:|414.0 MB|
+|Case sensitive:|true|
+|Max sentence length:|256|
+
+## References
+
+- https://huggingface.co/billfrench/cyberlandr-door
\ No newline at end of file
diff --git a/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_deepakvk_base_squad2_finetuned_squad_en.md b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_deepakvk_base_squad2_finetuned_squad_en.md
new file mode 100644
index 00000000000000..8d3b8b49fb5381
--- /dev/null
+++ b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_deepakvk_base_squad2_finetuned_squad_en.md
@@ -0,0 +1,88 @@
+---
+layout: model
+title: English RobertaForQuestionAnswering Base Cased model (from deepakvk)
+author: John Snow Labs
+name: roberta_qa_deepakvk_base_squad2_finetuned_squad
+date: 2023-01-20
+tags: [en, open_source, roberta, question_answering, tensorflow]
+task: Question Answering
+language: en
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: RoBertaForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained RobertaForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `roberta-base-squad2-finetuned-squad` is an English model originally trained by `deepakvk`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/roberta_qa_deepakvk_base_squad2_finetuned_squad_en_4.3.0_3.0_1674219250943.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/roberta_qa_deepakvk_base_squad2_finetuned_squad_en_4.3.0_3.0_1674219250943.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_deepakvk_base_squad2_finetuned_squad","en")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_deepakvk_base_squad2_finetuned_squad","en")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq("What's my name?","My name is Clara and I live in Berkeley.").toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|roberta_qa_deepakvk_base_squad2_finetuned_squad|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document, token]|
+|Output Labels:|[class]|
+|Language:|en|
+|Size:|464.3 MB|
+|Case sensitive:|true|
+|Max sentence length:|256|
+
+## References
+
+- https://huggingface.co/deepakvk/roberta-base-squad2-finetuned-squad
\ No newline at end of file
diff --git a/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_deepset_base_squad2_orkg_how_1e_4_en.md b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_deepset_base_squad2_orkg_how_1e_4_en.md
new file mode 100644
index 00000000000000..4bc782861e1ae1
--- /dev/null
+++ b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_deepset_base_squad2_orkg_how_1e_4_en.md
@@ -0,0 +1,88 @@
+---
+layout: model
+title: English RobertaForQuestionAnswering Base Cased model (from Moussab)
+author: John Snow Labs
+name: roberta_qa_deepset_base_squad2_orkg_how_1e_4
+date: 2023-01-20
+tags: [en, open_source, roberta, question_answering, tensorflow]
+task: Question Answering
+language: en
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: RoBertaForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained RobertaForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `deepset-roberta-base-squad2-orkg-how-1e-4` is an English model originally trained by `Moussab`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/roberta_qa_deepset_base_squad2_orkg_how_1e_4_en_4.3.0_3.0_1674209475140.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/roberta_qa_deepset_base_squad2_orkg_how_1e_4_en_4.3.0_3.0_1674209475140.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_deepset_base_squad2_orkg_how_1e_4","en")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_deepset_base_squad2_orkg_how_1e_4","en")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq("What's my name?","My name is Clara and I live in Berkeley.").toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|roberta_qa_deepset_base_squad2_orkg_how_1e_4|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document, token]|
+|Output Labels:|[class]|
+|Language:|en|
+|Size:|464.8 MB|
+|Case sensitive:|true|
+|Max sentence length:|256|
+
+## References
+
+- https://huggingface.co/Moussab/deepset-roberta-base-squad2-orkg-how-1e-4
\ No newline at end of file
diff --git a/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_deepset_base_squad2_orkg_how_5e_05_en.md b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_deepset_base_squad2_orkg_how_5e_05_en.md
new file mode 100644
index 00000000000000..4c41a1875c89de
--- /dev/null
+++ b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_deepset_base_squad2_orkg_how_5e_05_en.md
@@ -0,0 +1,88 @@
+---
+layout: model
+title: English RobertaForQuestionAnswering Base Cased model (from Moussab)
+author: John Snow Labs
+name: roberta_qa_deepset_base_squad2_orkg_how_5e_05
+date: 2023-01-20
+tags: [en, open_source, roberta, question_answering, tensorflow]
+task: Question Answering
+language: en
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: RoBertaForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained RobertaForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `deepset-roberta-base-squad2-orkg-how-5e-05` is an English model originally trained by `Moussab`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/roberta_qa_deepset_base_squad2_orkg_how_5e_05_en_4.3.0_3.0_1674209532316.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/roberta_qa_deepset_base_squad2_orkg_how_5e_05_en_4.3.0_3.0_1674209532316.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_deepset_base_squad2_orkg_how_5e_05","en")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_deepset_base_squad2_orkg_how_5e_05","en")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq("What's my name?","My name is Clara and I live in Berkeley.").toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|roberta_qa_deepset_base_squad2_orkg_how_5e_05|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document, token]|
+|Output Labels:|[class]|
+|Language:|en|
+|Size:|464.8 MB|
+|Case sensitive:|true|
+|Max sentence length:|256|
+
+## References
+
+- https://huggingface.co/Moussab/deepset-roberta-base-squad2-orkg-how-5e-05
\ No newline at end of file
diff --git a/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_deepset_base_squad2_orkg_no_label_1e_4_en.md b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_deepset_base_squad2_orkg_no_label_1e_4_en.md
new file mode 100644
index 00000000000000..caaccb428df110
--- /dev/null
+++ b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_deepset_base_squad2_orkg_no_label_1e_4_en.md
@@ -0,0 +1,88 @@
+---
+layout: model
+title: English RobertaForQuestionAnswering Base Cased model (from Moussab)
+author: John Snow Labs
+name: roberta_qa_deepset_base_squad2_orkg_no_label_1e_4
+date: 2023-01-20
+tags: [en, open_source, roberta, question_answering, tensorflow]
+task: Question Answering
+language: en
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: RoBertaForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained RobertaForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `deepset-roberta-base-squad2-orkg-no-label-1e-4` is an English model originally trained by `Moussab`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/roberta_qa_deepset_base_squad2_orkg_no_label_1e_4_en_4.3.0_3.0_1674209605485.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/roberta_qa_deepset_base_squad2_orkg_no_label_1e_4_en_4.3.0_3.0_1674209605485.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_deepset_base_squad2_orkg_no_label_1e_4","en")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_deepset_base_squad2_orkg_no_label_1e_4","en")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq("What's my name?","My name is Clara and I live in Berkeley.").toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|roberta_qa_deepset_base_squad2_orkg_no_label_1e_4|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document, token]|
+|Output Labels:|[class]|
+|Language:|en|
+|Size:|464.9 MB|
+|Case sensitive:|true|
+|Max sentence length:|256|
+
+## References
+
+- https://huggingface.co/Moussab/deepset-roberta-base-squad2-orkg-no-label-1e-4
\ No newline at end of file
diff --git a/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_deepset_base_squad2_orkg_no_label_5e_05_en.md b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_deepset_base_squad2_orkg_no_label_5e_05_en.md
new file mode 100644
index 00000000000000..938352b1d97b33
--- /dev/null
+++ b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_deepset_base_squad2_orkg_no_label_5e_05_en.md
@@ -0,0 +1,88 @@
+---
+layout: model
+title: English RobertaForQuestionAnswering Base Cased model (from Moussab)
+author: John Snow Labs
+name: roberta_qa_deepset_base_squad2_orkg_no_label_5e_05
+date: 2023-01-20
+tags: [en, open_source, roberta, question_answering, tensorflow]
+task: Question Answering
+language: en
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: RoBertaForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained RobertaForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `deepset-roberta-base-squad2-orkg-no-label-5e-05` is an English model originally trained by `Moussab`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/roberta_qa_deepset_base_squad2_orkg_no_label_5e_05_en_4.3.0_3.0_1674209664953.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/roberta_qa_deepset_base_squad2_orkg_no_label_5e_05_en_4.3.0_3.0_1674209664953.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_deepset_base_squad2_orkg_no_label_5e_05","en")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_deepset_base_squad2_orkg_no_label_5e_05","en")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq("What's my name?","My name is Clara and I live in Berkeley.").toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|roberta_qa_deepset_base_squad2_orkg_no_label_5e_05|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document, token]|
+|Output Labels:|[class]|
+|Language:|en|
+|Size:|464.8 MB|
+|Case sensitive:|true|
+|Max sentence length:|256|
+
+## References
+
+- https://huggingface.co/Moussab/deepset-roberta-base-squad2-orkg-no-label-5e-05
\ No newline at end of file
diff --git a/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_deepset_base_squad2_orkg_what_1e_4_en.md b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_deepset_base_squad2_orkg_what_1e_4_en.md
new file mode 100644
index 00000000000000..37f52ee2a4b38f
--- /dev/null
+++ b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_deepset_base_squad2_orkg_what_1e_4_en.md
@@ -0,0 +1,88 @@
+---
+layout: model
+title: English RobertaForQuestionAnswering Base Cased model (from Moussab)
+author: John Snow Labs
+name: roberta_qa_deepset_base_squad2_orkg_what_1e_4
+date: 2023-01-20
+tags: [en, open_source, roberta, question_answering, tensorflow]
+task: Question Answering
+language: en
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: RoBertaForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained RobertaForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `deepset-roberta-base-squad2-orkg-what-1e-4` is an English model originally trained by `Moussab`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/roberta_qa_deepset_base_squad2_orkg_what_1e_4_en_4.3.0_3.0_1674209722792.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/roberta_qa_deepset_base_squad2_orkg_what_1e_4_en_4.3.0_3.0_1674209722792.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_deepset_base_squad2_orkg_what_1e_4","en")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_deepset_base_squad2_orkg_what_1e_4","en")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq("What's my name?","My name is Clara and I live in Berkeley.").toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|roberta_qa_deepset_base_squad2_orkg_what_1e_4|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document, token]|
+|Output Labels:|[class]|
+|Language:|en|
+|Size:|464.8 MB|
+|Case sensitive:|true|
+|Max sentence length:|256|
+
+## References
+
+- https://huggingface.co/Moussab/deepset-roberta-base-squad2-orkg-what-1e-4
\ No newline at end of file
diff --git a/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_deepset_base_squad2_orkg_which_1e_4_en.md b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_deepset_base_squad2_orkg_which_1e_4_en.md
new file mode 100644
index 00000000000000..4f3ca4c5e441a1
--- /dev/null
+++ b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_deepset_base_squad2_orkg_which_1e_4_en.md
@@ -0,0 +1,88 @@
+---
+layout: model
+title: English RobertaForQuestionAnswering Base Cased model (from Moussab)
+author: John Snow Labs
+name: roberta_qa_deepset_base_squad2_orkg_which_1e_4
+date: 2023-01-20
+tags: [en, open_source, roberta, question_answering, tensorflow]
+task: Question Answering
+language: en
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: RoBertaForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained RobertaForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `deepset-roberta-base-squad2-orkg-which-1e-4` is an English model originally trained by `Moussab`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/roberta_qa_deepset_base_squad2_orkg_which_1e_4_en_4.3.0_3.0_1674209779299.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/roberta_qa_deepset_base_squad2_orkg_which_1e_4_en_4.3.0_3.0_1674209779299.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_deepset_base_squad2_orkg_which_1e_4","en")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_deepset_base_squad2_orkg_which_1e_4","en")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq("What's my name?","My name is Clara and I live in Berkeley.").toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|roberta_qa_deepset_base_squad2_orkg_which_1e_4|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document, token]|
+|Output Labels:|[class]|
+|Language:|en|
+|Size:|464.8 MB|
+|Case sensitive:|true|
+|Max sentence length:|256|
+
+## References
+
+- https://huggingface.co/Moussab/deepset-roberta-base-squad2-orkg-which-1e-4
\ No newline at end of file
diff --git a/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_deepset_base_squad2_orkg_which_5e_05_en.md b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_deepset_base_squad2_orkg_which_5e_05_en.md
new file mode 100644
index 00000000000000..6b777da35df3ce
--- /dev/null
+++ b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_deepset_base_squad2_orkg_which_5e_05_en.md
@@ -0,0 +1,88 @@
+---
+layout: model
+title: English RobertaForQuestionAnswering Base Cased model (from Moussab)
+author: John Snow Labs
+name: roberta_qa_deepset_base_squad2_orkg_which_5e_05
+date: 2023-01-20
+tags: [en, open_source, roberta, question_answering, tensorflow]
+task: Question Answering
+language: en
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: RoBertaForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained RobertaForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `deepset-roberta-base-squad2-orkg-which-5e-05` is an English model originally trained by `Moussab`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/roberta_qa_deepset_base_squad2_orkg_which_5e_05_en_4.3.0_3.0_1674209836673.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/roberta_qa_deepset_base_squad2_orkg_which_5e_05_en_4.3.0_3.0_1674209836673.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_deepset_base_squad2_orkg_which_5e_05","en")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_deepset_base_squad2_orkg_which_5e_05","en")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq("What's my name?","My name is Clara and I live in Berkeley.").toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|roberta_qa_deepset_base_squad2_orkg_which_5e_05|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document, token]|
+|Output Labels:|[class]|
+|Language:|en|
+|Size:|464.8 MB|
+|Case sensitive:|true|
+|Max sentence length:|256|
+
+## References
+
+- https://huggingface.co/Moussab/deepset-roberta-base-squad2-orkg-which-5e-05
\ No newline at end of file
diff --git a/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_discord_en.md b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_discord_en.md
new file mode 100644
index 00000000000000..6973bc1077ed86
--- /dev/null
+++ b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_discord_en.md
@@ -0,0 +1,88 @@
+---
+layout: model
+title: English RobertaForQuestionAnswering Cased model (from Salesforce)
+author: John Snow Labs
+name: roberta_qa_discord
+date: 2023-01-20
+tags: [en, open_source, roberta, question_answering, tensorflow]
+task: Question Answering
+language: en
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: RoBertaForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained RobertaForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `discord_qa` is an English model originally trained by `Salesforce`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/roberta_qa_discord_en_4.3.0_3.0_1674210253072.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/roberta_qa_discord_en_4.3.0_3.0_1674210253072.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_discord","en")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_discord","en")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq("What's my name?","My name is Clara and I live in Berkeley.").toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|roberta_qa_discord|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|845.1 MB|
+|Case sensitive:|true|
+|Max sentence length:|256|
+
+## References
+
+- https://huggingface.co/Salesforce/discord_qa
\ No newline at end of file
diff --git a/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_distilrobert_base_squadv2_328seq_128stride_test_en.md b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_distilrobert_base_squadv2_328seq_128stride_test_en.md
new file mode 100644
index 00000000000000..ab8bdafb9b025f
--- /dev/null
+++ b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_distilrobert_base_squadv2_328seq_128stride_test_en.md
@@ -0,0 +1,88 @@
+---
+layout: model
+title: English RobertaForQuestionAnswering Base Cased model (from manishiitg)
+author: John Snow Labs
+name: roberta_qa_distilrobert_base_squadv2_328seq_128stride_test
+date: 2023-01-20
+tags: [en, open_source, roberta, question_answering, tensorflow]
+task: Question Answering
+language: en
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: RoBertaForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained RobertaForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `distilrobert-base-squadv2-328seq-128stride-test` is an English model originally trained by `manishiitg`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/roberta_qa_distilrobert_base_squadv2_328seq_128stride_test_en_4.3.0_3.0_1674210316686.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/roberta_qa_distilrobert_base_squadv2_328seq_128stride_test_en_4.3.0_3.0_1674210316686.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_distilrobert_base_squadv2_328seq_128stride_test","en")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_distilrobert_base_squadv2_328seq_128stride_test","en")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq("What's my name?","My name is Clara and I live in Berkeley.").toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|roberta_qa_distilrobert_base_squadv2_328seq_128stride_test|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|307.0 MB|
+|Case sensitive:|true|
+|Max sentence length:|256|
+
+## References
+
+- https://huggingface.co/manishiitg/distilrobert-base-squadv2-328seq-128stride-test
\ No newline at end of file
diff --git a/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_dpr_nq_reader_base_en.md b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_dpr_nq_reader_base_en.md
new file mode 100644
index 00000000000000..57db76612331a9
--- /dev/null
+++ b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_dpr_nq_reader_base_en.md
@@ -0,0 +1,88 @@
+---
+layout: model
+title: English RobertaForQuestionAnswering Base Cased model (from nlpconnect)
+author: John Snow Labs
+name: roberta_qa_dpr_nq_reader_base
+date: 2023-01-20
+tags: [en, open_source, roberta, question_answering, tensorflow]
+task: Question Answering
+language: en
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: RoBertaForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained RobertaForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `dpr-nq-reader-roberta-base` is an English model originally trained by `nlpconnect`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/roberta_qa_dpr_nq_reader_base_en_4.3.0_3.0_1674210699630.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/roberta_qa_dpr_nq_reader_base_en_4.3.0_3.0_1674210699630.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_dpr_nq_reader_base","en")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_dpr_nq_reader_base","en")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq("What's my name?","My name is Clara and I live in Berkeley.").toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|roberta_qa_dpr_nq_reader_base|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|466.4 MB|
+|Case sensitive:|true|
+|Max sentence length:|256|
+
+## References
+
+- https://huggingface.co/nlpconnect/dpr-nq-reader-roberta-base
\ No newline at end of file
diff --git a/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_dpr_nq_reader_base_v2_en.md b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_dpr_nq_reader_base_v2_en.md
new file mode 100644
index 00000000000000..4e5671d1d0c898
--- /dev/null
+++ b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_dpr_nq_reader_base_v2_en.md
@@ -0,0 +1,88 @@
+---
+layout: model
+title: English RobertaForQuestionAnswering Base Cased model (from nlpconnect)
+author: John Snow Labs
+name: roberta_qa_dpr_nq_reader_base_v2
+date: 2023-01-20
+tags: [en, open_source, roberta, question_answering, tensorflow]
+task: Question Answering
+language: en
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: RoBertaForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained RobertaForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `dpr-nq-reader-roberta-base-v2` is an English model originally trained by `nlpconnect`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/roberta_qa_dpr_nq_reader_base_v2_en_4.3.0_3.0_1674210757247.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/roberta_qa_dpr_nq_reader_base_v2_en_4.3.0_3.0_1674210757247.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_dpr_nq_reader_base_v2","en")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_dpr_nq_reader_base_v2","en")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq("What's my name?","My name is Clara and I live in Berkeley.").toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|roberta_qa_dpr_nq_reader_base_v2|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|466.3 MB|
+|Case sensitive:|true|
+|Max sentence length:|256|
+
+## References
+
+- https://huggingface.co/nlpconnect/dpr-nq-reader-roberta-base-v2
\ No newline at end of file
diff --git a/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_dynamic_minilmv2_l6_h384_squad1.1_en.md b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_dynamic_minilmv2_l6_h384_squad1.1_en.md
new file mode 100644
index 00000000000000..8dcfa7b4418c19
--- /dev/null
+++ b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_dynamic_minilmv2_l6_h384_squad1.1_en.md
@@ -0,0 +1,88 @@
+---
+layout: model
+title: English RobertaForQuestionAnswering Mini Cased model (from sguskin)
+author: John Snow Labs
+name: roberta_qa_dynamic_minilmv2_l6_h384_squad1.1
+date: 2023-01-20
+tags: [en, open_source, roberta, question_answering, tensorflow]
+task: Question Answering
+language: en
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: RoBertaForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained RobertaForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `dynamic-minilmv2-L6-H384-squad1.1` is an English model originally trained by `sguskin`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/roberta_qa_dynamic_minilmv2_l6_h384_squad1.1_en_4.3.0_3.0_1674210790213.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/roberta_qa_dynamic_minilmv2_l6_h384_squad1.1_en_4.3.0_3.0_1674210790213.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_dynamic_minilmv2_l6_h384_squad1.1","en")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_dynamic_minilmv2_l6_h384_squad1.1","en")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq("What's my name?","My name is Clara and I live in Berkeley.").toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|roberta_qa_dynamic_minilmv2_l6_h384_squad1.1|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|112.1 MB|
+|Case sensitive:|true|
+|Max sentence length:|256|
+
+## References
+
+- https://huggingface.co/sguskin/dynamic-minilmv2-L6-H384-squad1.1
\ No newline at end of file
diff --git a/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_eda_and_parav3_en.md b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_eda_and_parav3_en.md
new file mode 100644
index 00000000000000..1436f1dcdff391
--- /dev/null
+++ b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_eda_and_parav3_en.md
@@ -0,0 +1,88 @@
+---
+layout: model
+title: English RobertaForQuestionAnswering Cased model (from comacrae)
+author: John Snow Labs
+name: roberta_qa_eda_and_parav3
+date: 2023-01-20
+tags: [en, open_source, roberta, question_answering, tensorflow]
+task: Question Answering
+language: en
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: RoBertaForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained RobertaForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `roberta-eda-and-parav3` is an English model originally trained by `comacrae`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/roberta_qa_eda_and_parav3_en_4.3.0_3.0_1674220091398.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/roberta_qa_eda_and_parav3_en_4.3.0_3.0_1674220091398.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_eda_and_parav3","en")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_eda_and_parav3","en")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq("What's my name?","My name is Clara and I live in Berkeley.").toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|roberta_qa_eda_and_parav3|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|464.5 MB|
+|Case sensitive:|true|
+|Max sentence length:|256|
+
+## References
+
+- https://huggingface.co/comacrae/roberta-eda-and-parav3
\ No newline at end of file
diff --git a/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_edav3_en.md b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_edav3_en.md
new file mode 100644
index 00000000000000..8ff9a7a6058449
--- /dev/null
+++ b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_edav3_en.md
@@ -0,0 +1,88 @@
+---
+layout: model
+title: English RobertaForQuestionAnswering Cased model (from comacrae)
+author: John Snow Labs
+name: roberta_qa_edav3
+date: 2023-01-20
+tags: [en, open_source, roberta, question_answering, tensorflow]
+task: Question Answering
+language: en
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: RoBertaForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained RobertaForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `roberta-edav3` is an English model originally trained by `comacrae`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/roberta_qa_edav3_en_4.3.0_3.0_1674220150986.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/roberta_qa_edav3_en_4.3.0_3.0_1674220150986.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_edav3","en")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_edav3","en")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq("What's my name?","My name is Clara and I live in Berkeley.").toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|roberta_qa_edav3|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|464.5 MB|
+|Case sensitive:|true|
+|Max sentence length:|256|
+
+## References
+
+- https://huggingface.co/comacrae/roberta-edav3
\ No newline at end of file
diff --git a/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_edtech_model1_en.md b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_edtech_model1_en.md
new file mode 100644
index 00000000000000..5d04d4e4e00711
--- /dev/null
+++ b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_edtech_model1_en.md
@@ -0,0 +1,88 @@
+---
+layout: model
+title: English RobertaForQuestionAnswering Cased model (from phanimvsk)
+author: John Snow Labs
+name: roberta_qa_edtech_model1
+date: 2023-01-20
+tags: [en, open_source, roberta, question_answering, tensorflow]
+task: Question Answering
+language: en
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: RoBertaForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained RobertaForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `Edtech_model1` is an English model originally trained by `phanimvsk`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/roberta_qa_edtech_model1_en_4.3.0_3.0_1674208077877.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/roberta_qa_edtech_model1_en_4.3.0_3.0_1674208077877.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_edtech_model1","en")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_edtech_model1","en")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq("What's my name?","My name is Clara and I live in Berkeley.").toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|roberta_qa_edtech_model1|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|463.3 MB|
+|Case sensitive:|true|
+|Max sentence length:|256|
+
+## References
+
+- https://huggingface.co/phanimvsk/Edtech_model1
\ No newline at end of file
diff --git a/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_emanuals_squad2.0_en.md b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_emanuals_squad2.0_en.md
new file mode 100644
index 00000000000000..8abe6b55980b23
--- /dev/null
+++ b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_emanuals_squad2.0_en.md
@@ -0,0 +1,88 @@
+---
+layout: model
+title: English RobertaForQuestionAnswering Cased model (from AnonymousSub)
+author: John Snow Labs
+name: roberta_qa_emanuals_squad2.0
+date: 2023-01-20
+tags: [en, open_source, roberta, question_answering, tensorflow]
+task: Question Answering
+language: en
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: RoBertaForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained RobertaForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `EManuals_RoBERTa_squad2.0` is an English model originally trained by `AnonymousSub`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/roberta_qa_emanuals_squad2.0_en_4.3.0_3.0_1674208019986.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/roberta_qa_emanuals_squad2.0_en_4.3.0_3.0_1674208019986.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_emanuals_squad2.0","en")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_emanuals_squad2.0","en")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq("What's my name?","My name is Clara and I live in Berkeley.").toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|roberta_qa_emanuals_squad2.0|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|466.7 MB|
+|Case sensitive:|true|
+|Max sentence length:|256|
+
+## References
+
+- https://huggingface.co/AnonymousSub/EManuals_RoBERTa_squad2.0
\ No newline at end of file
diff --git a/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_emotion_extraction_en.md b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_emotion_extraction_en.md
new file mode 100644
index 00000000000000..ed449c2ea133d4
--- /dev/null
+++ b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_emotion_extraction_en.md
@@ -0,0 +1,88 @@
+---
+layout: model
+title: English RobertaForQuestionAnswering Cased model (from Nakul24)
+author: John Snow Labs
+name: roberta_qa_emotion_extraction
+date: 2023-01-20
+tags: [en, open_source, roberta, question_answering, tensorflow]
+task: Question Answering
+language: en
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: RoBertaForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained RobertaForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `RoBERTa-emotion-extraction` is an English model originally trained by `Nakul24`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/roberta_qa_emotion_extraction_en_4.3.0_3.0_1674208609562.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/roberta_qa_emotion_extraction_en_4.3.0_3.0_1674208609562.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_emotion_extraction","en")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_emotion_extraction","en")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq("What's my name?","My name is Clara and I live in Berkeley.").toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|roberta_qa_emotion_extraction|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|426.9 MB|
+|Case sensitive:|true|
+|Max sentence length:|256|
+
+## References
+
+- https://huggingface.co/Nakul24/RoBERTa-emotion-extraction
\ No newline at end of file
diff --git a/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_fin_v1_finetuned_squad_en.md b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_fin_v1_finetuned_squad_en.md
new file mode 100644
index 00000000000000..16397b6e8f0156
--- /dev/null
+++ b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_fin_v1_finetuned_squad_en.md
@@ -0,0 +1,88 @@
+---
+layout: model
+title: English RobertaForQuestionAnswering Cased model (from cgou)
+author: John Snow Labs
+name: roberta_qa_fin_v1_finetuned_squad
+date: 2023-01-20
+tags: [en, open_source, roberta, question_answering, tensorflow]
+task: Question Answering
+language: en
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: RoBertaForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained RobertaForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `fin_RoBERTa-v1-finetuned-squad` is an English model originally trained by `cgou`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/roberta_qa_fin_v1_finetuned_squad_en_4.3.0_3.0_1674210819999.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/roberta_qa_fin_v1_finetuned_squad_en_4.3.0_3.0_1674210819999.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_fin_v1_finetuned_squad","en")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_fin_v1_finetuned_squad","en")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq("What's my name?","My name is Clara and I live in Berkeley.").toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|roberta_qa_fin_v1_finetuned_squad|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|248.1 MB|
+|Case sensitive:|true|
+|Max sentence length:|256|
+
+## References
+
+- https://huggingface.co/cgou/fin_RoBERTa-v1-finetuned-squad
\ No newline at end of file
diff --git a/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_finetuned_city_en.md b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_finetuned_city_en.md
new file mode 100644
index 00000000000000..69478a5f05b47a
--- /dev/null
+++ b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_finetuned_city_en.md
@@ -0,0 +1,88 @@
+---
+layout: model
+title: English RobertaForQuestionAnswering Cased model (from skandaonsolve)
+author: John Snow Labs
+name: roberta_qa_finetuned_city
+date: 2023-01-20
+tags: [en, open_source, roberta, question_answering, tensorflow]
+task: Question Answering
+language: en
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: RoBertaForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained RobertaForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `roberta-finetuned-city` is an English model originally trained by `skandaonsolve`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/roberta_qa_finetuned_city_en_4.3.0_3.0_1674220204421.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/roberta_qa_finetuned_city_en_4.3.0_3.0_1674220204421.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_finetuned_city","en")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_finetuned_city","en")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq("What's my name?","My name is Clara and I live in Berkeley.").toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|roberta_qa_finetuned_city|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|464.7 MB|
+|Case sensitive:|true|
+|Max sentence length:|256|
+
+## References
+
+- https://huggingface.co/skandaonsolve/roberta-finetuned-city
\ No newline at end of file
diff --git a/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_finetuned_country_en.md b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_finetuned_country_en.md
new file mode 100644
index 00000000000000..513f06bb743ff1
--- /dev/null
+++ b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_finetuned_country_en.md
@@ -0,0 +1,88 @@
+---
+layout: model
+title: English RobertaForQuestionAnswering Cased model (from nsridhar)
+author: John Snow Labs
+name: roberta_qa_finetuned_country
+date: 2023-01-20
+tags: [en, open_source, roberta, question_answering, tensorflow]
+task: Question Answering
+language: en
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: RoBertaForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained RobertaForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `roberta-finetuned-country` is an English model originally trained by `nsridhar`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/roberta_qa_finetuned_country_en_4.3.0_3.0_1674220262882.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/roberta_qa_finetuned_country_en_4.3.0_3.0_1674220262882.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_finetuned_country","en")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_finetuned_country","en")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq("What's my name?","My name is Clara and I live in Berkeley.").toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|roberta_qa_finetuned_country|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|464.4 MB|
+|Case sensitive:|true|
+|Max sentence length:|256|
+
+## References
+
+- https://huggingface.co/nsridhar/roberta-finetuned-country
\ No newline at end of file
diff --git a/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_finetuned_facility_en.md b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_finetuned_facility_en.md
new file mode 100644
index 00000000000000..b9a23353e92cdc
--- /dev/null
+++ b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_finetuned_facility_en.md
@@ -0,0 +1,88 @@
+---
+layout: model
+title: English RobertaForQuestionAnswering Cased model (from skandaonsolve)
+author: John Snow Labs
+name: roberta_qa_finetuned_facility
+date: 2023-01-20
+tags: [en, open_source, roberta, question_answering, tensorflow]
+task: Question Answering
+language: en
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: RoBertaForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained RobertaForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `roberta-finetuned-facility` is an English model originally trained by `skandaonsolve`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/roberta_qa_finetuned_facility_en_4.3.0_3.0_1674220319873.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/roberta_qa_finetuned_facility_en_4.3.0_3.0_1674220319873.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_finetuned_facility","en")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_finetuned_facility","en")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq("What's my name?","My name is Clara and I live in Berkeley.").toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|roberta_qa_finetuned_facility|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|464.4 MB|
+|Case sensitive:|true|
+|Max sentence length:|256|
+
+## References
+
+- https://huggingface.co/skandaonsolve/roberta-finetuned-facility
\ No newline at end of file
diff --git a/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_finetuned_location_en.md b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_finetuned_location_en.md
new file mode 100644
index 00000000000000..9c963c43511b4d
--- /dev/null
+++ b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_finetuned_location_en.md
@@ -0,0 +1,88 @@
+---
+layout: model
+title: English RobertaForQuestionAnswering Cased model (from skandaonsolve)
+author: John Snow Labs
+name: roberta_qa_finetuned_location
+date: 2023-01-20
+tags: [en, open_source, roberta, question_answering, tensorflow]
+task: Question Answering
+language: en
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: RoBertaForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained RobertaForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `roberta-finetuned-location` is an English model originally trained by `skandaonsolve`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/roberta_qa_finetuned_location_en_4.3.0_3.0_1674220382399.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/roberta_qa_finetuned_location_en_4.3.0_3.0_1674220382399.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_finetuned_location","en")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_finetuned_location","en")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq("What's my name?","My name is Clara and I live in Berkeley.").toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|roberta_qa_finetuned_location|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|464.7 MB|
+|Case sensitive:|true|
+|Max sentence length:|256|
+
+## References
+
+- https://huggingface.co/skandaonsolve/roberta-finetuned-location
\ No newline at end of file
diff --git a/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_finetuned_squad_50k_en.md b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_finetuned_squad_50k_en.md
new file mode 100644
index 00000000000000..9abc5fbefcbd53
--- /dev/null
+++ b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_finetuned_squad_50k_en.md
@@ -0,0 +1,88 @@
+---
+layout: model
+title: English RobertaForQuestionAnswering Cased model (from clementgyj)
+author: John Snow Labs
+name: roberta_qa_finetuned_squad_50k
+date: 2023-01-20
+tags: [en, open_source, roberta, question_answering, tensorflow]
+task: Question Answering
+language: en
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: RoBertaForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained RobertaForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `roberta-finetuned-squad-50k` is an English model originally trained by `clementgyj`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/roberta_qa_finetuned_squad_50k_en_4.3.0_3.0_1674220438911.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/roberta_qa_finetuned_squad_50k_en_4.3.0_3.0_1674220438911.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_finetuned_squad_50k","en")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_finetuned_squad_50k","en")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq("What's my name?","My name is Clara and I live in Berkeley.").toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|roberta_qa_finetuned_squad_50k|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|462.4 MB|
+|Case sensitive:|true|
+|Max sentence length:|256|
+
+## References
+
+- https://huggingface.co/clementgyj/roberta-finetuned-squad-50k
\ No newline at end of file
diff --git a/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_finetuned_squadv1_en.md b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_finetuned_squadv1_en.md
new file mode 100644
index 00000000000000..6ad9e19eaa2351
--- /dev/null
+++ b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_finetuned_squadv1_en.md
@@ -0,0 +1,88 @@
+---
+layout: model
+title: English RobertaForQuestionAnswering Cased model (from mrm8488)
+author: John Snow Labs
+name: roberta_qa_finetuned_squadv1
+date: 2023-01-20
+tags: [en, open_source, roberta, question_answering, tensorflow]
+task: Question Answering
+language: en
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: RoBertaForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained RobertaForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `distilroberta-finetuned-squadv1` is an English model originally trained by `mrm8488`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/roberta_qa_finetuned_squadv1_en_4.3.0_3.0_1674210647091.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/roberta_qa_finetuned_squadv1_en_4.3.0_3.0_1674210647091.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_finetuned_squadv1","en")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_finetuned_squadv1","en")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq("What's my name?","My name is Clara and I live in Berkeley.").toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|roberta_qa_finetuned_squadv1|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|307.1 MB|
+|Case sensitive:|true|
+|Max sentence length:|256|
+
+## References
+
+- https://huggingface.co/mrm8488/distilroberta-finetuned-squadv1
\ No newline at end of file
diff --git a/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_finetuned_state2_en.md b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_finetuned_state2_en.md
new file mode 100644
index 00000000000000..9e0c551ec6c5d5
--- /dev/null
+++ b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_finetuned_state2_en.md
@@ -0,0 +1,88 @@
+---
+layout: model
+title: English RobertaForQuestionAnswering Cased model (from skandaonsolve)
+author: John Snow Labs
+name: roberta_qa_finetuned_state2
+date: 2023-01-20
+tags: [en, open_source, roberta, question_answering, tensorflow]
+task: Question Answering
+language: en
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: RoBertaForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained RobertaForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `roberta-finetuned-state2` is an English model originally trained by `skandaonsolve`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/roberta_qa_finetuned_state2_en_4.3.0_3.0_1674220557287.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/roberta_qa_finetuned_state2_en_4.3.0_3.0_1674220557287.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_finetuned_state2","en")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_finetuned_state2","en")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq("What's my name?","My name is Clara and I live in Berkeley.").toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|roberta_qa_finetuned_state2|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|464.5 MB|
+|Case sensitive:|true|
+|Max sentence length:|256|
+
+## References
+
+- https://huggingface.co/skandaonsolve/roberta-finetuned-state2
\ No newline at end of file
diff --git a/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_finetuned_state_en.md b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_finetuned_state_en.md
new file mode 100644
index 00000000000000..b7ab7bf4ef94c0
--- /dev/null
+++ b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_finetuned_state_en.md
@@ -0,0 +1,88 @@
+---
+layout: model
+title: English RobertaForQuestionAnswering Cased model (from skandaonsolve)
+author: John Snow Labs
+name: roberta_qa_finetuned_state
+date: 2023-01-20
+tags: [en, open_source, roberta, question_answering, tensorflow]
+task: Question Answering
+language: en
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: RoBertaForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained RobertaForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `roberta-finetuned-state` is an English model originally trained by `skandaonsolve`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/roberta_qa_finetuned_state_en_4.3.0_3.0_1674220497335.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/roberta_qa_finetuned_state_en_4.3.0_3.0_1674220497335.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_finetuned_state","en")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_finetuned_state","en")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq("What's my name?","My name is Clara and I live in Berkeley.").toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|roberta_qa_finetuned_state|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|464.5 MB|
+|Case sensitive:|true|
+|Max sentence length:|256|
+
+## References
+
+- https://huggingface.co/skandaonsolve/roberta-finetuned-state
\ No newline at end of file
diff --git a/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_finetuned_timeentities2_en.md b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_finetuned_timeentities2_en.md
new file mode 100644
index 00000000000000..2ecd5e4d0a0229
--- /dev/null
+++ b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_finetuned_timeentities2_en.md
@@ -0,0 +1,88 @@
+---
+layout: model
+title: English RobertaForQuestionAnswering Cased model (from skandaonsolve)
+author: John Snow Labs
+name: roberta_qa_finetuned_timeentities2
+date: 2023-01-20
+tags: [en, open_source, roberta, question_answering, tensorflow]
+task: Question Answering
+language: en
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: RoBertaForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained RobertaForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `roberta-finetuned-timeentities2` is an English model originally trained by `skandaonsolve`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/roberta_qa_finetuned_timeentities2_en_4.3.0_3.0_1674220671794.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/roberta_qa_finetuned_timeentities2_en_4.3.0_3.0_1674220671794.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_finetuned_timeentities2","en")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_finetuned_timeentities2","en")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq("What's my name?","My name is Clara and I live in Berkeley.").toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|roberta_qa_finetuned_timeentities2|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|465.0 MB|
+|Case sensitive:|true|
+|Max sentence length:|256|
+
+## References
+
+- https://huggingface.co/skandaonsolve/roberta-finetuned-timeentities2
\ No newline at end of file
diff --git a/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_finetuned_timeentities2_ttsp75_en.md b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_finetuned_timeentities2_ttsp75_en.md
new file mode 100644
index 00000000000000..619b29c0205e2f
--- /dev/null
+++ b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_finetuned_timeentities2_ttsp75_en.md
@@ -0,0 +1,88 @@
+---
+layout: model
+title: English RobertaForQuestionAnswering Cased model (from skandaonsolve)
+author: John Snow Labs
+name: roberta_qa_finetuned_timeentities2_ttsp75
+date: 2023-01-20
+tags: [en, open_source, roberta, question_answering, tensorflow]
+task: Question Answering
+language: en
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: RoBertaForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained RobertaForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `roberta-finetuned-timeentities2_ttsp75` is an English model originally trained by `skandaonsolve`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/roberta_qa_finetuned_timeentities2_ttsp75_en_4.3.0_3.0_1674220728523.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/roberta_qa_finetuned_timeentities2_ttsp75_en_4.3.0_3.0_1674220728523.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_finetuned_timeentities2_ttsp75","en")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_finetuned_timeentities2_ttsp75","en")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq("What's my name?","My name is Clara and I live in Berkeley.").toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|roberta_qa_finetuned_timeentities2_ttsp75|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|464.9 MB|
+|Case sensitive:|true|
+|Max sentence length:|256|
+
+## References
+
+- https://huggingface.co/skandaonsolve/roberta-finetuned-timeentities2_ttsp75
\ No newline at end of file
diff --git a/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_finetuned_timeentities_en.md b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_finetuned_timeentities_en.md
new file mode 100644
index 00000000000000..530d6a3f5f3d3f
--- /dev/null
+++ b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_finetuned_timeentities_en.md
@@ -0,0 +1,88 @@
+---
+layout: model
+title: English RobertaForQuestionAnswering Cased model (from skandaonsolve)
+author: John Snow Labs
+name: roberta_qa_finetuned_timeentities
+date: 2023-01-20
+tags: [en, open_source, roberta, question_answering, tensorflow]
+task: Question Answering
+language: en
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: RoBertaForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained RobertaForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `roberta-finetuned-timeentities` is an English model originally trained by `skandaonsolve`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/roberta_qa_finetuned_timeentities_en_4.3.0_3.0_1674220613032.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/roberta_qa_finetuned_timeentities_en_4.3.0_3.0_1674220613032.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_finetuned_timeentities","en")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_finetuned_timeentities","en")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq("What's my name?","My name is Clara and I live in Berkeley.").toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|roberta_qa_finetuned_timeentities|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|464.9 MB|
+|Case sensitive:|true|
+|Max sentence length:|256|
+
+## References
+
+- https://huggingface.co/skandaonsolve/roberta-finetuned-timeentities
\ No newline at end of file
diff --git a/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_firat_base_finetuned_squad_en.md b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_firat_base_finetuned_squad_en.md
new file mode 100644
index 00000000000000..04621300db5a1c
--- /dev/null
+++ b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_firat_base_finetuned_squad_en.md
@@ -0,0 +1,88 @@
+---
+layout: model
+title: English RobertaForQuestionAnswering Base Cased model (from Firat)
+author: John Snow Labs
+name: roberta_qa_firat_base_finetuned_squad
+date: 2023-01-20
+tags: [en, open_source, roberta, question_answering, tensorflow]
+task: Question Answering
+language: en
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: RoBertaForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained RobertaForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `roberta-base-finetuned-squad` is an English model originally trained by `Firat`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/roberta_qa_firat_base_finetuned_squad_en_4.3.0_3.0_1674217059391.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/roberta_qa_firat_base_finetuned_squad_en_4.3.0_3.0_1674217059391.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_firat_base_finetuned_squad","en")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_firat_base_finetuned_squad","en")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq("What's my name?","My name is Clara and I live in Berkeley.").toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|roberta_qa_firat_base_finetuned_squad|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|464.2 MB|
+|Case sensitive:|true|
+|Max sentence length:|256|
+
+## References
+
+- https://huggingface.co/Firat/roberta-base-finetuned-squad
\ No newline at end of file
diff --git a/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_for_event_extraction_en.md b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_for_event_extraction_en.md
new file mode 100644
index 00000000000000..125773a9cb8e93
--- /dev/null
+++ b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_for_event_extraction_en.md
@@ -0,0 +1,91 @@
+---
+layout: model
+title: English RobertaForQuestionAnswering Cased model (from veronica320)
+author: John Snow Labs
+name: roberta_qa_for_event_extraction
+date: 2023-01-20
+tags: [en, open_source, roberta, question_answering, tensorflow]
+task: Question Answering
+language: en
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: RoBertaForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained RobertaForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `QA-for-Event-Extraction` is an English model originally trained by `veronica320`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/roberta_qa_for_event_extraction_en_4.3.0_3.0_1674208285851.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/roberta_qa_for_event_extraction_en_4.3.0_3.0_1674208285851.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_for_event_extraction","en")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_for_event_extraction","en")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq("What's my name?","My name is Clara and I live in Berkeley.").toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|roberta_qa_for_event_extraction|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|1.3 GB|
+|Case sensitive:|true|
+|Max sentence length:|256|
+
+## References
+
+- https://huggingface.co/veronica320/QA-for-Event-Extraction
+- https://aclanthology.org/2021.acl-short.42/
+- https://github.com/uwnlp/qamr
+- https://github.com/veronica320/Zeroshot-Event-Extraction
\ No newline at end of file
diff --git a/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_for_question_answering_en.md b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_for_question_answering_en.md
new file mode 100644
index 00000000000000..a602a63f314ab1
--- /dev/null
+++ b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_for_question_answering_en.md
@@ -0,0 +1,88 @@
+---
+layout: model
+title: English RobertaForQuestionAnswering Cased model (from Zamachi)
+author: John Snow Labs
+name: roberta_qa_for_question_answering
+date: 2023-01-20
+tags: [en, open_source, roberta, question_answering, tensorflow]
+task: Question Answering
+language: en
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: RoBertaForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained RobertaForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `roberta-for-question-answering` is an English model originally trained by `Zamachi`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/roberta_qa_for_question_answering_en_4.3.0_3.0_1674220787682.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/roberta_qa_for_question_answering_en_4.3.0_3.0_1674220787682.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_for_question_answering","en")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_for_question_answering","en")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq("What's my name?","My name is Clara and I live in Berkeley.").toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|roberta_qa_for_question_answering|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|466.1 MB|
+|Case sensitive:|true|
+|Max sentence length:|256|
+
+## References
+
+- https://huggingface.co/Zamachi/roberta-for-question-answering
\ No newline at end of file
diff --git a/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_for_seizurefrequency_en.md b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_for_seizurefrequency_en.md
new file mode 100644
index 00000000000000..d1fbf61e7dac33
--- /dev/null
+++ b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_for_seizurefrequency_en.md
@@ -0,0 +1,89 @@
+---
+layout: model
+title: English RobertaForQuestionAnswering Cased model (from CNT-UPenn)
+author: John Snow Labs
+name: roberta_qa_for_seizurefrequency
+date: 2023-01-20
+tags: [en, open_source, roberta, question_answering, tensorflow]
+task: Question Answering
+language: en
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: RoBertaForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained RobertaForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `RoBERTa_for_seizureFrequency_QA` is an English model originally trained by `CNT-UPenn`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/roberta_qa_for_seizurefrequency_en_4.3.0_3.0_1674208667059.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/roberta_qa_for_seizurefrequency_en_4.3.0_3.0_1674208667059.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_for_seizurefrequency","en")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_for_seizurefrequency","en")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq("What's my name?","My name is Clara and I live in Berkeley.").toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|roberta_qa_for_seizurefrequency|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|466.9 MB|
+|Case sensitive:|true|
+|Max sentence length:|256|
+
+## References
+
+- https://huggingface.co/CNT-UPenn/RoBERTa_for_seizureFrequency_QA
+- https://doi.org/10.1093/jamia/ocac018
\ No newline at end of file
diff --git a/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_fpdm_ft_news_en.md b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_fpdm_ft_news_en.md
new file mode 100644
index 00000000000000..4d00701cf521d1
--- /dev/null
+++ b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_fpdm_ft_news_en.md
@@ -0,0 +1,88 @@
+---
+layout: model
+title: English RobertaForQuestionAnswering Cased model (from AnonymousSub)
+author: John Snow Labs
+name: roberta_qa_fpdm_ft_news
+date: 2023-01-20
+tags: [en, open_source, roberta, question_answering, tensorflow]
+task: Question Answering
+language: en
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: RoBertaForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained RobertaForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `fpdm_roberta_FT_newsqa` is an English model originally trained by `AnonymousSub`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/roberta_qa_fpdm_ft_news_en_4.3.0_3.0_1674211000201.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/roberta_qa_fpdm_ft_news_en_4.3.0_3.0_1674211000201.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_fpdm_ft_news","en")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_fpdm_ft_news","en")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq("What's my name?","My name is Clara and I live in Berkeley.").toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|roberta_qa_fpdm_ft_news|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|458.6 MB|
+|Case sensitive:|true|
+|Max sentence length:|256|
+
+## References
+
+- https://huggingface.co/AnonymousSub/fpdm_roberta_FT_newsqa
\ No newline at end of file
diff --git a/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_fpdm_hier_ft_new_news_en.md b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_fpdm_hier_ft_new_news_en.md
new file mode 100644
index 00000000000000..900c227c7e554d
--- /dev/null
+++ b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_fpdm_hier_ft_new_news_en.md
@@ -0,0 +1,88 @@
+---
+layout: model
+title: English RobertaForQuestionAnswering Cased model (from AnonymousSub)
+author: John Snow Labs
+name: roberta_qa_fpdm_hier_ft_new_news
+date: 2023-01-20
+tags: [en, open_source, roberta, question_answering, tensorflow]
+task: Question Answering
+language: en
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: RoBertaForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained RobertaForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `fpdm_hier_roberta_FT_new_newsqa` is an English model originally trained by `AnonymousSub`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/roberta_qa_fpdm_hier_ft_new_news_en_4.3.0_3.0_1674210874319.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/roberta_qa_fpdm_hier_ft_new_news_en_4.3.0_3.0_1674210874319.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_fpdm_hier_ft_new_news","en")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_fpdm_hier_ft_new_news","en")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq("What's my name?","My name is Clara and I live in Berkeley.").toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|roberta_qa_fpdm_hier_ft_new_news|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|461.5 MB|
+|Case sensitive:|true|
+|Max sentence length:|256|
+
+## References
+
+- https://huggingface.co/AnonymousSub/fpdm_hier_roberta_FT_new_newsqa
\ No newline at end of file
diff --git a/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_fpdm_hier_ft_news_en.md b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_fpdm_hier_ft_news_en.md
new file mode 100644
index 00000000000000..e7abe85ac47931
--- /dev/null
+++ b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_fpdm_hier_ft_news_en.md
@@ -0,0 +1,88 @@
+---
+layout: model
+title: English RobertaForQuestionAnswering Cased model (from AnonymousSub)
+author: John Snow Labs
+name: roberta_qa_fpdm_hier_ft_news
+date: 2023-01-20
+tags: [en, open_source, roberta, question_answering, tensorflow]
+task: Question Answering
+language: en
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: RoBertaForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained RobertaForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `fpdm_hier_roberta_FT_newsqa` is an English model originally trained by `AnonymousSub`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/roberta_qa_fpdm_hier_ft_news_en_4.3.0_3.0_1674210937245.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/roberta_qa_fpdm_hier_ft_news_en_4.3.0_3.0_1674210937245.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_fpdm_hier_ft_news","en")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_fpdm_hier_ft_news","en")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq("What's my name?","My name is Clara and I live in Berkeley.").toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|roberta_qa_fpdm_hier_ft_news|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|458.6 MB|
+|Case sensitive:|true|
+|Max sentence length:|256|
+
+## References
+
+- https://huggingface.co/AnonymousSub/fpdm_hier_roberta_FT_newsqa
\ No newline at end of file
diff --git a/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_fpdm_pert_sent_0.01_squad2.0_en.md b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_fpdm_pert_sent_0.01_squad2.0_en.md
new file mode 100644
index 00000000000000..352507f59e88dd
--- /dev/null
+++ b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_fpdm_pert_sent_0.01_squad2.0_en.md
@@ -0,0 +1,88 @@
+---
+layout: model
+title: English RobertaForQuestionAnswering Cased model (from AnonymousSub)
+author: John Snow Labs
+name: roberta_qa_fpdm_pert_sent_0.01_squad2.0
+date: 2023-01-20
+tags: [en, open_source, roberta, question_answering, tensorflow]
+task: Question Answering
+language: en
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: RoBertaForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained RobertaForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `fpdm_roberta_pert_sent_0.01_squad2.0` is an English model originally trained by `AnonymousSub`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/roberta_qa_fpdm_pert_sent_0.01_squad2.0_en_4.3.0_3.0_1674211060807.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/roberta_qa_fpdm_pert_sent_0.01_squad2.0_en_4.3.0_3.0_1674211060807.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_fpdm_pert_sent_0.01_squad2.0","en")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_fpdm_pert_sent_0.01_squad2.0","en")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq("What's my name?","My name is Clara and I live in Berkeley.").toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|roberta_qa_fpdm_pert_sent_0.01_squad2.0|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|460.4 MB|
+|Case sensitive:|true|
+|Max sentence length:|256|
+
+## References
+
+- https://huggingface.co/AnonymousSub/fpdm_roberta_pert_sent_0.01_squad2.0
\ No newline at end of file
diff --git a/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_fpdm_soup_model_squad2.0_en.md b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_fpdm_soup_model_squad2.0_en.md
new file mode 100644
index 00000000000000..b1fcd37c30d2d3
--- /dev/null
+++ b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_fpdm_soup_model_squad2.0_en.md
@@ -0,0 +1,88 @@
+---
+layout: model
+title: English RobertaForQuestionAnswering Cased model (from AnonymousSub)
+author: John Snow Labs
+name: roberta_qa_fpdm_soup_model_squad2.0
+date: 2023-01-20
+tags: [en, open_source, roberta, question_answering, tensorflow]
+task: Question Answering
+language: en
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: RoBertaForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained RobertaForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `fpdm_roberta_soup_model_squad2.0` is an English model originally trained by `AnonymousSub`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/roberta_qa_fpdm_soup_model_squad2.0_en_4.3.0_3.0_1674211121560.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/roberta_qa_fpdm_soup_model_squad2.0_en_4.3.0_3.0_1674211121560.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_fpdm_soup_model_squad2.0","en")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_fpdm_soup_model_squad2.0","en")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq("What's my name?","My name is Clara and I live in Berkeley.").toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|roberta_qa_fpdm_soup_model_squad2.0|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document, token]|
+|Output Labels:|[class]|
+|Language:|en|
+|Size:|460.4 MB|
+|Case sensitive:|true|
+|Max sentence length:|256|
+
+## References
+
+- https://huggingface.co/AnonymousSub/fpdm_roberta_soup_model_squad2.0
\ No newline at end of file
diff --git a/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_fpdm_triplet_ft_new_news_en.md b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_fpdm_triplet_ft_new_news_en.md
new file mode 100644
index 00000000000000..2a592cd156ed4d
--- /dev/null
+++ b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_fpdm_triplet_ft_new_news_en.md
@@ -0,0 +1,88 @@
+---
+layout: model
+title: English RobertaForQuestionAnswering Cased model (from AnonymousSub)
+author: John Snow Labs
+name: roberta_qa_fpdm_triplet_ft_new_news
+date: 2023-01-20
+tags: [en, open_source, roberta, question_answering, tensorflow]
+task: Question Answering
+language: en
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: RoBertaForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained RobertaForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `fpdm_triplet_roberta_FT_new_newsqa` is an English model originally trained by `AnonymousSub`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/roberta_qa_fpdm_triplet_ft_new_news_en_4.3.0_3.0_1674211184082.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/roberta_qa_fpdm_triplet_ft_new_news_en_4.3.0_3.0_1674211184082.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_fpdm_triplet_ft_new_news","en")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_fpdm_triplet_ft_new_news","en")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq("What's my name?","My name is Clara and I live in Berkeley.").toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|roberta_qa_fpdm_triplet_ft_new_news|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document, token]|
+|Output Labels:|[class]|
+|Language:|en|
+|Size:|461.5 MB|
+|Case sensitive:|true|
+|Max sentence length:|256|
+
+## References
+
+- https://huggingface.co/AnonymousSub/fpdm_triplet_roberta_FT_new_newsqa
\ No newline at end of file
diff --git a/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_fpdm_triplet_ft_news_en.md b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_fpdm_triplet_ft_news_en.md
new file mode 100644
index 00000000000000..39c8596ec5d3b8
--- /dev/null
+++ b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_fpdm_triplet_ft_news_en.md
@@ -0,0 +1,88 @@
+---
+layout: model
+title: English RobertaForQuestionAnswering Cased model (from AnonymousSub)
+author: John Snow Labs
+name: roberta_qa_fpdm_triplet_ft_news
+date: 2023-01-20
+tags: [en, open_source, roberta, question_answering, tensorflow]
+task: Question Answering
+language: en
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: RoBertaForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained RobertaForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `fpdm_triplet_roberta_FT_newsqa` is an English model originally trained by `AnonymousSub`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/roberta_qa_fpdm_triplet_ft_news_en_4.3.0_3.0_1674211248579.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/roberta_qa_fpdm_triplet_ft_news_en_4.3.0_3.0_1674211248579.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_fpdm_triplet_ft_news","en")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_fpdm_triplet_ft_news","en")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq("What's my name?","My name is Clara and I live in Berkeley.").toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|roberta_qa_fpdm_triplet_ft_news|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document, token]|
+|Output Labels:|[class]|
+|Language:|en|
+|Size:|458.6 MB|
+|Case sensitive:|true|
+|Max sentence length:|256|
+
+## References
+
+- https://huggingface.co/AnonymousSub/fpdm_triplet_roberta_FT_newsqa
\ No newline at end of file
diff --git a/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_ft_new_news_en.md b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_ft_new_news_en.md
new file mode 100644
index 00000000000000..f3939188e05659
--- /dev/null
+++ b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_ft_new_news_en.md
@@ -0,0 +1,88 @@
+---
+layout: model
+title: English RobertaForQuestionAnswering Cased model (from AnonymousSub)
+author: John Snow Labs
+name: roberta_qa_ft_new_news
+date: 2023-01-20
+tags: [en, open_source, roberta, question_answering, tensorflow]
+task: Question Answering
+language: en
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: RoBertaForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained RobertaForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `roberta_FT_new_newsqa` is an English model originally trained by `AnonymousSub`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/roberta_qa_ft_new_news_en_4.3.0_3.0_1674222919626.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/roberta_qa_ft_new_news_en_4.3.0_3.0_1674222919626.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_ft_new_news","en")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_ft_new_news","en")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq("What's my name?","My name is Clara and I live in Berkeley.").toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|roberta_qa_ft_new_news|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document, token]|
+|Output Labels:|[class]|
+|Language:|en|
+|Size:|461.5 MB|
+|Case sensitive:|true|
+|Max sentence length:|256|
+
+## References
+
+- https://huggingface.co/AnonymousSub/roberta_FT_new_newsqa
\ No newline at end of file
diff --git a/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_ft_news_en.md b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_ft_news_en.md
new file mode 100644
index 00000000000000..577c94b03a3e29
--- /dev/null
+++ b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_ft_news_en.md
@@ -0,0 +1,88 @@
+---
+layout: model
+title: English RobertaForQuestionAnswering Cased model (from AnonymousSub)
+author: John Snow Labs
+name: roberta_qa_ft_news
+date: 2023-01-20
+tags: [en, open_source, roberta, question_answering, tensorflow]
+task: Question Answering
+language: en
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: RoBertaForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained RobertaForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `roberta_FT_newsqa` is an English model originally trained by `AnonymousSub`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/roberta_qa_ft_news_en_4.3.0_3.0_1674222981909.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/roberta_qa_ft_news_en_4.3.0_3.0_1674222981909.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_ft_news","en")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_ft_news","en")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq("What's my name?","My name is Clara and I live in Berkeley.").toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|roberta_qa_ft_news|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document, token]|
+|Output Labels:|[class]|
+|Language:|en|
+|Size:|458.6 MB|
+|Case sensitive:|true|
+|Max sentence length:|256|
+
+## References
+
+- https://huggingface.co/AnonymousSub/roberta_FT_newsqa
\ No newline at end of file
diff --git a/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_huxxx657_base_finetuned_squad_en.md b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_huxxx657_base_finetuned_squad_en.md
new file mode 100644
index 00000000000000..0ee56332c583f1
--- /dev/null
+++ b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_huxxx657_base_finetuned_squad_en.md
@@ -0,0 +1,88 @@
+---
+layout: model
+title: English RobertaForQuestionAnswering Base Cased model (from huxxx657)
+author: John Snow Labs
+name: roberta_qa_huxxx657_base_finetuned_squad
+date: 2023-01-20
+tags: [en, open_source, roberta, question_answering, tensorflow]
+task: Question Answering
+language: en
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: RoBertaForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained RobertaForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `roberta-base-finetuned-squad` is an English model originally trained by `huxxx657`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/roberta_qa_huxxx657_base_finetuned_squad_en_4.3.0_3.0_1674217238547.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/roberta_qa_huxxx657_base_finetuned_squad_en_4.3.0_3.0_1674217238547.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_huxxx657_base_finetuned_squad","en")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_huxxx657_base_finetuned_squad","en")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq("What's my name?","My name is Clara and I live in Berkeley.").toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|roberta_qa_huxxx657_base_finetuned_squad|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document, token]|
+|Output Labels:|[class]|
+|Language:|en|
+|Size:|464.3 MB|
+|Case sensitive:|true|
+|Max sentence length:|256|
+
+## References
+
+- https://huggingface.co/huxxx657/roberta-base-finetuned-squad
\ No newline at end of file
diff --git a/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_icebert_is_finetune_is.md b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_icebert_is_finetune_is.md
new file mode 100644
index 00000000000000..802f84ff363ab3
--- /dev/null
+++ b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_icebert_is_finetune_is.md
@@ -0,0 +1,88 @@
+---
+layout: model
+title: Icelandic RobertaForQuestionAnswering Cased model (from nozagleh)
+author: John Snow Labs
+name: roberta_qa_icebert_is_finetune
+date: 2023-01-20
+tags: [is, open_source, roberta, question_answering, tensorflow]
+task: Question Answering
+language: is
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: RoBertaForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained RobertaForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `IceBERT-QA-Is-finetune` is an Icelandic model originally trained by `nozagleh`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/roberta_qa_icebert_is_finetune_is_4.3.0_3.0_1674208136981.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/roberta_qa_icebert_is_finetune_is_4.3.0_3.0_1674208136981.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_icebert_is_finetune","is")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_icebert_is_finetune","is")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq("What's my name?","My name is Clara and I live in Berkeley.").toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|roberta_qa_icebert_is_finetune|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document, token]|
+|Output Labels:|[class]|
+|Language:|is|
+|Size:|451.0 MB|
+|Case sensitive:|true|
+|Max sentence length:|256|
+
+## References
+
+- https://huggingface.co/nozagleh/IceBERT-QA-Is-finetune
\ No newline at end of file
diff --git a/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_janeel_base_finetuned_squad_en.md b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_janeel_base_finetuned_squad_en.md
new file mode 100644
index 00000000000000..c3bba4b0605adb
--- /dev/null
+++ b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_janeel_base_finetuned_squad_en.md
@@ -0,0 +1,88 @@
+---
+layout: model
+title: English RobertaForQuestionAnswering Base Cased model (from janeel)
+author: John Snow Labs
+name: roberta_qa_janeel_base_finetuned_squad
+date: 2023-01-20
+tags: [en, open_source, roberta, question_answering, tensorflow]
+task: Question Answering
+language: en
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: RoBertaForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained RobertaForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `roberta-base-finetuned-squad` is an English model originally trained by `janeel`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/roberta_qa_janeel_base_finetuned_squad_en_4.3.0_3.0_1674217296605.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/roberta_qa_janeel_base_finetuned_squad_en_4.3.0_3.0_1674217296605.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_janeel_base_finetuned_squad","en")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_janeel_base_finetuned_squad","en")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq("What's my name?","My name is Clara and I live in Berkeley.").toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|roberta_qa_janeel_base_finetuned_squad|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document, token]|
+|Output Labels:|[class]|
+|Language:|en|
+|Size:|464.3 MB|
+|Case sensitive:|true|
+|Max sentence length:|256|
+
+## References
+
+- https://huggingface.co/janeel/roberta-base-finetuned-squad
\ No newline at end of file
diff --git a/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_janeel_tiny_squad2_finetuned_squad_en.md b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_janeel_tiny_squad2_finetuned_squad_en.md
new file mode 100644
index 00000000000000..dac2092286b6b3
--- /dev/null
+++ b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_janeel_tiny_squad2_finetuned_squad_en.md
@@ -0,0 +1,88 @@
+---
+layout: model
+title: English RobertaForQuestionAnswering Tiny Cased model (from janeel)
+author: John Snow Labs
+name: roberta_qa_janeel_tiny_squad2_finetuned_squad
+date: 2023-01-20
+tags: [en, open_source, roberta, question_answering, tensorflow]
+task: Question Answering
+language: en
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: RoBertaForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained RobertaForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `tinyroberta-squad2-finetuned-squad` is an English model originally trained by `janeel`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/roberta_qa_janeel_tiny_squad2_finetuned_squad_en_4.3.0_3.0_1674224399345.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/roberta_qa_janeel_tiny_squad2_finetuned_squad_en_4.3.0_3.0_1674224399345.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_janeel_tiny_squad2_finetuned_squad","en")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_janeel_tiny_squad2_finetuned_squad","en")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq("What's my name?","My name is Clara and I live in Berkeley.").toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|roberta_qa_janeel_tiny_squad2_finetuned_squad|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document, token]|
+|Output Labels:|[class]|
+|Language:|en|
+|Size:|307.3 MB|
+|Case sensitive:|true|
+|Max sentence length:|256|
+
+## References
+
+- https://huggingface.co/janeel/tinyroberta-squad2-finetuned-squad
\ No newline at end of file
diff --git a/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_jgammack_base_squad_en.md b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_jgammack_base_squad_en.md
new file mode 100644
index 00000000000000..1bbce8ede82dd3
--- /dev/null
+++ b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_jgammack_base_squad_en.md
@@ -0,0 +1,88 @@
+---
+layout: model
+title: English RobertaForQuestionAnswering Base Cased model (from jgammack)
+author: John Snow Labs
+name: roberta_qa_jgammack_base_squad
+date: 2023-01-20
+tags: [en, open_source, roberta, question_answering, tensorflow]
+task: Question Answering
+language: en
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: RoBertaForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained RobertaForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `roberta-base-squad` is an English model originally trained by `jgammack`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/roberta_qa_jgammack_base_squad_en_4.3.0_3.0_1674218670079.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/roberta_qa_jgammack_base_squad_en_4.3.0_3.0_1674218670079.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_jgammack_base_squad","en")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_jgammack_base_squad","en")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq("What's my name?","My name is Clara and I live in Berkeley.").toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|roberta_qa_jgammack_base_squad|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document, token]|
+|Output Labels:|[class]|
+|Language:|en|
+|Size:|464.2 MB|
+|Case sensitive:|true|
+|Max sentence length:|256|
+
+## References
+
+- https://huggingface.co/jgammack/roberta-base-squad
\ No newline at end of file
diff --git a/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_joantirant_base_bne_finetuned_s_c_es.md b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_joantirant_base_bne_finetuned_s_c_es.md
new file mode 100644
index 00000000000000..ae2e3ee848740f
--- /dev/null
+++ b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_joantirant_base_bne_finetuned_s_c_es.md
@@ -0,0 +1,88 @@
+---
+layout: model
+title: Spanish RobertaForQuestionAnswering Base Cased model (from JoanTirant)
+author: John Snow Labs
+name: roberta_qa_joantirant_base_bne_finetuned_s_c
+date: 2023-01-20
+tags: [es, open_source, roberta, question_answering, tensorflow]
+task: Question Answering
+language: es
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: RoBertaForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained RobertaForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `roberta-base-bne-finetuned-sqac` is a Spanish model originally trained by `JoanTirant`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/roberta_qa_joantirant_base_bne_finetuned_s_c_es_4.3.0_3.0_1674212952508.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/roberta_qa_joantirant_base_bne_finetuned_s_c_es_4.3.0_3.0_1674212952508.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_joantirant_base_bne_finetuned_s_c","es")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_joantirant_base_bne_finetuned_s_c","es")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq("What's my name?","My name is Clara and I live in Berkeley.").toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|roberta_qa_joantirant_base_bne_finetuned_s_c|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|es|
+|Size:|460.4 MB|
+|Case sensitive:|true|
+|Max sentence length:|256|
+
+## References
+
+- https://huggingface.co/JoanTirant/roberta-base-bne-finetuned-sqac
\ No newline at end of file
diff --git a/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_jonatangk_base_bne_finetuned_s_c_es.md b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_jonatangk_base_bne_finetuned_s_c_es.md
new file mode 100644
index 00000000000000..20e792e2ada72e
--- /dev/null
+++ b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_jonatangk_base_bne_finetuned_s_c_es.md
@@ -0,0 +1,88 @@
+---
+layout: model
+title: Spanish RobertaForQuestionAnswering Base Cased model (from JonatanGk)
+author: John Snow Labs
+name: roberta_qa_jonatangk_base_bne_finetuned_s_c
+date: 2023-01-20
+tags: [es, open_source, roberta, question_answering, tensorflow]
+task: Question Answering
+language: es
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: RoBertaForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained RobertaForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `roberta-base-bne-finetuned-sqac` is a Spanish model originally trained by `JonatanGk`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/roberta_qa_jonatangk_base_bne_finetuned_s_c_es_4.3.0_3.0_1674213010026.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/roberta_qa_jonatangk_base_bne_finetuned_s_c_es_4.3.0_3.0_1674213010026.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_jonatangk_base_bne_finetuned_s_c","es")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_jonatangk_base_bne_finetuned_s_c","es")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq("What's my name?","My name is Clara and I live in Berkeley.").toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|roberta_qa_jonatangk_base_bne_finetuned_s_c|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|es|
+|Size:|460.4 MB|
+|Case sensitive:|true|
+|Max sentence length:|256|
+
+## References
+
+- https://huggingface.co/JonatanGk/roberta-base-bne-finetuned-sqac
\ No newline at end of file
diff --git a/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_l_squadv1.1_en.md b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_l_squadv1.1_en.md
new file mode 100644
index 00000000000000..58a9b4828f7f0c
--- /dev/null
+++ b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_l_squadv1.1_en.md
@@ -0,0 +1,88 @@
+---
+layout: model
+title: English RobertaForQuestionAnswering Cased model (from vuiseng9)
+author: John Snow Labs
+name: roberta_qa_l_squadv1.1
+date: 2023-01-20
+tags: [en, open_source, roberta, question_answering, tensorflow]
+task: Question Answering
+language: en
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: RoBertaForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained RobertaForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `roberta-l-squadv1.1` is an English model originally trained by `vuiseng9`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/roberta_qa_l_squadv1.1_en_4.3.0_3.0_1674220913524.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/roberta_qa_l_squadv1.1_en_4.3.0_3.0_1674220913524.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_l_squadv1.1","en")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_l_squadv1.1","en")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq("What's my name?","My name is Clara and I live in Berkeley.").toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|roberta_qa_l_squadv1.1|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|1.3 GB|
+|Case sensitive:|true|
+|Max sentence length:|256|
+
+## References
+
+- https://huggingface.co/vuiseng9/roberta-l-squadv1.1
\ No newline at end of file
diff --git a/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_large_data_seed_0_en.md b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_large_data_seed_0_en.md
new file mode 100644
index 00000000000000..31fd24d215ab48
--- /dev/null
+++ b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_large_data_seed_0_en.md
@@ -0,0 +1,88 @@
+---
+layout: model
+title: English RobertaForQuestionAnswering Large Cased model (from anas-awadalla)
+author: John Snow Labs
+name: roberta_qa_large_data_seed_0
+date: 2023-01-20
+tags: [en, open_source, roberta, question_answering, tensorflow]
+task: Question Answering
+language: en
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: RoBertaForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained RobertaForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `roberta-large-data-seed-0` is an English model originally trained by `anas-awadalla`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/roberta_qa_large_data_seed_0_en_4.3.0_3.0_1674221077269.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/roberta_qa_large_data_seed_0_en_4.3.0_3.0_1674221077269.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_large_data_seed_0","en")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_large_data_seed_0","en")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq("What's my name?","My name is Clara and I live in Berkeley.").toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|roberta_qa_large_data_seed_0|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|1.3 GB|
+|Case sensitive:|true|
+|Max sentence length:|256|
+
+## References
+
+- https://huggingface.co/anas-awadalla/roberta-large-data-seed-0
\ No newline at end of file
diff --git a/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_large_data_seed_4_en.md b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_large_data_seed_4_en.md
new file mode 100644
index 00000000000000..be6e760daa764b
--- /dev/null
+++ b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_large_data_seed_4_en.md
@@ -0,0 +1,88 @@
+---
+layout: model
+title: English RobertaForQuestionAnswering Large Cased model (from anas-awadalla)
+author: John Snow Labs
+name: roberta_qa_large_data_seed_4
+date: 2023-01-20
+tags: [en, open_source, roberta, question_answering, tensorflow]
+task: Question Answering
+language: en
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: RoBertaForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained RobertaForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `roberta-large-data-seed-4` is an English model originally trained by `anas-awadalla`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/roberta_qa_large_data_seed_4_en_4.3.0_3.0_1674221236388.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/roberta_qa_large_data_seed_4_en_4.3.0_3.0_1674221236388.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_large_data_seed_4","en")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_large_data_seed_4","en")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq("What's my name?","My name is Clara and I live in Berkeley.").toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|roberta_qa_large_data_seed_4|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|1.3 GB|
+|Case sensitive:|true|
+|Max sentence length:|256|
+
+## References
+
+- https://huggingface.co/anas-awadalla/roberta-large-data-seed-4
\ No newline at end of file
diff --git a/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_large_few_shot_k_1024_finetuned_squad_seed_4_en.md b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_large_few_shot_k_1024_finetuned_squad_seed_4_en.md
new file mode 100644
index 00000000000000..acb9c127b4108d
--- /dev/null
+++ b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_large_few_shot_k_1024_finetuned_squad_seed_4_en.md
@@ -0,0 +1,88 @@
+---
+layout: model
+title: English RobertaForQuestionAnswering Large Cased model (from anas-awadalla)
+author: John Snow Labs
+name: roberta_qa_large_few_shot_k_1024_finetuned_squad_seed_4
+date: 2023-01-20
+tags: [en, open_source, roberta, question_answering, tensorflow]
+task: Question Answering
+language: en
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: RoBertaForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained RobertaForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `roberta-large-few-shot-k-1024-finetuned-squad-seed-4` is an English model originally trained by `anas-awadalla`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/roberta_qa_large_few_shot_k_1024_finetuned_squad_seed_4_en_4.3.0_3.0_1674221411536.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/roberta_qa_large_few_shot_k_1024_finetuned_squad_seed_4_en_4.3.0_3.0_1674221411536.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_large_few_shot_k_1024_finetuned_squad_seed_4","en")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_large_few_shot_k_1024_finetuned_squad_seed_4","en")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq("What's my name?","My name is Clara and I live in Berkeley.").toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|roberta_qa_large_few_shot_k_1024_finetuned_squad_seed_4|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|1.3 GB|
+|Case sensitive:|true|
+|Max sentence length:|256|
+
+## References
+
+- https://huggingface.co/anas-awadalla/roberta-large-few-shot-k-1024-finetuned-squad-seed-4
\ No newline at end of file
diff --git a/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_large_few_shot_k_32_finetuned_squad_seed_0_en.md b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_large_few_shot_k_32_finetuned_squad_seed_0_en.md
new file mode 100644
index 00000000000000..0fbded5786ca73
--- /dev/null
+++ b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_large_few_shot_k_32_finetuned_squad_seed_0_en.md
@@ -0,0 +1,88 @@
+---
+layout: model
+title: English RobertaForQuestionAnswering Large Cased model (from anas-awadalla)
+author: John Snow Labs
+name: roberta_qa_large_few_shot_k_32_finetuned_squad_seed_0
+date: 2023-01-20
+tags: [en, open_source, roberta, question_answering, tensorflow]
+task: Question Answering
+language: en
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: RoBertaForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained RobertaForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `roberta-large-few-shot-k-32-finetuned-squad-seed-0` is an English model originally trained by `anas-awadalla`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/roberta_qa_large_few_shot_k_32_finetuned_squad_seed_0_en_4.3.0_3.0_1674221604163.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/roberta_qa_large_few_shot_k_32_finetuned_squad_seed_0_en_4.3.0_3.0_1674221604163.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_large_few_shot_k_32_finetuned_squad_seed_0","en")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_large_few_shot_k_32_finetuned_squad_seed_0","en")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq("What's my name?","My name is Clara and I live in Berkeley.").toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|roberta_qa_large_few_shot_k_32_finetuned_squad_seed_0|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|1.3 GB|
+|Case sensitive:|true|
+|Max sentence length:|256|
+
+## References
+
+- https://huggingface.co/anas-awadalla/roberta-large-few-shot-k-32-finetuned-squad-seed-0
\ No newline at end of file
diff --git a/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_large_fine_tuned_squad_es.md b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_large_fine_tuned_squad_es.md
new file mode 100644
index 00000000000000..56b9617a5c99ae
--- /dev/null
+++ b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_large_fine_tuned_squad_es.md
@@ -0,0 +1,88 @@
+---
+layout: model
+title: Spanish RobertaForQuestionAnswering Large Cased model (from stevemobs)
+author: John Snow Labs
+name: roberta_qa_large_fine_tuned_squad
+date: 2023-01-20
+tags: [es, open_source, roberta, question_answering, tensorflow]
+task: Question Answering
+language: es
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: RoBertaForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained RobertaForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `roberta-large-fine-tuned-squad-es` is a Spanish model originally trained by `stevemobs`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/roberta_qa_large_fine_tuned_squad_es_4.3.0_3.0_1674221753097.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/roberta_qa_large_fine_tuned_squad_es_4.3.0_3.0_1674221753097.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_large_fine_tuned_squad","es")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_large_fine_tuned_squad","es")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq("What's my name?","My name is Clara and I live in Berkeley.").toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|roberta_qa_large_fine_tuned_squad|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|es|
+|Size:|1.3 GB|
+|Case sensitive:|true|
+|Max sentence length:|256|
+
+## References
+
+- https://huggingface.co/stevemobs/roberta-large-fine-tuned-squad-es
\ No newline at end of file
diff --git a/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_large_squad2_hp_en.md b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_large_squad2_hp_en.md
new file mode 100644
index 00000000000000..1f461e817ba1c1
--- /dev/null
+++ b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_large_squad2_hp_en.md
@@ -0,0 +1,88 @@
+---
+layout: model
+title: English RobertaForQuestionAnswering Large Cased model (from deepset)
+author: John Snow Labs
+name: roberta_qa_large_squad2_hp
+date: 2023-01-20
+tags: [en, open_source, roberta, question_answering, tensorflow]
+task: Question Answering
+language: en
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: RoBertaForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained RobertaForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `roberta-large-squad2-hp` is an English model originally trained by `deepset`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/roberta_qa_large_squad2_hp_en_4.3.0_3.0_1674222219863.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/roberta_qa_large_squad2_hp_en_4.3.0_3.0_1674222219863.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_large_squad2_hp","en")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_large_squad2_hp","en")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq("What's my name?","My name is Clara and I live in Berkeley.").toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|roberta_qa_large_squad2_hp|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|1.3 GB|
+|Case sensitive:|true|
+|Max sentence length:|256|
+
+## References
+
+- https://huggingface.co/deepset/roberta-large-squad2-hp
\ No newline at end of file
diff --git a/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_large_squad_en.md b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_large_squad_en.md
new file mode 100644
index 00000000000000..ab634182909f46
--- /dev/null
+++ b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_large_squad_en.md
@@ -0,0 +1,88 @@
+---
+layout: model
+title: English RobertaForQuestionAnswering Large Cased model (from susghosh)
+author: John Snow Labs
+name: roberta_qa_large_squad
+date: 2023-01-20
+tags: [en, open_source, roberta, question_answering, tensorflow]
+task: Question Answering
+language: en
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: RoBertaForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained RobertaForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `roberta-large-squad` is an English model originally trained by `susghosh`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/roberta_qa_large_squad_en_4.3.0_3.0_1674221913718.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/roberta_qa_large_squad_en_4.3.0_3.0_1674221913718.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_large_squad","en")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_large_squad","en")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq("What's my name?","My name is Clara and I live in Berkeley.").toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|roberta_qa_large_squad|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|1.3 GB|
+|Case sensitive:|true|
+|Max sentence length:|256|
+
+## References
+
+- https://huggingface.co/susghosh/roberta-large-squad
\ No newline at end of file
diff --git a/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_large_squad_v1_en.md b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_large_squad_v1_en.md
new file mode 100644
index 00000000000000..e01c9ccce508a3
--- /dev/null
+++ b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_large_squad_v1_en.md
@@ -0,0 +1,88 @@
+---
+layout: model
+title: English RobertaForQuestionAnswering Large Cased model (from csarron)
+author: John Snow Labs
+name: roberta_qa_large_squad_v1
+date: 2023-01-20
+tags: [en, open_source, roberta, question_answering, tensorflow]
+task: Question Answering
+language: en
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: RoBertaForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained RobertaForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `roberta-large-squad-v1` is an English model originally trained by `csarron`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/roberta_qa_large_squad_v1_en_4.3.0_3.0_1674222069233.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/roberta_qa_large_squad_v1_en_4.3.0_3.0_1674222069233.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_large_squad_v1","en")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_large_squad_v1","en")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq("What's my name?","My name is Clara and I live in Berkeley.").toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|roberta_qa_large_squad_v1|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document, token]|
+|Output Labels:|[class]|
+|Language:|en|
+|Size:|1.3 GB|
+|Case sensitive:|true|
+|Max sentence length:|256|
+
+## References
+
+- https://huggingface.co/csarron/roberta-large-squad-v1
\ No newline at end of file
diff --git a/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_li_base_squad2_en.md b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_li_base_squad2_en.md
new file mode 100644
index 00000000000000..ee416ca7bae6d4
--- /dev/null
+++ b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_li_base_squad2_en.md
@@ -0,0 +1,90 @@
+---
+layout: model
+title: English RobertaForQuestionAnswering Base Cased model (from Li)
+author: John Snow Labs
+name: roberta_qa_li_base_squad2
+date: 2023-01-20
+tags: [en, open_source, roberta, question_answering, tensorflow]
+task: Question Answering
+language: en
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: RoBertaForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained RobertaForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `roberta-base-squad2` is an English model originally trained by `Li`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/roberta_qa_li_base_squad2_en_4.3.0_3.0_1674218901284.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/roberta_qa_li_base_squad2_en_4.3.0_3.0_1674218901284.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_li_base_squad2","en")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_li_base_squad2","en")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq("What's my name?","My name is Clara and I live in Berkeley.").toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|roberta_qa_li_base_squad2|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document, token]|
+|Output Labels:|[class]|
+|Language:|en|
+|Size:|462.0 MB|
+|Case sensitive:|true|
+|Max sentence length:|256|
+
+## References
+
+- https://huggingface.co/Li/roberta-base-squad2
+- https://rajpurkar.github.io/SQuAD-explorer
+- https://rajpurkar.github.io/SQuAD-explorer/
\ No newline at end of file
diff --git a/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_linh101201_base_finetuned_squad_en.md b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_linh101201_base_finetuned_squad_en.md
new file mode 100644
index 00000000000000..97a58f858b5527
--- /dev/null
+++ b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_linh101201_base_finetuned_squad_en.md
@@ -0,0 +1,88 @@
+---
+layout: model
+title: English RobertaForQuestionAnswering Base Cased model (from linh101201)
+author: John Snow Labs
+name: roberta_qa_linh101201_base_finetuned_squad
+date: 2023-01-20
+tags: [en, open_source, roberta, question_answering, tensorflow]
+task: Question Answering
+language: en
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: RoBertaForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained RobertaForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `roberta-base-finetuned-squad` is an English model originally trained by `linh101201`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/roberta_qa_linh101201_base_finetuned_squad_en_4.3.0_3.0_1674217360099.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/roberta_qa_linh101201_base_finetuned_squad_en_4.3.0_3.0_1674217360099.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_linh101201_base_finetuned_squad","en")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_linh101201_base_finetuned_squad","en")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq("What's my name?","My name is Clara and I live in Berkeley.").toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|roberta_qa_linh101201_base_finetuned_squad|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document, token]|
+|Output Labels:|[class]|
+|Language:|en|
+|Size:|424.0 MB|
+|Case sensitive:|true|
+|Max sentence length:|256|
+
+## References
+
+- https://huggingface.co/linh101201/roberta-base-finetuned-squad
\ No newline at end of file
diff --git a/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_lorenzkuhn_base_finetuned_squad_en.md b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_lorenzkuhn_base_finetuned_squad_en.md
new file mode 100644
index 00000000000000..a56872c75b5b1a
--- /dev/null
+++ b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_lorenzkuhn_base_finetuned_squad_en.md
@@ -0,0 +1,88 @@
+---
+layout: model
+title: English RobertaForQuestionAnswering Base Cased model (from lorenzkuhn)
+author: John Snow Labs
+name: roberta_qa_lorenzkuhn_base_finetuned_squad
+date: 2023-01-20
+tags: [en, open_source, roberta, question_answering, tensorflow]
+task: Question Answering
+language: en
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: RoBertaForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained RobertaForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `roberta-base-finetuned-squad` is an English model originally trained by `lorenzkuhn`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/roberta_qa_lorenzkuhn_base_finetuned_squad_en_4.3.0_3.0_1674217419784.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/roberta_qa_lorenzkuhn_base_finetuned_squad_en_4.3.0_3.0_1674217419784.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_lorenzkuhn_base_finetuned_squad","en")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_lorenzkuhn_base_finetuned_squad","en")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq("What's my name?","My name is Clara and I live in Berkeley.").toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|roberta_qa_lorenzkuhn_base_finetuned_squad|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document, token]|
+|Output Labels:|[class]|
+|Language:|en|
+|Size:|464.2 MB|
+|Case sensitive:|true|
+|Max sentence length:|256|
+
+## References
+
+- https://huggingface.co/lorenzkuhn/roberta-base-finetuned-squad
\ No newline at end of file
diff --git a/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_mask_step_pretraining_base_squadv2_epochs_3_en.md b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_mask_step_pretraining_base_squadv2_epochs_3_en.md
new file mode 100644
index 00000000000000..8aefa83f5c69a4
--- /dev/null
+++ b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_mask_step_pretraining_base_squadv2_epochs_3_en.md
@@ -0,0 +1,88 @@
+---
+layout: model
+title: English RobertaForQuestionAnswering Base Cased model (from AnonymousSub)
+author: John Snow Labs
+name: roberta_qa_mask_step_pretraining_base_squadv2_epochs_3
+date: 2023-01-20
+tags: [en, open_source, roberta, question_answering, tensorflow]
+task: Question Answering
+language: en
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: RoBertaForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained RobertaForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `mask_step_pretraining_roberta-base_squadv2_epochs_3` is an English model originally trained by `AnonymousSub`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/roberta_qa_mask_step_pretraining_base_squadv2_epochs_3_en_4.3.0_3.0_1674211403400.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/roberta_qa_mask_step_pretraining_base_squadv2_epochs_3_en_4.3.0_3.0_1674211403400.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_mask_step_pretraining_base_squadv2_epochs_3","en")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_mask_step_pretraining_base_squadv2_epochs_3","en")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq("What's my name?","My name is Clara and I live in Berkeley.").toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|roberta_qa_mask_step_pretraining_base_squadv2_epochs_3|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document, token]|
+|Output Labels:|[class]|
+|Language:|en|
+|Size:|464.7 MB|
+|Case sensitive:|true|
+|Max sentence length:|256|
+
+## References
+
+- https://huggingface.co/AnonymousSub/mask_step_pretraining_roberta-base_squadv2_epochs_3
\ No newline at end of file
diff --git a/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_mask_step_pretraining_recipes_base_squadv2_epochs_3_en.md b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_mask_step_pretraining_recipes_base_squadv2_epochs_3_en.md
new file mode 100644
index 00000000000000..c11df83fa70938
--- /dev/null
+++ b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_mask_step_pretraining_recipes_base_squadv2_epochs_3_en.md
@@ -0,0 +1,88 @@
+---
+layout: model
+title: English RobertaForQuestionAnswering Base Cased model (from AnonymousSub)
+author: John Snow Labs
+name: roberta_qa_mask_step_pretraining_recipes_base_squadv2_epochs_3
+date: 2023-01-20
+tags: [en, open_source, roberta, question_answering, tensorflow]
+task: Question Answering
+language: en
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: RoBertaForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained RobertaForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `mask_step_pretraining_recipes-roberta-base_squadv2_epochs_3` is an English model originally trained by `AnonymousSub`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/roberta_qa_mask_step_pretraining_recipes_base_squadv2_epochs_3_en_4.3.0_3.0_1674211343406.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/roberta_qa_mask_step_pretraining_recipes_base_squadv2_epochs_3_en_4.3.0_3.0_1674211343406.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_mask_step_pretraining_recipes_base_squadv2_epochs_3","en")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_mask_step_pretraining_recipes_base_squadv2_epochs_3","en")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq("What's my name?","My name is Clara and I live in Berkeley.").toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|roberta_qa_mask_step_pretraining_recipes_base_squadv2_epochs_3|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document, token]|
+|Output Labels:|[class]|
+|Language:|en|
+|Size:|467.0 MB|
+|Case sensitive:|true|
+|Max sentence length:|256|
+
+## References
+
+- https://huggingface.co/AnonymousSub/mask_step_pretraining_recipes-roberta-base_squadv2_epochs_3
\ No newline at end of file
diff --git a/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_mbeck_base_squad2_en.md b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_mbeck_base_squad2_en.md
new file mode 100644
index 00000000000000..bc35f32a60e0da
--- /dev/null
+++ b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_mbeck_base_squad2_en.md
@@ -0,0 +1,88 @@
+---
+layout: model
+title: English RobertaForQuestionAnswering Base Cased model (from mbeck)
+author: John Snow Labs
+name: roberta_qa_mbeck_base_squad2
+date: 2023-01-20
+tags: [en, open_source, roberta, question_answering, tensorflow]
+task: Question Answering
+language: en
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: RoBertaForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained RobertaForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `roberta-base-squad2` is an English model originally trained by `mbeck`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/roberta_qa_mbeck_base_squad2_en_4.3.0_3.0_1674219019104.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/roberta_qa_mbeck_base_squad2_en_4.3.0_3.0_1674219019104.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_mbeck_base_squad2","en")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_mbeck_base_squad2","en")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq("What's my name?","My name is Clara and I live in Berkeley.").toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|roberta_qa_mbeck_base_squad2|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document, token]|
+|Output Labels:|[class]|
+|Language:|en|
+|Size:|459.2 MB|
+|Case sensitive:|true|
+|Max sentence length:|256|
+
+## References
+
+- https://huggingface.co/mbeck/roberta-base-squad2
\ No newline at end of file
diff --git a/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_miamiya_base_squad2_finetuned_squad_en.md b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_miamiya_base_squad2_finetuned_squad_en.md
new file mode 100644
index 00000000000000..1a5e9a34e64b08
--- /dev/null
+++ b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_miamiya_base_squad2_finetuned_squad_en.md
@@ -0,0 +1,88 @@
+---
+layout: model
+title: English RobertaForQuestionAnswering Base Cased model (from miamiya)
+author: John Snow Labs
+name: roberta_qa_miamiya_base_squad2_finetuned_squad
+date: 2023-01-20
+tags: [en, open_source, roberta, question_answering, tensorflow]
+task: Question Answering
+language: en
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: RoBertaForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained RobertaForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `roberta-base-squad2-finetuned-squad` is an English model originally trained by `miamiya`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/roberta_qa_miamiya_base_squad2_finetuned_squad_en_4.3.0_3.0_1674219308729.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/roberta_qa_miamiya_base_squad2_finetuned_squad_en_4.3.0_3.0_1674219308729.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_miamiya_base_squad2_finetuned_squad","en")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_miamiya_base_squad2_finetuned_squad","en")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq("What's my name?","My name is Clara and I live in Berkeley.").toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|roberta_qa_miamiya_base_squad2_finetuned_squad|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document, token]|
+|Output Labels:|[class]|
+|Language:|en|
+|Size:|464.3 MB|
+|Case sensitive:|true|
+|Max sentence length:|256|
+
+## References
+
+- https://huggingface.co/miamiya/roberta-base-squad2-finetuned-squad
\ No newline at end of file
diff --git a/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_minilmv2_l6_h384_squad1.1_en.md b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_minilmv2_l6_h384_squad1.1_en.md
new file mode 100644
index 00000000000000..27aac175ebf360
--- /dev/null
+++ b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_minilmv2_l6_h384_squad1.1_en.md
@@ -0,0 +1,88 @@
+---
+layout: model
+title: English RobertaForQuestionAnswering Mini Cased model (from sguskin)
+author: John Snow Labs
+name: roberta_qa_minilmv2_l6_h384_squad1.1
+date: 2023-01-20
+tags: [en, open_source, roberta, question_answering, tensorflow]
+task: Question Answering
+language: en
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: RoBertaForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained RobertaForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `minilmv2-L6-H384-squad1.1` is an English model originally trained by `sguskin`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/roberta_qa_minilmv2_l6_h384_squad1.1_en_4.3.0_3.0_1674211435898.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/roberta_qa_minilmv2_l6_h384_squad1.1_en_4.3.0_3.0_1674211435898.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_minilmv2_l6_h384_squad1.1","en")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_minilmv2_l6_h384_squad1.1","en")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq("What's my name?","My name is Clara and I live in Berkeley.").toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|roberta_qa_minilmv2_l6_h384_squad1.1|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document, token]|
+|Output Labels:|[class]|
+|Language:|en|
+|Size:|112.1 MB|
+|Case sensitive:|true|
+|Max sentence length:|256|
+
+## References
+
+- https://huggingface.co/sguskin/minilmv2-L6-H384-squad1.1
\ No newline at end of file
diff --git a/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_model_10k_en.md b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_model_10k_en.md
new file mode 100644
index 00000000000000..14685100beee43
--- /dev/null
+++ b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_model_10k_en.md
@@ -0,0 +1,88 @@
+---
+layout: model
+title: English RobertaForQuestionAnswering Cased model (from anablasi)
+author: John Snow Labs
+name: roberta_qa_model_10k
+date: 2023-01-20
+tags: [en, open_source, roberta, question_answering, tensorflow]
+task: Question Answering
+language: en
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: RoBertaForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained RobertaForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `model_10k_qa` is an English model originally trained by `anablasi`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/roberta_qa_model_10k_en_4.3.0_3.0_1674211482662.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/roberta_qa_model_10k_en_4.3.0_3.0_1674211482662.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_model_10k","en")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_model_10k","en")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq("What's my name?","My name is Clara and I live in Berkeley.").toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|roberta_qa_model_10k|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document, token]|
+|Output Labels:|[class]|
+|Language:|en|
+|Size:|467.1 MB|
+|Case sensitive:|true|
+|Max sentence length:|256|
+
+## References
+
+- https://huggingface.co/anablasi/model_10k_qa
\ No newline at end of file
diff --git a/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_model_en.md b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_model_en.md
new file mode 100644
index 00000000000000..ef18638db0e341
--- /dev/null
+++ b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_model_en.md
@@ -0,0 +1,88 @@
+---
+layout: model
+title: English RobertaForQuestionAnswering Cased model (from choosistant)
+author: John Snow Labs
+name: roberta_qa_model
+date: 2023-01-20
+tags: [en, open_source, roberta, question_answering, tensorflow]
+task: Question Answering
+language: en
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: RoBertaForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained RobertaForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `qa-model` is an English model originally trained by `choosistant`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/roberta_qa_model_en_4.3.0_3.0_1674211832681.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/roberta_qa_model_en_4.3.0_3.0_1674211832681.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_model","en")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_model","en")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq("What's my name?","My name is Clara and I live in Berkeley.").toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|roberta_qa_model|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|464.3 MB|
+|Case sensitive:|true|
+|Max sentence length:|256|
+
+## References
+
+- https://huggingface.co/choosistant/qa-model
\ No newline at end of file
diff --git a/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_model_fine_tuned_en.md b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_model_fine_tuned_en.md
new file mode 100644
index 00000000000000..8cafad48723d1e
--- /dev/null
+++ b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_model_fine_tuned_en.md
@@ -0,0 +1,88 @@
+---
+layout: model
+title: English RobertaForQuestionAnswering Cased model (from choosistant)
+author: John Snow Labs
+name: roberta_qa_model_fine_tuned
+date: 2023-01-20
+tags: [en, open_source, roberta, question_answering, tensorflow]
+task: Question Answering
+language: en
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: RoBertaForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained RobertaForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `qa-model-fine-tuned` is an English model originally trained by `choosistant`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/roberta_qa_model_fine_tuned_en_4.3.0_3.0_1674211892619.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/roberta_qa_model_fine_tuned_en_4.3.0_3.0_1674211892619.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_model_fine_tuned","en")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_model_fine_tuned","en")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq("What's my name?","My name is Clara and I live in Berkeley.").toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|roberta_qa_model_fine_tuned|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|464.4 MB|
+|Case sensitive:|true|
+|Max sentence length:|256|
+
+## References
+
+- https://huggingface.co/choosistant/qa-model-fine-tuned
\ No newline at end of file
diff --git a/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_modeltc_base_squad2_en.md b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_modeltc_base_squad2_en.md
new file mode 100644
index 00000000000000..8153b33b54c12f
--- /dev/null
+++ b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_modeltc_base_squad2_en.md
@@ -0,0 +1,88 @@
+---
+layout: model
+title: English RobertaForQuestionAnswering Base Cased model (from ModelTC)
+author: John Snow Labs
+name: roberta_qa_modeltc_base_squad2
+date: 2023-01-20
+tags: [en, open_source, roberta, question_answering, tensorflow]
+task: Question Answering
+language: en
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: RoBertaForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained RobertaForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `roberta-base-squad2` is an English model originally trained by `ModelTC`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/roberta_qa_modeltc_base_squad2_en_4.3.0_3.0_1674218961578.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/roberta_qa_modeltc_base_squad2_en_4.3.0_3.0_1674218961578.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_modeltc_base_squad2","en")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_modeltc_base_squad2","en")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq("What's my name?","My name is Clara and I live in Berkeley.").toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|roberta_qa_modeltc_base_squad2|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|464.3 MB|
+|Case sensitive:|true|
+|Max sentence length:|256|
+
+## References
+
+- https://huggingface.co/ModelTC/roberta-base-squad2
\ No newline at end of file
diff --git a/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_modeltc_base_squad_en.md b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_modeltc_base_squad_en.md
new file mode 100644
index 00000000000000..e81c67ac84a641
--- /dev/null
+++ b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_modeltc_base_squad_en.md
@@ -0,0 +1,88 @@
+---
+layout: model
+title: English RobertaForQuestionAnswering Base Cased model (from ModelTC)
+author: John Snow Labs
+name: roberta_qa_modeltc_base_squad
+date: 2023-01-20
+tags: [en, open_source, roberta, question_answering, tensorflow]
+task: Question Answering
+language: en
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: RoBertaForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained RobertaForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `roberta-base-squad` is an English model originally trained by `ModelTC`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/roberta_qa_modeltc_base_squad_en_4.3.0_3.0_1674218615238.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/roberta_qa_modeltc_base_squad_en_4.3.0_3.0_1674218615238.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_modeltc_base_squad","en")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_modeltc_base_squad","en")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq("What's my name?","My name is Clara and I live in Berkeley.").toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|roberta_qa_modeltc_base_squad|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|464.2 MB|
+|Case sensitive:|true|
+|Max sentence length:|256|
+
+## References
+
+- https://huggingface.co/ModelTC/roberta-base-squad
\ No newline at end of file
diff --git a/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_ms12345_base_squad2_finetuned_squad_en.md b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_ms12345_base_squad2_finetuned_squad_en.md
new file mode 100644
index 00000000000000..7c8a3980a95aac
--- /dev/null
+++ b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_ms12345_base_squad2_finetuned_squad_en.md
@@ -0,0 +1,88 @@
+---
+layout: model
+title: English RobertaForQuestionAnswering Base Cased model (from ms12345)
+author: John Snow Labs
+name: roberta_qa_ms12345_base_squad2_finetuned_squad
+date: 2023-01-20
+tags: [en, open_source, roberta, question_answering, tensorflow]
+task: Question Answering
+language: en
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: RoBertaForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained RobertaForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `roberta-base-squad2-finetuned-squad` is an English model originally trained by `ms12345`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/roberta_qa_ms12345_base_squad2_finetuned_squad_en_4.3.0_3.0_1674219374435.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/roberta_qa_ms12345_base_squad2_finetuned_squad_en_4.3.0_3.0_1674219374435.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_ms12345_base_squad2_finetuned_squad","en")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_ms12345_base_squad2_finetuned_squad","en")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq("What's my name?","My name is Clara and I live in Berkeley.").toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|roberta_qa_ms12345_base_squad2_finetuned_squad|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|464.3 MB|
+|Case sensitive:|true|
+|Max sentence length:|256|
+
+## References
+
+- https://huggingface.co/ms12345/roberta-base-squad2-finetuned-squad
\ No newline at end of file
diff --git a/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_msms_base_squad2_finetuned_squad_en.md b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_msms_base_squad2_finetuned_squad_en.md
new file mode 100644
index 00000000000000..2b48e80d4d5713
--- /dev/null
+++ b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_msms_base_squad2_finetuned_squad_en.md
@@ -0,0 +1,88 @@
+---
+layout: model
+title: English RobertaForQuestionAnswering Base Cased model (from msms)
+author: John Snow Labs
+name: roberta_qa_msms_base_squad2_finetuned_squad
+date: 2023-01-20
+tags: [en, open_source, roberta, question_answering, tensorflow]
+task: Question Answering
+language: en
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: RoBertaForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained RobertaForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `roberta-base-squad2-finetuned-squad` is an English model originally trained by `msms`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/roberta_qa_msms_base_squad2_finetuned_squad_en_4.3.0_3.0_1674219434301.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/roberta_qa_msms_base_squad2_finetuned_squad_en_4.3.0_3.0_1674219434301.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_msms_base_squad2_finetuned_squad","en")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_msms_base_squad2_finetuned_squad","en")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq("What's my name?","My name is Clara and I live in Berkeley.").toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|roberta_qa_msms_base_squad2_finetuned_squad|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|464.4 MB|
+|Case sensitive:|true|
+|Max sentence length:|256|
+
+## References
+
+- https://huggingface.co/msms/roberta-base-squad2-finetuned-squad
\ No newline at end of file
diff --git a/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_muppet_base_finetuned_squad_en.md b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_muppet_base_finetuned_squad_en.md
new file mode 100644
index 00000000000000..0a45071cb2b4d9
--- /dev/null
+++ b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_muppet_base_finetuned_squad_en.md
@@ -0,0 +1,88 @@
+---
+layout: model
+title: English RobertaForQuestionAnswering Base Cased model (from janeel)
+author: John Snow Labs
+name: roberta_qa_muppet_base_finetuned_squad
+date: 2023-01-20
+tags: [en, open_source, roberta, question_answering, tensorflow]
+task: Question Answering
+language: en
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: RoBertaForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained RobertaForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `muppet-roberta-base-finetuned-squad` is an English model originally trained by `janeel`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/roberta_qa_muppet_base_finetuned_squad_en_4.3.0_3.0_1674211542810.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/roberta_qa_muppet_base_finetuned_squad_en_4.3.0_3.0_1674211542810.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_muppet_base_finetuned_squad","en")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_muppet_base_finetuned_squad","en")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq("What's my name?","My name is Clara and I live in Berkeley.").toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|roberta_qa_muppet_base_finetuned_squad|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|464.3 MB|
+|Case sensitive:|true|
+|Max sentence length:|256|
+
+## References
+
+- https://huggingface.co/janeel/muppet-roberta-base-finetuned-squad
\ No newline at end of file
diff --git a/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_negation_detector_en.md b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_negation_detector_en.md
new file mode 100644
index 00000000000000..a1cb3c91e777a2
--- /dev/null
+++ b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_negation_detector_en.md
@@ -0,0 +1,88 @@
+---
+layout: model
+title: English RobertaForQuestionAnswering Cased model (from Ching)
+author: John Snow Labs
+name: roberta_qa_negation_detector
+date: 2023-01-20
+tags: [en, open_source, roberta, question_answering, tensorflow]
+task: Question Answering
+language: en
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: RoBertaForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained RobertaForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `negation_detector` is an English model originally trained by `Ching`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/roberta_qa_negation_detector_en_4.3.0_3.0_1674211601485.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/roberta_qa_negation_detector_en_4.3.0_3.0_1674211601485.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_negation_detector","en")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_negation_detector","en")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq("What's my name?","My name is Clara and I live in Berkeley.").toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|roberta_qa_negation_detector|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|464.3 MB|
+|Case sensitive:|true|
+|Max sentence length:|256|
+
+## References
+
+- https://huggingface.co/Ching/negation_detector
\ No newline at end of file
diff --git a/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_news_pretrain_ft_new_news_en.md b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_news_pretrain_ft_new_news_en.md
new file mode 100644
index 00000000000000..b92edf3b8656ce
--- /dev/null
+++ b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_news_pretrain_ft_new_news_en.md
@@ -0,0 +1,88 @@
+---
+layout: model
+title: English RobertaForQuestionAnswering Cased model (from AnonymousSub)
+author: John Snow Labs
+name: roberta_qa_news_pretrain_ft_new_news
+date: 2023-01-20
+tags: [en, open_source, roberta, question_answering, tensorflow]
+task: Question Answering
+language: en
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: RoBertaForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained RobertaForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `news_pretrain_roberta_FT_new_newsqa` is an English model originally trained by `AnonymousSub`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/roberta_qa_news_pretrain_ft_new_news_en_4.3.0_3.0_1674211658822.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/roberta_qa_news_pretrain_ft_new_news_en_4.3.0_3.0_1674211658822.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_news_pretrain_ft_new_news","en")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_news_pretrain_ft_new_news","en")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq("What's my name?","My name is Clara and I live in Berkeley.").toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|roberta_qa_news_pretrain_ft_new_news|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|467.1 MB|
+|Case sensitive:|true|
+|Max sentence length:|256|
+
+## References
+
+- https://huggingface.co/AnonymousSub/news_pretrain_roberta_FT_new_newsqa
\ No newline at end of file
diff --git a/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_news_pretrain_ft_news_en.md b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_news_pretrain_ft_news_en.md
new file mode 100644
index 00000000000000..24b2b7c6446066
--- /dev/null
+++ b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_news_pretrain_ft_news_en.md
@@ -0,0 +1,88 @@
+---
+layout: model
+title: English RobertaForQuestionAnswering Cased model (from AnonymousSub)
+author: John Snow Labs
+name: roberta_qa_news_pretrain_ft_news
+date: 2023-01-20
+tags: [en, open_source, roberta, question_answering, tensorflow]
+task: Question Answering
+language: en
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: RoBertaForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained RobertaForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `news_pretrain_roberta_FT_newsqa` is an English model originally trained by `AnonymousSub`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/roberta_qa_news_pretrain_ft_news_en_4.3.0_3.0_1674211715703.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/roberta_qa_news_pretrain_ft_news_en_4.3.0_3.0_1674211715703.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_news_pretrain_ft_news","en")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_news_pretrain_ft_news","en")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq("What's my name?","My name is Clara and I live in Berkeley.").toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|roberta_qa_news_pretrain_ft_news|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|467.1 MB|
+|Case sensitive:|true|
+|Max sentence length:|256|
+
+## References
+
+- https://huggingface.co/AnonymousSub/news_pretrain_roberta_FT_newsqa
\ No newline at end of file
diff --git a/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_paraphrasev3_en.md b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_paraphrasev3_en.md
new file mode 100644
index 00000000000000..49768eebd6b30c
--- /dev/null
+++ b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_paraphrasev3_en.md
@@ -0,0 +1,88 @@
+---
+layout: model
+title: English RobertaForQuestionAnswering Cased model (from comacrae)
+author: John Snow Labs
+name: roberta_qa_paraphrasev3
+date: 2023-01-20
+tags: [en, open_source, roberta, question_answering, tensorflow]
+task: Question Answering
+language: en
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: RoBertaForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained RobertaForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `roberta-paraphrasev3` is an English model originally trained by `comacrae`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/roberta_qa_paraphrasev3_en_4.3.0_3.0_1674222313416.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/roberta_qa_paraphrasev3_en_4.3.0_3.0_1674222313416.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_paraphrasev3","en")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_paraphrasev3","en")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq("What's my name?","My name is Clara and I live in Berkeley.").toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|roberta_qa_paraphrasev3|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|464.5 MB|
+|Case sensitive:|true|
+|Max sentence length:|256|
+
+## References
+
+- https://huggingface.co/comacrae/roberta-paraphrasev3
\ No newline at end of file
diff --git a/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_priv_qna_en.md b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_priv_qna_en.md
new file mode 100644
index 00000000000000..87cd39c54a92cb
--- /dev/null
+++ b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_priv_qna_en.md
@@ -0,0 +1,88 @@
+---
+layout: model
+title: English RobertaForQuestionAnswering Cased model (from arjunth2001)
+author: John Snow Labs
+name: roberta_qa_priv_qna
+date: 2023-01-20
+tags: [en, open_source, roberta, question_answering, tensorflow]
+task: Question Answering
+language: en
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: RoBertaForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained RobertaForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `priv_qna` is an English model originally trained by `arjunth2001`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/roberta_qa_priv_qna_en_4.3.0_3.0_1674211774365.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/roberta_qa_priv_qna_en_4.3.0_3.0_1674211774365.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_priv_qna","en")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_priv_qna","en")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq("What's my name?","My name is Clara and I live in Berkeley.").toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|roberta_qa_priv_qna|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|464.6 MB|
+|Case sensitive:|true|
+|Max sentence length:|256|
+
+## References
+
+- https://huggingface.co/arjunth2001/priv_qna
\ No newline at end of file
diff --git a/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_prk_base_squad2_finetuned_squad_en.md b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_prk_base_squad2_finetuned_squad_en.md
new file mode 100644
index 00000000000000..ae1131bc7c0a1a
--- /dev/null
+++ b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_prk_base_squad2_finetuned_squad_en.md
@@ -0,0 +1,88 @@
+---
+layout: model
+title: English RobertaForQuestionAnswering Base Cased model (from prk)
+author: John Snow Labs
+name: roberta_qa_prk_base_squad2_finetuned_squad
+date: 2023-01-20
+tags: [en, open_source, roberta, question_answering, tensorflow]
+task: Question Answering
+language: en
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: RoBertaForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained RobertaForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `roberta-base-squad2-finetuned-squad` is an English model originally trained by `prk`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/roberta_qa_prk_base_squad2_finetuned_squad_en_4.3.0_3.0_1674219493892.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/roberta_qa_prk_base_squad2_finetuned_squad_en_4.3.0_3.0_1674219493892.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_prk_base_squad2_finetuned_squad","en")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_prk_base_squad2_finetuned_squad","en")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq("What's my name?","My name is Clara and I live in Berkeley.").toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|roberta_qa_prk_base_squad2_finetuned_squad|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|464.3 MB|
+|Case sensitive:|true|
+|Max sentence length:|256|
+
+## References
+
+- https://huggingface.co/prk/roberta-base-squad2-finetuned-squad
\ No newline at end of file
diff --git a/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_quales_iberlef_squad_2_es.md b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_quales_iberlef_squad_2_es.md
new file mode 100644
index 00000000000000..3d6916bf0a0a41
--- /dev/null
+++ b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_quales_iberlef_squad_2_es.md
@@ -0,0 +1,88 @@
+---
+layout: model
+title: Spanish RobertaForQuestionAnswering Cased model (from stevemobs)
+author: John Snow Labs
+name: roberta_qa_quales_iberlef_squad_2
+date: 2023-01-20
+tags: [es, open_source, roberta, question_answering, tensorflow]
+task: Question Answering
+language: es
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: RoBertaForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained RobertaForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `quales-iberlef-squad_2` is a Spanish model originally trained by `stevemobs`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/roberta_qa_quales_iberlef_squad_2_es_4.3.0_3.0_1674212017084.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/roberta_qa_quales_iberlef_squad_2_es_4.3.0_3.0_1674212017084.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_quales_iberlef_squad_2","es")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_quales_iberlef_squad_2","es")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq("What's my name?","My name is Clara and I live in Berkeley.").toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|roberta_qa_quales_iberlef_squad_2|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|es|
+|Size:|1.3 GB|
+|Case sensitive:|true|
+|Max sentence length:|256|
+
+## References
+
+- https://huggingface.co/stevemobs/quales-iberlef-squad_2
\ No newline at end of file
diff --git a/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_rahulchakwate_base_finetuned_squad_en.md b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_rahulchakwate_base_finetuned_squad_en.md
new file mode 100644
index 00000000000000..f239f40e152cc7
--- /dev/null
+++ b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_rahulchakwate_base_finetuned_squad_en.md
@@ -0,0 +1,88 @@
+---
+layout: model
+title: English RobertaForQuestionAnswering Base Cased model (from rahulchakwate)
+author: John Snow Labs
+name: roberta_qa_rahulchakwate_base_finetuned_squad
+date: 2023-01-20
+tags: [en, open_source, roberta, question_answering, tensorflow]
+task: Question Answering
+language: en
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: RoBertaForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained RobertaForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `roberta-base-finetuned-squad` is an English model originally trained by `rahulchakwate`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/roberta_qa_rahulchakwate_base_finetuned_squad_en_4.3.0_3.0_1674217478490.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/roberta_qa_rahulchakwate_base_finetuned_squad_en_4.3.0_3.0_1674217478490.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_rahulchakwate_base_finetuned_squad","en")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_rahulchakwate_base_finetuned_squad","en")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq("What's my name?","My name is Clara and I live in Berkeley.").toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|roberta_qa_rahulchakwate_base_finetuned_squad|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|464.3 MB|
+|Case sensitive:|true|
+|Max sentence length:|256|
+
+## References
+
+- https://huggingface.co/rahulchakwate/roberta-base-finetuned-squad
\ No newline at end of file
diff --git a/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_re_en.md b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_re_en.md
new file mode 100644
index 00000000000000..f54b52bb773ac1
--- /dev/null
+++ b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_re_en.md
@@ -0,0 +1,88 @@
+---
+layout: model
+title: English RobertaForQuestionAnswering Cased model (from shmuelamar)
+author: John Snow Labs
+name: roberta_qa_re
+date: 2023-01-20
+tags: [en, open_source, roberta, question_answering, tensorflow]
+task: Question Answering
+language: en
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: RoBertaForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained RobertaForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `REQA-RoBERTa` is an English model originally trained by `shmuelamar`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/roberta_qa_re_en_4.3.0_3.0_1674208450623.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/roberta_qa_re_en_4.3.0_3.0_1674208450623.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_re","en")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_re","en")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq("What's my name?","My name is Clara and I live in Berkeley.").toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|roberta_qa_re|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|1.3 GB|
+|Case sensitive:|true|
+|Max sentence length:|256|
+
+## References
+
+- https://huggingface.co/shmuelamar/REQA-RoBERTa
\ No newline at end of file
diff --git a/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_recipe_triplet_recipes_base_easy_squadv2_epochs_3_en.md b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_recipe_triplet_recipes_base_easy_squadv2_epochs_3_en.md
new file mode 100644
index 00000000000000..2f649df95025c6
--- /dev/null
+++ b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_recipe_triplet_recipes_base_easy_squadv2_epochs_3_en.md
@@ -0,0 +1,88 @@
+---
+layout: model
+title: English RobertaForQuestionAnswering Base Cased model (from AnonymousSub)
+author: John Snow Labs
+name: roberta_qa_recipe_triplet_recipes_base_easy_squadv2_epochs_3
+date: 2023-01-20
+tags: [en, open_source, roberta, question_answering, tensorflow]
+task: Question Answering
+language: en
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: RoBertaForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained RobertaForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `recipe_triplet_recipes-roberta-base_EASY_squadv2_epochs_3` is an English model originally trained by `AnonymousSub`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/roberta_qa_recipe_triplet_recipes_base_easy_squadv2_epochs_3_en_4.3.0_3.0_1674212163630.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/roberta_qa_recipe_triplet_recipes_base_easy_squadv2_epochs_3_en_4.3.0_3.0_1674212163630.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_recipe_triplet_recipes_base_easy_squadv2_epochs_3","en")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_recipe_triplet_recipes_base_easy_squadv2_epochs_3","en")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq("What's my name?","My name is Clara and I live in Berkeley.").toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|roberta_qa_recipe_triplet_recipes_base_easy_squadv2_epochs_3|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|467.0 MB|
+|Case sensitive:|true|
+|Max sentence length:|256|
+
+## References
+
+- https://huggingface.co/AnonymousSub/recipe_triplet_recipes-roberta-base_EASY_squadv2_epochs_3
\ No newline at end of file
diff --git a/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_recipe_triplet_recipes_base_easy_timestep_squadv2_epochs_3_en.md b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_recipe_triplet_recipes_base_easy_timestep_squadv2_epochs_3_en.md
new file mode 100644
index 00000000000000..2309758f6f0e02
--- /dev/null
+++ b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_recipe_triplet_recipes_base_easy_timestep_squadv2_epochs_3_en.md
@@ -0,0 +1,88 @@
+---
+layout: model
+title: English RobertaForQuestionAnswering Base Cased model (from AnonymousSub)
+author: John Snow Labs
+name: roberta_qa_recipe_triplet_recipes_base_easy_timestep_squadv2_epochs_3
+date: 2023-01-20
+tags: [en, open_source, roberta, question_answering, tensorflow]
+task: Question Answering
+language: en
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: RoBertaForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained RobertaForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `recipe_triplet_recipes-roberta-base_EASY_TIMESTEP_squadv2_epochs_3` is an English model originally trained by `AnonymousSub`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/roberta_qa_recipe_triplet_recipes_base_easy_timestep_squadv2_epochs_3_en_4.3.0_3.0_1674212108351.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/roberta_qa_recipe_triplet_recipes_base_easy_timestep_squadv2_epochs_3_en_4.3.0_3.0_1674212108351.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_recipe_triplet_recipes_base_easy_timestep_squadv2_epochs_3","en")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_recipe_triplet_recipes_base_easy_timestep_squadv2_epochs_3","en")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq("What's my name?","My name is Clara and I live in Berkeley.").toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|roberta_qa_recipe_triplet_recipes_base_easy_timestep_squadv2_epochs_3|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|467.0 MB|
+|Case sensitive:|true|
+|Max sentence length:|256|
+
+## References
+
+- https://huggingface.co/AnonymousSub/recipe_triplet_recipes-roberta-base_EASY_TIMESTEP_squadv2_epochs_3
\ No newline at end of file
diff --git a/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_recipe_triplet_recipes_base_squadv2_epochs_3_en.md b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_recipe_triplet_recipes_base_squadv2_epochs_3_en.md
new file mode 100644
index 00000000000000..5bcbf549a784f6
--- /dev/null
+++ b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_recipe_triplet_recipes_base_squadv2_epochs_3_en.md
@@ -0,0 +1,88 @@
+---
+layout: model
+title: English RobertaForQuestionAnswering Base Cased model (from AnonymousSub)
+author: John Snow Labs
+name: roberta_qa_recipe_triplet_recipes_base_squadv2_epochs_3
+date: 2023-01-20
+tags: [en, open_source, roberta, question_answering, tensorflow]
+task: Question Answering
+language: en
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: RoBertaForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained RobertaForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `recipe_triplet_recipes-roberta-base_squadv2_epochs_3` is an English model originally trained by `AnonymousSub`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/roberta_qa_recipe_triplet_recipes_base_squadv2_epochs_3_en_4.3.0_3.0_1674212279557.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/roberta_qa_recipe_triplet_recipes_base_squadv2_epochs_3_en_4.3.0_3.0_1674212279557.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_recipe_triplet_recipes_base_squadv2_epochs_3","en")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_recipe_triplet_recipes_base_squadv2_epochs_3","en")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq("What's my name?","My name is Clara and I live in Berkeley.").toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|roberta_qa_recipe_triplet_recipes_base_squadv2_epochs_3|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|467.0 MB|
+|Case sensitive:|true|
+|Max sentence length:|256|
+
+## References
+
+- https://huggingface.co/AnonymousSub/recipe_triplet_recipes-roberta-base_squadv2_epochs_3
\ No newline at end of file
diff --git a/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_recipe_triplet_recipes_base_timestep_squadv2_epochs_3_en.md b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_recipe_triplet_recipes_base_timestep_squadv2_epochs_3_en.md
new file mode 100644
index 00000000000000..f33701da4c0b71
--- /dev/null
+++ b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_recipe_triplet_recipes_base_timestep_squadv2_epochs_3_en.md
@@ -0,0 +1,88 @@
+---
+layout: model
+title: English RobertaForQuestionAnswering Base Cased model (from AnonymousSub)
+author: John Snow Labs
+name: roberta_qa_recipe_triplet_recipes_base_timestep_squadv2_epochs_3
+date: 2023-01-20
+tags: [en, open_source, roberta, question_answering, tensorflow]
+task: Question Answering
+language: en
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: RoBertaForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained RobertaForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `recipe_triplet_recipes-roberta-base_TIMESTEP_squadv2_epochs_3` is an English model originally trained by `AnonymousSub`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/roberta_qa_recipe_triplet_recipes_base_timestep_squadv2_epochs_3_en_4.3.0_3.0_1674212220798.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/roberta_qa_recipe_triplet_recipes_base_timestep_squadv2_epochs_3_en_4.3.0_3.0_1674212220798.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_recipe_triplet_recipes_base_timestep_squadv2_epochs_3","en")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_recipe_triplet_recipes_base_timestep_squadv2_epochs_3","en")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq("What's my name?","My name is Clara and I live in Berkeley.").toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|roberta_qa_recipe_triplet_recipes_base_timestep_squadv2_epochs_3|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|467.0 MB|
+|Case sensitive:|true|
+|Max sentence length:|256|
+
+## References
+
+- https://huggingface.co/AnonymousSub/recipe_triplet_recipes-roberta-base_TIMESTEP_squadv2_epochs_3
\ No newline at end of file
diff --git a/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_robbert_base_squad_finetuned_on_runaways_nl.md b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_robbert_base_squad_finetuned_on_runaways_nl.md
new file mode 100644
index 00000000000000..838a8c587f46ec
--- /dev/null
+++ b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_robbert_base_squad_finetuned_on_runaways_nl.md
@@ -0,0 +1,88 @@
+---
+layout: model
+title: Dutch RobertaForQuestionAnswering Base Cased model (from Nadav)
+author: John Snow Labs
+name: roberta_qa_robbert_base_squad_finetuned_on_runaways
+date: 2023-01-20
+tags: [nl, open_source, roberta, question_answering, tensorflow]
+task: Question Answering
+language: nl
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: RoBertaForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained RobertaForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `robbert-base-squad-finetuned-on-runaways-nl` is a Dutch model originally trained by `Nadav`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/roberta_qa_robbert_base_squad_finetuned_on_runaways_nl_4.3.0_3.0_1674212455477.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/roberta_qa_robbert_base_squad_finetuned_on_runaways_nl_4.3.0_3.0_1674212455477.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_robbert_base_squad_finetuned_on_runaways","nl")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_robbert_base_squad_finetuned_on_runaways","nl")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq("What's my name?","My name is Clara and I live in Berkeley.").toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|roberta_qa_robbert_base_squad_finetuned_on_runaways|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|nl|
+|Size:|436.4 MB|
+|Case sensitive:|true|
+|Max sentence length:|256|
+
+## References
+
+- https://huggingface.co/Nadav/robbert-base-squad-finetuned-on-runaways-nl
\ No newline at end of file
diff --git a/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_roberta_en.md b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_roberta_en.md
new file mode 100644
index 00000000000000..9ea7a2be785e2e
--- /dev/null
+++ b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_roberta_en.md
@@ -0,0 +1,88 @@
+---
+layout: model
+title: English RobertaForQuestionAnswering Cased model (from nlpunibo)
+author: John Snow Labs
+name: roberta_qa_roberta
+date: 2023-01-20
+tags: [en, open_source, roberta, question_answering, tensorflow]
+task: Question Answering
+language: en
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: RoBertaForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained RobertaForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `roberta` is an English model originally trained by `nlpunibo`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/roberta_qa_roberta_en_4.3.0_3.0_1674212513043.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/roberta_qa_roberta_en_4.3.0_3.0_1674212513043.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_roberta","en")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_roberta","en")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq("What's my name?","My name is Clara and I live in Berkeley.").toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|roberta_qa_roberta|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|463.6 MB|
+|Case sensitive:|true|
+|Max sentence length:|256|
+
+## References
+
+- https://huggingface.co/nlpunibo/roberta
\ No newline at end of file
diff --git a/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_robertaabsa_en.md b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_robertaabsa_en.md
new file mode 100644
index 00000000000000..e49770aa709316
--- /dev/null
+++ b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_robertaabsa_en.md
@@ -0,0 +1,88 @@
+---
+layout: model
+title: English RobertaForQuestionAnswering Cased model (from LucasS)
+author: John Snow Labs
+name: roberta_qa_robertaabsa
+date: 2023-01-20
+tags: [en, open_source, roberta, question_answering, tensorflow]
+task: Question Answering
+language: en
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: RoBertaForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained RobertaForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `robertaABSA` is an English model originally trained by `LucasS`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/roberta_qa_robertaabsa_en_4.3.0_3.0_1674222776379.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/roberta_qa_robertaabsa_en_4.3.0_3.0_1674222776379.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_robertaabsa","en")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_robertaabsa","en")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq("What's my name?","My name is Clara and I live in Berkeley.").toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|roberta_qa_robertaabsa|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|437.1 MB|
+|Case sensitive:|true|
+|Max sentence length:|256|
+
+## References
+
+- https://huggingface.co/LucasS/robertaABSA
\ No newline at end of file
diff --git a/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_robertabaseabsa_en.md b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_robertabaseabsa_en.md
new file mode 100644
index 00000000000000..caa0b1e1907faa
--- /dev/null
+++ b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_robertabaseabsa_en.md
@@ -0,0 +1,88 @@
+---
+layout: model
+title: English RobertaForQuestionAnswering Cased model (from LucasS)
+author: John Snow Labs
+name: roberta_qa_robertabaseabsa
+date: 2023-01-20
+tags: [en, open_source, roberta, question_answering, tensorflow]
+task: Question Answering
+language: en
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: RoBertaForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained RobertaForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `robertaBaseABSA` is an English model originally trained by `LucasS`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/roberta_qa_robertabaseabsa_en_4.3.0_3.0_1674222849343.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/roberta_qa_robertabaseabsa_en_4.3.0_3.0_1674222849343.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_robertabaseabsa","en")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_robertabaseabsa","en")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq("What's my name?","My name is Clara and I live in Berkeley.").toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|roberta_qa_robertabaseabsa|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|437.1 MB|
+|Case sensitive:|true|
+|Max sentence length:|256|
+
+## References
+
+- https://huggingface.co/LucasS/robertaBaseABSA
\ No newline at end of file
diff --git a/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_rule_based_bert_quadruplet_epochs_1_shard_1_squad2.0_en.md b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_rule_based_bert_quadruplet_epochs_1_shard_1_squad2.0_en.md
new file mode 100644
index 00000000000000..2189a6d9b99d87
--- /dev/null
+++ b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_rule_based_bert_quadruplet_epochs_1_shard_1_squad2.0_en.md
@@ -0,0 +1,88 @@
+---
+layout: model
+title: English RobertaForQuestionAnswering Cased model (from AnonymousSub)
+author: John Snow Labs
+name: roberta_qa_rule_based_bert_quadruplet_epochs_1_shard_1_squad2.0
+date: 2023-01-20
+tags: [en, open_source, roberta, question_answering, tensorflow]
+task: Question Answering
+language: en
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: RoBertaForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained RobertaForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `rule_based_roberta_bert_quadruplet_epochs_1_shard_1_squad2.0` is an English model originally trained by `AnonymousSub`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/roberta_qa_rule_based_bert_quadruplet_epochs_1_shard_1_squad2.0_en_4.3.0_3.0_1674223261564.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/roberta_qa_rule_based_bert_quadruplet_epochs_1_shard_1_squad2.0_en_4.3.0_3.0_1674223261564.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_rule_based_bert_quadruplet_epochs_1_shard_1_squad2.0","en")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_rule_based_bert_quadruplet_epochs_1_shard_1_squad2.0","en")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq("What's my name?","My name is Clara and I live in Berkeley.").toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|roberta_qa_rule_based_bert_quadruplet_epochs_1_shard_1_squad2.0|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|460.4 MB|
+|Case sensitive:|true|
+|Max sentence length:|256|
+
+## References
+
+- https://huggingface.co/AnonymousSub/rule_based_roberta_bert_quadruplet_epochs_1_shard_1_squad2.0
\ No newline at end of file
diff --git a/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_rule_based_bert_triplet_epochs_1_shard_1_squad2.0_en.md b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_rule_based_bert_triplet_epochs_1_shard_1_squad2.0_en.md
new file mode 100644
index 00000000000000..1c01a3a5923114
--- /dev/null
+++ b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_rule_based_bert_triplet_epochs_1_shard_1_squad2.0_en.md
@@ -0,0 +1,88 @@
+---
+layout: model
+title: English RobertaForQuestionAnswering Cased model (from AnonymousSub)
+author: John Snow Labs
+name: roberta_qa_rule_based_bert_triplet_epochs_1_shard_1_squad2.0
+date: 2023-01-20
+tags: [en, open_source, roberta, question_answering, tensorflow]
+task: Question Answering
+language: en
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: RoBertaForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained RobertaForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `rule_based_roberta_bert_triplet_epochs_1_shard_1_squad2.0` is an English model originally trained by `AnonymousSub`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/roberta_qa_rule_based_bert_triplet_epochs_1_shard_1_squad2.0_en_4.3.0_3.0_1674223322432.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/roberta_qa_rule_based_bert_triplet_epochs_1_shard_1_squad2.0_en_4.3.0_3.0_1674223322432.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_rule_based_bert_triplet_epochs_1_shard_1_squad2.0","en")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_rule_based_bert_triplet_epochs_1_shard_1_squad2.0","en")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq("What's my name?","My name is Clara and I live in Berkeley.").toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|roberta_qa_rule_based_bert_triplet_epochs_1_shard_1_squad2.0|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|460.5 MB|
+|Case sensitive:|true|
+|Max sentence length:|256|
+
+## References
+
+- https://huggingface.co/AnonymousSub/rule_based_roberta_bert_triplet_epochs_1_shard_1_squad2.0
\ No newline at end of file
diff --git a/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_rule_based_hier_quadruplet_0.1_epochs_1_shard_1_squad2.0_en.md b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_rule_based_hier_quadruplet_0.1_epochs_1_shard_1_squad2.0_en.md
new file mode 100644
index 00000000000000..079ed5c752b8a9
--- /dev/null
+++ b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_rule_based_hier_quadruplet_0.1_epochs_1_shard_1_squad2.0_en.md
@@ -0,0 +1,88 @@
+---
+layout: model
+title: English RobertaForQuestionAnswering Cased model (from AnonymousSub)
+author: John Snow Labs
+name: roberta_qa_rule_based_hier_quadruplet_0.1_epochs_1_shard_1_squad2.0
+date: 2023-01-20
+tags: [en, open_source, roberta, question_answering, tensorflow]
+task: Question Answering
+language: en
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: RoBertaForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained RobertaForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `rule_based_roberta_hier_quadruplet_0.1_epochs_1_shard_1_squad2.0` is an English model originally trained by `AnonymousSub`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/roberta_qa_rule_based_hier_quadruplet_0.1_epochs_1_shard_1_squad2.0_en_4.3.0_3.0_1674223386005.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/roberta_qa_rule_based_hier_quadruplet_0.1_epochs_1_shard_1_squad2.0_en_4.3.0_3.0_1674223386005.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_rule_based_hier_quadruplet_0.1_epochs_1_shard_1_squad2.0","en")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_rule_based_hier_quadruplet_0.1_epochs_1_shard_1_squad2.0","en")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq("What's my name?","My name is Clara and I live in Berkeley.").toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|roberta_qa_rule_based_hier_quadruplet_0.1_epochs_1_shard_1_squad2.0|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|460.4 MB|
+|Case sensitive:|true|
+|Max sentence length:|256|
+
+## References
+
+- https://huggingface.co/AnonymousSub/rule_based_roberta_hier_quadruplet_0.1_epochs_1_shard_1_squad2.0
\ No newline at end of file
diff --git a/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_rule_based_hier_quadruplet_epochs_1_shard_1_squad2.0_en.md b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_rule_based_hier_quadruplet_epochs_1_shard_1_squad2.0_en.md
new file mode 100644
index 00000000000000..eae74358dca7b6
--- /dev/null
+++ b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_rule_based_hier_quadruplet_epochs_1_shard_1_squad2.0_en.md
@@ -0,0 +1,88 @@
+---
+layout: model
+title: English RobertaForQuestionAnswering Cased model (from AnonymousSub)
+author: John Snow Labs
+name: roberta_qa_rule_based_hier_quadruplet_epochs_1_shard_1_squad2.0
+date: 2023-01-20
+tags: [en, open_source, roberta, question_answering, tensorflow]
+task: Question Answering
+language: en
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: RoBertaForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained RobertaForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `rule_based_roberta_hier_quadruplet_epochs_1_shard_1_squad2.0` is an English model originally trained by `AnonymousSub`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/roberta_qa_rule_based_hier_quadruplet_epochs_1_shard_1_squad2.0_en_4.3.0_3.0_1674223448939.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/roberta_qa_rule_based_hier_quadruplet_epochs_1_shard_1_squad2.0_en_4.3.0_3.0_1674223448939.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_rule_based_hier_quadruplet_epochs_1_shard_1_squad2.0","en")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_rule_based_hier_quadruplet_epochs_1_shard_1_squad2.0","en")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq("What's my name?","My name is Clara and I live in Berkeley.").toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|roberta_qa_rule_based_hier_quadruplet_epochs_1_shard_1_squad2.0|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|460.4 MB|
+|Case sensitive:|true|
+|Max sentence length:|256|
+
+## References
+
+- https://huggingface.co/AnonymousSub/rule_based_roberta_hier_quadruplet_epochs_1_shard_1_squad2.0
\ No newline at end of file
diff --git a/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_rule_based_hier_triplet_0.1_epochs_1_shard_1_squad2.0_en.md b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_rule_based_hier_triplet_0.1_epochs_1_shard_1_squad2.0_en.md
new file mode 100644
index 00000000000000..b0192027ba1c8f
--- /dev/null
+++ b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_rule_based_hier_triplet_0.1_epochs_1_shard_1_squad2.0_en.md
@@ -0,0 +1,88 @@
+---
+layout: model
+title: English RobertaForQuestionAnswering Cased model (from AnonymousSub)
+author: John Snow Labs
+name: roberta_qa_rule_based_hier_triplet_0.1_epochs_1_shard_1_squad2.0
+date: 2023-01-20
+tags: [en, open_source, roberta, question_answering, tensorflow]
+task: Question Answering
+language: en
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: RoBertaForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained RobertaForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `rule_based_roberta_hier_triplet_0.1_epochs_1_shard_1_squad2.0` is an English model originally trained by `AnonymousSub`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/roberta_qa_rule_based_hier_triplet_0.1_epochs_1_shard_1_squad2.0_en_4.3.0_3.0_1674223509314.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/roberta_qa_rule_based_hier_triplet_0.1_epochs_1_shard_1_squad2.0_en_4.3.0_3.0_1674223509314.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_rule_based_hier_triplet_0.1_epochs_1_shard_1_squad2.0","en")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_rule_based_hier_triplet_0.1_epochs_1_shard_1_squad2.0","en")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq("What's my name?","My name is Clara and I live in Berkeley.").toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|roberta_qa_rule_based_hier_triplet_0.1_epochs_1_shard_1_squad2.0|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|460.4 MB|
+|Case sensitive:|true|
+|Max sentence length:|256|
+
+## References
+
+- https://huggingface.co/AnonymousSub/rule_based_roberta_hier_triplet_0.1_epochs_1_shard_1_squad2.0
\ No newline at end of file
diff --git a/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_rule_based_hier_triplet_epochs_1_shard_1_squad2.0_en.md b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_rule_based_hier_triplet_epochs_1_shard_1_squad2.0_en.md
new file mode 100644
index 00000000000000..0bce0e66746ad5
--- /dev/null
+++ b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_rule_based_hier_triplet_epochs_1_shard_1_squad2.0_en.md
@@ -0,0 +1,88 @@
+---
+layout: model
+title: English RobertaForQuestionAnswering Cased model (from AnonymousSub)
+author: John Snow Labs
+name: roberta_qa_rule_based_hier_triplet_epochs_1_shard_1_squad2.0
+date: 2023-01-20
+tags: [en, open_source, roberta, question_answering, tensorflow]
+task: Question Answering
+language: en
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: RoBertaForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained RobertaForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `rule_based_roberta_hier_triplet_epochs_1_shard_1_squad2.0` is an English model originally trained by `AnonymousSub`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/roberta_qa_rule_based_hier_triplet_epochs_1_shard_1_squad2.0_en_4.3.0_3.0_1674223579788.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/roberta_qa_rule_based_hier_triplet_epochs_1_shard_1_squad2.0_en_4.3.0_3.0_1674223579788.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_rule_based_hier_triplet_epochs_1_shard_1_squad2.0","en")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_rule_based_hier_triplet_epochs_1_shard_1_squad2.0","en")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq("What's my name?","My name is Clara and I live in Berkeley.").toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|roberta_qa_rule_based_hier_triplet_epochs_1_shard_1_squad2.0|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|460.4 MB|
+|Case sensitive:|true|
+|Max sentence length:|256|
+
+## References
+
+- https://huggingface.co/AnonymousSub/rule_based_roberta_hier_triplet_epochs_1_shard_1_squad2.0
\ No newline at end of file
diff --git a/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_rule_based_hier_triplet_shuffled_epochs_1_shard_1_squad2.0_en.md b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_rule_based_hier_triplet_shuffled_epochs_1_shard_1_squad2.0_en.md
new file mode 100644
index 00000000000000..a993ab45236986
--- /dev/null
+++ b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_rule_based_hier_triplet_shuffled_epochs_1_shard_1_squad2.0_en.md
@@ -0,0 +1,88 @@
+---
+layout: model
+title: English RobertaForQuestionAnswering Cased model (from AnonymousSub)
+author: John Snow Labs
+name: roberta_qa_rule_based_hier_triplet_shuffled_epochs_1_shard_1_squad2.0
+date: 2023-01-20
+tags: [en, open_source, roberta, question_answering, tensorflow]
+task: Question Answering
+language: en
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: RoBertaForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained RobertaForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `rule_based_roberta_hier_triplet_shuffled_epochs_1_shard_1_squad2.0` is an English model originally trained by `AnonymousSub`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/roberta_qa_rule_based_hier_triplet_shuffled_epochs_1_shard_1_squad2.0_en_4.3.0_3.0_1674223641296.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/roberta_qa_rule_based_hier_triplet_shuffled_epochs_1_shard_1_squad2.0_en_4.3.0_3.0_1674223641296.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_rule_based_hier_triplet_shuffled_epochs_1_shard_1_squad2.0","en")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_rule_based_hier_triplet_shuffled_epochs_1_shard_1_squad2.0","en")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq("What's my name?","My name is Clara and I live in Berkeley.").toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|roberta_qa_rule_based_hier_triplet_shuffled_epochs_1_shard_1_squad2.0|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|460.5 MB|
+|Case sensitive:|true|
+|Max sentence length:|256|
+
+## References
+
+- https://huggingface.co/AnonymousSub/rule_based_roberta_hier_triplet_shuffled_epochs_1_shard_1_squad2.0
\ No newline at end of file
diff --git a/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_rule_based_hier_triplet_shuffled_paras_epochs_1_shard_1_squad2.0_en.md b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_rule_based_hier_triplet_shuffled_paras_epochs_1_shard_1_squad2.0_en.md
new file mode 100644
index 00000000000000..9917bb9dcc8725
--- /dev/null
+++ b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_rule_based_hier_triplet_shuffled_paras_epochs_1_shard_1_squad2.0_en.md
@@ -0,0 +1,88 @@
+---
+layout: model
+title: English RobertaForQuestionAnswering Cased model (from AnonymousSub)
+author: John Snow Labs
+name: roberta_qa_rule_based_hier_triplet_shuffled_paras_epochs_1_shard_1_squad2.0
+date: 2023-01-20
+tags: [en, open_source, roberta, question_answering, tensorflow]
+task: Question Answering
+language: en
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: RoBertaForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained RobertaForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `rule_based_roberta_hier_triplet_shuffled_paras_epochs_1_shard_1_squad2.0` is an English model originally trained by `AnonymousSub`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/roberta_qa_rule_based_hier_triplet_shuffled_paras_epochs_1_shard_1_squad2.0_en_4.3.0_3.0_1674223704448.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/roberta_qa_rule_based_hier_triplet_shuffled_paras_epochs_1_shard_1_squad2.0_en_4.3.0_3.0_1674223704448.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_rule_based_hier_triplet_shuffled_paras_epochs_1_shard_1_squad2.0","en")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_rule_based_hier_triplet_shuffled_paras_epochs_1_shard_1_squad2.0","en")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq("What's my name?","My name is Clara and I live in Berkeley.").toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|roberta_qa_rule_based_hier_triplet_shuffled_paras_epochs_1_shard_1_squad2.0|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|460.4 MB|
+|Case sensitive:|true|
+|Max sentence length:|256|
+
+## References
+
+- https://huggingface.co/AnonymousSub/rule_based_roberta_hier_triplet_shuffled_paras_epochs_1_shard_1_squad2.0
\ No newline at end of file
diff --git a/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_rule_based_only_classfn_epochs_1_shard_1_squad2.0_en.md b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_rule_based_only_classfn_epochs_1_shard_1_squad2.0_en.md
new file mode 100644
index 00000000000000..4fed39700c5f9c
--- /dev/null
+++ b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_rule_based_only_classfn_epochs_1_shard_1_squad2.0_en.md
@@ -0,0 +1,88 @@
+---
+layout: model
+title: English RobertaForQuestionAnswering Cased model (from AnonymousSub)
+author: John Snow Labs
+name: roberta_qa_rule_based_only_classfn_epochs_1_shard_1_squad2.0
+date: 2023-01-20
+tags: [en, open_source, roberta, question_answering, tensorflow]
+task: Question Answering
+language: en
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: RoBertaForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained RobertaForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `rule_based_roberta_only_classfn_epochs_1_shard_1_squad2.0` is an English model originally trained by `AnonymousSub`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/roberta_qa_rule_based_only_classfn_epochs_1_shard_1_squad2.0_en_4.3.0_3.0_1674223765535.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/roberta_qa_rule_based_only_classfn_epochs_1_shard_1_squad2.0_en_4.3.0_3.0_1674223765535.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_rule_based_only_classfn_epochs_1_shard_1_squad2.0","en")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_rule_based_only_classfn_epochs_1_shard_1_squad2.0","en")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq("What's my name?","My name is Clara and I live in Berkeley.").toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|roberta_qa_rule_based_only_classfn_epochs_1_shard_1_squad2.0|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|460.4 MB|
+|Case sensitive:|true|
+|Max sentence length:|256|
+
+## References
+
+- https://huggingface.co/AnonymousSub/rule_based_roberta_only_classfn_epochs_1_shard_1_squad2.0
\ No newline at end of file
diff --git a/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_rule_based_only_classfn_twostage_epochs_1_shard_1_squad2.0_en.md b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_rule_based_only_classfn_twostage_epochs_1_shard_1_squad2.0_en.md
new file mode 100644
index 00000000000000..b822be0e06dedb
--- /dev/null
+++ b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_rule_based_only_classfn_twostage_epochs_1_shard_1_squad2.0_en.md
@@ -0,0 +1,88 @@
+---
+layout: model
+title: English RobertaForQuestionAnswering Cased model (from AnonymousSub)
+author: John Snow Labs
+name: roberta_qa_rule_based_only_classfn_twostage_epochs_1_shard_1_squad2.0
+date: 2023-01-20
+tags: [en, open_source, roberta, question_answering, tensorflow]
+task: Question Answering
+language: en
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: RoBertaForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained RobertaForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `rule_based_roberta_only_classfn_twostage_epochs_1_shard_1_squad2.0` is an English model originally trained by `AnonymousSub`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/roberta_qa_rule_based_only_classfn_twostage_epochs_1_shard_1_squad2.0_en_4.3.0_3.0_1674223837089.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/roberta_qa_rule_based_only_classfn_twostage_epochs_1_shard_1_squad2.0_en_4.3.0_3.0_1674223837089.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_rule_based_only_classfn_twostage_epochs_1_shard_1_squad2.0","en")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_rule_based_only_classfn_twostage_epochs_1_shard_1_squad2.0","en")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq("What's my name?","My name is Clara and I live in Berkeley.").toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|roberta_qa_rule_based_only_classfn_twostage_epochs_1_shard_1_squad2.0|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|463.4 MB|
+|Case sensitive:|true|
+|Max sentence length:|256|
+
+## References
+
+- https://huggingface.co/AnonymousSub/rule_based_roberta_only_classfn_twostage_epochs_1_shard_1_squad2.0
\ No newline at end of file
diff --git a/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_rule_based_twostage_quadruplet_epochs_1_shard_1_squad2.0_en.md b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_rule_based_twostage_quadruplet_epochs_1_shard_1_squad2.0_en.md
new file mode 100644
index 00000000000000..64b2bc517264d8
--- /dev/null
+++ b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_rule_based_twostage_quadruplet_epochs_1_shard_1_squad2.0_en.md
@@ -0,0 +1,88 @@
+---
+layout: model
+title: English RobertaForQuestionAnswering Cased model (from AnonymousSub)
+author: John Snow Labs
+name: roberta_qa_rule_based_twostage_quadruplet_epochs_1_shard_1_squad2.0
+date: 2023-01-20
+tags: [en, open_source, roberta, question_answering, tensorflow]
+task: Question Answering
+language: en
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: RoBertaForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained RobertaForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `rule_based_roberta_twostage_quadruplet_epochs_1_shard_1_squad2.0` is an English model originally trained by `AnonymousSub`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/roberta_qa_rule_based_twostage_quadruplet_epochs_1_shard_1_squad2.0_en_4.3.0_3.0_1674223883289.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/roberta_qa_rule_based_twostage_quadruplet_epochs_1_shard_1_squad2.0_en_4.3.0_3.0_1674223883289.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_rule_based_twostage_quadruplet_epochs_1_shard_1_squad2.0","en")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_rule_based_twostage_quadruplet_epochs_1_shard_1_squad2.0","en")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq("What's my name?","My name is Clara and I live in Berkeley.").toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|roberta_qa_rule_based_twostage_quadruplet_epochs_1_shard_1_squad2.0|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|306.8 MB|
+|Case sensitive:|true|
+|Max sentence length:|256|
+
+## References
+
+- https://huggingface.co/AnonymousSub/rule_based_roberta_twostage_quadruplet_epochs_1_shard_1_squad2.0
\ No newline at end of file
diff --git a/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_rule_based_twostagequadruplet_hier_epochs_1_shard_1_squad2.0_en.md b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_rule_based_twostagequadruplet_hier_epochs_1_shard_1_squad2.0_en.md
new file mode 100644
index 00000000000000..e3f560d6e4ecda
--- /dev/null
+++ b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_rule_based_twostagequadruplet_hier_epochs_1_shard_1_squad2.0_en.md
@@ -0,0 +1,88 @@
+---
+layout: model
+title: English RobertaForQuestionAnswering Cased model (from AnonymousSub)
+author: John Snow Labs
+name: roberta_qa_rule_based_twostagequadruplet_hier_epochs_1_shard_1_squad2.0
+date: 2023-01-20
+tags: [en, open_source, roberta, question_answering, tensorflow]
+task: Question Answering
+language: en
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: RoBertaForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained RobertaForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `rule_based_roberta_twostagequadruplet_hier_epochs_1_shard_1_squad2.0` is an English model originally trained by `AnonymousSub`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/roberta_qa_rule_based_twostagequadruplet_hier_epochs_1_shard_1_squad2.0_en_4.3.0_3.0_1674223924983.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/roberta_qa_rule_based_twostagequadruplet_hier_epochs_1_shard_1_squad2.0_en_4.3.0_3.0_1674223924983.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_rule_based_twostagequadruplet_hier_epochs_1_shard_1_squad2.0","en")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_rule_based_twostagequadruplet_hier_epochs_1_shard_1_squad2.0","en")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq("What's my name?","My name is Clara and I live in Berkeley.").toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|roberta_qa_rule_based_twostagequadruplet_hier_epochs_1_shard_1_squad2.0|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|306.8 MB|
+|Case sensitive:|true|
+|Max sentence length:|256|
+
+## References
+
+- https://huggingface.co/AnonymousSub/rule_based_roberta_twostagequadruplet_hier_epochs_1_shard_1_squad2.0
\ No newline at end of file
diff --git a/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_rule_based_twostagetriplet_epochs_1_shard_1_squad2.0_en.md b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_rule_based_twostagetriplet_epochs_1_shard_1_squad2.0_en.md
new file mode 100644
index 00000000000000..a0dc484ae5211f
--- /dev/null
+++ b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_rule_based_twostagetriplet_epochs_1_shard_1_squad2.0_en.md
@@ -0,0 +1,88 @@
+---
+layout: model
+title: English RobertaForQuestionAnswering Cased model (from AnonymousSub)
+author: John Snow Labs
+name: roberta_qa_rule_based_twostagetriplet_epochs_1_shard_1_squad2.0
+date: 2023-01-20
+tags: [en, open_source, roberta, question_answering, tensorflow]
+task: Question Answering
+language: en
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: RoBertaForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained RobertaForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `rule_based_roberta_twostagetriplet_epochs_1_shard_1_squad2.0` is an English model originally trained by `AnonymousSub`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/roberta_qa_rule_based_twostagetriplet_epochs_1_shard_1_squad2.0_en_4.3.0_3.0_1674223967265.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/roberta_qa_rule_based_twostagetriplet_epochs_1_shard_1_squad2.0_en_4.3.0_3.0_1674223967265.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_rule_based_twostagetriplet_epochs_1_shard_1_squad2.0","en")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_rule_based_twostagetriplet_epochs_1_shard_1_squad2.0","en")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq("What's my name?","My name is Clara and I live in Berkeley.").toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|roberta_qa_rule_based_twostagetriplet_epochs_1_shard_1_squad2.0|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|306.8 MB|
+|Case sensitive:|true|
+|Max sentence length:|256|
+
+## References
+
+- https://huggingface.co/AnonymousSub/rule_based_roberta_twostagetriplet_epochs_1_shard_1_squad2.0
\ No newline at end of file
diff --git a/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_rule_based_twostagetriplet_hier_epochs_1_shard_1_squad2.0_en.md b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_rule_based_twostagetriplet_hier_epochs_1_shard_1_squad2.0_en.md
new file mode 100644
index 00000000000000..48ff9cb3be8748
--- /dev/null
+++ b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_rule_based_twostagetriplet_hier_epochs_1_shard_1_squad2.0_en.md
@@ -0,0 +1,88 @@
+---
+layout: model
+title: English RobertaForQuestionAnswering Cased model (from AnonymousSub)
+author: John Snow Labs
+name: roberta_qa_rule_based_twostagetriplet_hier_epochs_1_shard_1_squad2.0
+date: 2023-01-20
+tags: [en, open_source, roberta, question_answering, tensorflow]
+task: Question Answering
+language: en
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: RoBertaForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained RobertaForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `rule_based_roberta_twostagetriplet_hier_epochs_1_shard_1_squad2.0` is an English model originally trained by `AnonymousSub`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/roberta_qa_rule_based_twostagetriplet_hier_epochs_1_shard_1_squad2.0_en_4.3.0_3.0_1674224008013.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/roberta_qa_rule_based_twostagetriplet_hier_epochs_1_shard_1_squad2.0_en_4.3.0_3.0_1674224008013.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_rule_based_twostagetriplet_hier_epochs_1_shard_1_squad2.0","en")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_rule_based_twostagetriplet_hier_epochs_1_shard_1_squad2.0","en")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq("What's my name?","My name is Clara and I live in Berkeley.").toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|roberta_qa_rule_based_twostagetriplet_hier_epochs_1_shard_1_squad2.0|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|306.8 MB|
+|Case sensitive:|true|
+|Max sentence length:|256|
+
+## References
+
+- https://huggingface.co/AnonymousSub/rule_based_roberta_twostagetriplet_hier_epochs_1_shard_1_squad2.0
\ No newline at end of file
diff --git a/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_sae_base_squad_en.md b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_sae_base_squad_en.md
new file mode 100644
index 00000000000000..6bd2aacc0a3aa9
--- /dev/null
+++ b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_sae_base_squad_en.md
@@ -0,0 +1,88 @@
+---
+layout: model
+title: English RobertaForQuestionAnswering Base Cased model (from jgammack)
+author: John Snow Labs
+name: roberta_qa_sae_base_squad
+date: 2023-01-20
+tags: [en, open_source, roberta, question_answering, tensorflow]
+task: Question Answering
+language: en
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: RoBertaForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained RobertaForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `SAE-roberta-base-squad` is an English model originally trained by `jgammack`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/roberta_qa_sae_base_squad_en_4.3.0_3.0_1674208844842.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/roberta_qa_sae_base_squad_en_4.3.0_3.0_1674208844842.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_sae_base_squad","en")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_sae_base_squad","en")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq("What's my name?","My name is Clara and I live in Berkeley.").toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|roberta_qa_sae_base_squad|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|467.1 MB|
+|Case sensitive:|true|
+|Max sentence length:|256|
+
+## References
+
+- https://huggingface.co/jgammack/SAE-roberta-base-squad
\ No newline at end of file
diff --git a/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_squad_en.md b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_squad_en.md
new file mode 100644
index 00000000000000..be7582a9270407
--- /dev/null
+++ b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_squad_en.md
@@ -0,0 +1,88 @@
+---
+layout: model
+title: English RobertaForQuestionAnswering Cased model (from Raynok)
+author: John Snow Labs
+name: roberta_qa_squad
+date: 2023-01-20
+tags: [en, open_source, roberta, question_answering, tensorflow]
+task: Question Answering
+language: en
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: RoBertaForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained RobertaForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `roberta-squad` is an English model originally trained by `Raynok`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/roberta_qa_squad_en_4.3.0_3.0_1674222368310.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/roberta_qa_squad_en_4.3.0_3.0_1674222368310.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_squad","en")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_squad","en")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq("What's my name?","My name is Clara and I live in Berkeley.").toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|roberta_qa_squad|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|464.2 MB|
+|Case sensitive:|true|
+|Max sentence length:|256|
+
+## References
+
+- https://huggingface.co/Raynok/roberta-squad
\ No newline at end of file
diff --git a/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_squadv2_base_3_epochs_en.md b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_squadv2_base_3_epochs_en.md
new file mode 100644
index 00000000000000..8cdd8a8df84f82
--- /dev/null
+++ b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_squadv2_base_3_epochs_en.md
@@ -0,0 +1,88 @@
+---
+layout: model
+title: English RobertaForQuestionAnswering Base Cased model (from AnonymousSub)
+author: John Snow Labs
+name: roberta_qa_squadv2_base_3_epochs
+date: 2023-01-20
+tags: [en, open_source, roberta, question_answering, tensorflow]
+task: Question Answering
+language: en
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: RoBertaForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained RobertaForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `squadv2-roberta-base-3-epochs` is an English model originally trained by `AnonymousSub`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/roberta_qa_squadv2_base_3_epochs_en_4.3.0_3.0_1674224185355.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/roberta_qa_squadv2_base_3_epochs_en_4.3.0_3.0_1674224185355.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_squadv2_base_3_epochs","en")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_squadv2_base_3_epochs","en")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq("What's my name?","My name is Clara and I live in Berkeley.").toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|roberta_qa_squadv2_base_3_epochs|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|460.5 MB|
+|Case sensitive:|true|
+|Max sentence length:|256|
+
+## References
+
+- https://huggingface.co/AnonymousSub/squadv2-roberta-base-3-epochs
\ No newline at end of file
diff --git a/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_squadv2_recipe_3_epochs_en.md b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_squadv2_recipe_3_epochs_en.md
new file mode 100644
index 00000000000000..0016e8f8b3e409
--- /dev/null
+++ b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_squadv2_recipe_3_epochs_en.md
@@ -0,0 +1,88 @@
+---
+layout: model
+title: English RobertaForQuestionAnswering Cased model (from AnonymousSub)
+author: John Snow Labs
+name: roberta_qa_squadv2_recipe_3_epochs
+date: 2023-01-20
+tags: [en, open_source, roberta, question_answering, tensorflow]
+task: Question Answering
+language: en
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: RoBertaForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained RobertaForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `squadv2-recipe-roberta-3-epochs` is an English model originally trained by `AnonymousSub`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/roberta_qa_squadv2_recipe_3_epochs_en_4.3.0_3.0_1674224063985.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/roberta_qa_squadv2_recipe_3_epochs_en_4.3.0_3.0_1674224063985.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_squadv2_recipe_3_epochs","en")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_squadv2_recipe_3_epochs","en")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq("What's my name?","My name is Clara and I live in Berkeley.").toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|roberta_qa_squadv2_recipe_3_epochs|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|467.0 MB|
+|Case sensitive:|true|
+|Max sentence length:|256|
+
+## References
+
+- https://huggingface.co/AnonymousSub/squadv2-recipe-roberta-3-epochs
\ No newline at end of file
diff --git a/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_squadv2_recipe_tokenwise_token_and_step_losses_3_epochs_en.md b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_squadv2_recipe_tokenwise_token_and_step_losses_3_epochs_en.md
new file mode 100644
index 00000000000000..8255b8e70b3f0e
--- /dev/null
+++ b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_squadv2_recipe_tokenwise_token_and_step_losses_3_epochs_en.md
@@ -0,0 +1,88 @@
+---
+layout: model
+title: English RobertaForQuestionAnswering Cased model (from AnonymousSub)
+author: John Snow Labs
+name: roberta_qa_squadv2_recipe_tokenwise_token_and_step_losses_3_epochs
+date: 2023-01-20
+tags: [en, open_source, roberta, question_answering, tensorflow]
+task: Question Answering
+language: en
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: RoBertaForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained RobertaForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `squadv2-recipe-roberta-tokenwise-token-and-step-losses-3-epochs` is an English model originally trained by `AnonymousSub`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/roberta_qa_squadv2_recipe_tokenwise_token_and_step_losses_3_epochs_en_4.3.0_3.0_1674224122519.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/roberta_qa_squadv2_recipe_tokenwise_token_and_step_losses_3_epochs_en_4.3.0_3.0_1674224122519.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_squadv2_recipe_tokenwise_token_and_step_losses_3_epochs","en")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_squadv2_recipe_tokenwise_token_and_step_losses_3_epochs","en")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq("What's my name?","My name is Clara and I live in Berkeley.").toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|roberta_qa_squadv2_recipe_tokenwise_token_and_step_losses_3_epochs|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|467.1 MB|
+|Case sensitive:|true|
+|Max sentence length:|256|
+
+## References
+
+- https://huggingface.co/AnonymousSub/squadv2-recipe-roberta-tokenwise-token-and-step-losses-3-epochs
\ No newline at end of file
diff --git a/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_test_v1_en.md b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_test_v1_en.md
new file mode 100644
index 00000000000000..adeb0348ab963d
--- /dev/null
+++ b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_test_v1_en.md
@@ -0,0 +1,88 @@
+---
+layout: model
+title: English RobertaForQuestionAnswering Cased model (from Andranik)
+author: John Snow Labs
+name: roberta_qa_test_v1
+date: 2023-01-20
+tags: [en, open_source, roberta, question_answering, tensorflow]
+task: Question Answering
+language: en
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: RoBertaForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained RobertaForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `TestQaV1` is an English model originally trained by `Andranik`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/roberta_qa_test_v1_en_4.3.0_3.0_1674208905005.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/roberta_qa_test_v1_en_4.3.0_3.0_1674208905005.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_test_v1","en")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_test_v1","en")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq("What's my name?","My name is Clara and I live in Berkeley.").toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|roberta_qa_test_v1|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|464.3 MB|
+|Case sensitive:|true|
+|Max sentence length:|256|
+
+## References
+
+- https://huggingface.co/Andranik/TestQaV1
\ No newline at end of file
diff --git a/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_testabsa3_en.md b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_testabsa3_en.md
new file mode 100644
index 00000000000000..bbcde0ad42e707
--- /dev/null
+++ b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_testabsa3_en.md
@@ -0,0 +1,88 @@
+---
+layout: model
+title: English RobertaForQuestionAnswering Cased model (from eAsyle)
+author: John Snow Labs
+name: roberta_qa_testabsa3
+date: 2023-01-20
+tags: [en, open_source, roberta, question_answering, tensorflow]
+task: Question Answering
+language: en
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: RoBertaForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained RobertaForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `testABSA3` is an English model originally trained by `eAsyle`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/roberta_qa_testabsa3_en_4.3.0_3.0_1674224346874.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/roberta_qa_testabsa3_en_4.3.0_3.0_1674224346874.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_testabsa3","en")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_testabsa3","en")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq("What's my name?","My name is Clara and I live in Berkeley.").toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|roberta_qa_testabsa3|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|426.2 MB|
+|Case sensitive:|true|
+|Max sentence length:|256|
+
+## References
+
+- https://huggingface.co/eAsyle/testABSA3
\ No newline at end of file
diff --git a/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_testabsa_en.md b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_testabsa_en.md
new file mode 100644
index 00000000000000..a90489e108cf75
--- /dev/null
+++ b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_testabsa_en.md
@@ -0,0 +1,88 @@
+---
+layout: model
+title: English RobertaForQuestionAnswering Cased model (from eAsyle)
+author: John Snow Labs
+name: roberta_qa_testabsa
+date: 2023-01-20
+tags: [en, open_source, roberta, question_answering, tensorflow]
+task: Question Answering
+language: en
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: RoBertaForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained RobertaForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `testABSA` is an English model originally trained by `eAsyle`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/roberta_qa_testabsa_en_4.3.0_3.0_1674224267018.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/roberta_qa_testabsa_en_4.3.0_3.0_1674224267018.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_testabsa","en")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_testabsa","en")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq("What's my name?","My name is Clara and I live in Berkeley.").toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|roberta_qa_testabsa|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|426.2 MB|
+|Case sensitive:|true|
+|Max sentence length:|256|
+
+## References
+
+- https://huggingface.co/eAsyle/testABSA
\ No newline at end of file
diff --git a/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_tiny_random_forquestionanswering_en.md b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_tiny_random_forquestionanswering_en.md
new file mode 100644
index 00000000000000..a9cabd1718f630
--- /dev/null
+++ b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_tiny_random_forquestionanswering_en.md
@@ -0,0 +1,88 @@
+---
+layout: model
+title: English RobertaForQuestionAnswering Tiny Cased model (from hf-internal-testing)
+author: John Snow Labs
+name: roberta_qa_tiny_random_forquestionanswering
+date: 2023-01-20
+tags: [en, open_source, roberta, question_answering, tensorflow]
+task: Question Answering
+language: en
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: RoBertaForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained RobertaForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `tiny-random-RobertaForQuestionAnswering` is an English model originally trained by `hf-internal-testing`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/roberta_qa_tiny_random_forquestionanswering_en_4.3.0_3.0_1674224369695.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/roberta_qa_tiny_random_forquestionanswering_en_4.3.0_3.0_1674224369695.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_tiny_random_forquestionanswering","en")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_tiny_random_forquestionanswering","en")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq("What's my name?","My name is Clara and I live in Berkeley.").toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|roberta_qa_tiny_random_forquestionanswering|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|681.7 KB|
+|Case sensitive:|true|
+|Max sentence length:|256|
+
+## References
+
+- https://huggingface.co/hf-internal-testing/tiny-random-RobertaForQuestionAnswering
\ No newline at end of file
diff --git a/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_tiny_squad2_step1_en.md b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_tiny_squad2_step1_en.md
new file mode 100644
index 00000000000000..836a32d34b2cf2
--- /dev/null
+++ b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_tiny_squad2_step1_en.md
@@ -0,0 +1,88 @@
+---
+layout: model
+title: English RobertaForQuestionAnswering Tiny Cased model (from deepset)
+author: John Snow Labs
+name: roberta_qa_tiny_squad2_step1
+date: 2023-01-20
+tags: [en, open_source, roberta, question_answering, tensorflow]
+task: Question Answering
+language: en
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: RoBertaForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained RobertaForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `tinyroberta-squad2-step1` is an English model originally trained by `deepset`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/roberta_qa_tiny_squad2_step1_en_4.3.0_3.0_1674224441422.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/roberta_qa_tiny_squad2_step1_en_4.3.0_3.0_1674224441422.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_tiny_squad2_step1","en")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_tiny_squad2_step1","en")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq("What's my name?","My name is Clara and I live in Berkeley.").toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|roberta_qa_tiny_squad2_step1|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document, token]|
+|Output Labels:|[class]|
+|Language:|en|
+|Size:|307.3 MB|
+|Case sensitive:|true|
+|Max sentence length:|256|
+
+## References
+
+- https://huggingface.co/deepset/tinyroberta-squad2-step1
\ No newline at end of file
diff --git a/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_train_json_en.md b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_train_json_en.md
new file mode 100644
index 00000000000000..dcdccc1bf21f46
--- /dev/null
+++ b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_train_json_en.md
@@ -0,0 +1,88 @@
+---
+layout: model
+title: English RobertaForQuestionAnswering Cased model (from aravind-812)
+author: John Snow Labs
+name: roberta_qa_train_json
+date: 2023-01-20
+tags: [en, open_source, roberta, question_answering, tensorflow]
+task: Question Answering
+language: en
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: RoBertaForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained RobertaForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `roberta-train-json` is an English model originally trained by `aravind-812`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/roberta_qa_train_json_en_4.3.0_3.0_1674222495676.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/roberta_qa_train_json_en_4.3.0_3.0_1674222495676.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_train_json","en")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_train_json","en")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq("What's my name?","My name is Clara and I live in Berkeley.").toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|roberta_qa_train_json|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document, token]|
+|Output Labels:|[class]|
+|Language:|en|
+|Size:|1.3 GB|
+|Case sensitive:|true|
+|Max sentence length:|256|
+
+## References
+
+- https://huggingface.co/aravind-812/roberta-train-json
\ No newline at end of file
diff --git a/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_tydi_en.md b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_tydi_en.md
new file mode 100644
index 00000000000000..29f900c0653fbf
--- /dev/null
+++ b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_tydi_en.md
@@ -0,0 +1,88 @@
+---
+layout: model
+title: English RobertaForQuestionAnswering Cased model (from benny6)
+author: John Snow Labs
+name: roberta_qa_tydi
+date: 2023-01-20
+tags: [en, open_source, roberta, question_answering, tensorflow]
+task: Question Answering
+language: en
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: RoBertaForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained RobertaForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `roberta-tydiqa` is an English model originally trained by `benny6`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/roberta_qa_tydi_en_4.3.0_3.0_1674222584111.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/roberta_qa_tydi_en_4.3.0_3.0_1674222584111.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_tydi","en")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_tydi","en")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq("What's my name?","My name is Clara and I live in Berkeley.").toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|roberta_qa_tydi|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document, token]|
+|Output Labels:|[class]|
+|Language:|en|
+|Size:|471.7 MB|
+|Case sensitive:|true|
+|Max sentence length:|256|
+
+## References
+
+- https://huggingface.co/benny6/roberta-tydiqa
\ No newline at end of file
diff --git a/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_unaugmentedv3_en.md b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_unaugmentedv3_en.md
new file mode 100644
index 00000000000000..b257468d6363e3
--- /dev/null
+++ b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_unaugmentedv3_en.md
@@ -0,0 +1,88 @@
+---
+layout: model
+title: English RobertaForQuestionAnswering Cased model (from comacrae)
+author: John Snow Labs
+name: roberta_qa_unaugmentedv3
+date: 2023-01-20
+tags: [en, open_source, roberta, question_answering, tensorflow]
+task: Question Answering
+language: en
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: RoBertaForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained RobertaForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `roberta-unaugmentedv3` is an English model originally trained by `comacrae`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/roberta_qa_unaugmentedv3_en_4.3.0_3.0_1674222641663.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/roberta_qa_unaugmentedv3_en_4.3.0_3.0_1674222641663.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_unaugmentedv3","en")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_unaugmentedv3","en")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq("What's my name?","My name is Clara and I live in Berkeley.").toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|roberta_qa_unaugmentedv3|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document, token]|
+|Output Labels:|[class]|
+|Language:|en|
+|Size:|464.5 MB|
+|Case sensitive:|true|
+|Max sentence length:|256|
+
+## References
+
+- https://huggingface.co/comacrae/roberta-unaugmentedv3
\ No newline at end of file
diff --git a/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_unaugv3_en.md b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_unaugv3_en.md
new file mode 100644
index 00000000000000..4e3ef6b86eaa69
--- /dev/null
+++ b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_unaugv3_en.md
@@ -0,0 +1,88 @@
+---
+layout: model
+title: English RobertaForQuestionAnswering Cased model (from comacrae)
+author: John Snow Labs
+name: roberta_qa_unaugv3
+date: 2023-01-20
+tags: [en, open_source, roberta, question_answering, tensorflow]
+task: Question Answering
+language: en
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: RoBertaForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained RobertaForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `roberta-unaugv3` is an English model originally trained by `comacrae`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/roberta_qa_unaugv3_en_4.3.0_3.0_1674222699716.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/roberta_qa_unaugv3_en_4.3.0_3.0_1674222699716.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_unaugv3","en")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_unaugv3","en")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq("What's my name?","My name is Clara and I live in Berkeley.").toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|roberta_qa_unaugv3|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document, token]|
+|Output Labels:|[class]|
+|Language:|en|
+|Size:|464.5 MB|
+|Case sensitive:|true|
+|Max sentence length:|256|
+
+## References
+
+- https://huggingface.co/comacrae/roberta-unaugv3
\ No newline at end of file
diff --git a/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_unqover_base_news_en.md b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_unqover_base_news_en.md
new file mode 100644
index 00000000000000..3d7b8b119a39cb
--- /dev/null
+++ b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_unqover_base_news_en.md
@@ -0,0 +1,88 @@
+---
+layout: model
+title: English RobertaForQuestionAnswering Base Cased model (from tli8hf)
+author: John Snow Labs
+name: roberta_qa_unqover_base_news
+date: 2023-01-20
+tags: [en, open_source, roberta, question_answering, tensorflow]
+task: Question Answering
+language: en
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: RoBertaForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained RobertaForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `unqover-roberta-base-newsqa` is an English model originally trained by `tli8hf`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/roberta_qa_unqover_base_news_en_4.3.0_3.0_1674224491216.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/roberta_qa_unqover_base_news_en_4.3.0_3.0_1674224491216.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_unqover_base_news","en")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_unqover_base_news","en")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq("What's my name?","My name is Clara and I live in Berkeley.").toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|roberta_qa_unqover_base_news|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document, token]|
+|Output Labels:|[class]|
+|Language:|en|
+|Size:|463.7 MB|
+|Case sensitive:|true|
+|Max sentence length:|256|
+
+## References
+
+- https://huggingface.co/tli8hf/unqover-roberta-base-newsqa
\ No newline at end of file
diff --git a/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_unqover_base_squad_en.md b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_unqover_base_squad_en.md
new file mode 100644
index 00000000000000..ae8d5bd136243a
--- /dev/null
+++ b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_unqover_base_squad_en.md
@@ -0,0 +1,88 @@
+---
+layout: model
+title: English RobertaForQuestionAnswering Base Cased model (from tli8hf)
+author: John Snow Labs
+name: roberta_qa_unqover_base_squad
+date: 2023-01-20
+tags: [en, open_source, roberta, question_answering, tensorflow]
+task: Question Answering
+language: en
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: RoBertaForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained RobertaForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `unqover-roberta-base-squad` is an English model originally trained by `tli8hf`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/roberta_qa_unqover_base_squad_en_4.3.0_3.0_1674224552223.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/roberta_qa_unqover_base_squad_en_4.3.0_3.0_1674224552223.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_unqover_base_squad","en")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_unqover_base_squad","en")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq("What's my name?","My name is Clara and I live in Berkeley.").toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|roberta_qa_unqover_base_squad|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document, token]|
+|Output Labels:|[class]|
+|Language:|en|
+|Size:|463.8 MB|
+|Case sensitive:|true|
+|Max sentence length:|256|
+
+## References
+
+- https://huggingface.co/tli8hf/unqover-roberta-base-squad
\ No newline at end of file
diff --git a/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_unqover_large_news_en.md b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_unqover_large_news_en.md
new file mode 100644
index 00000000000000..1a3db4011e89de
--- /dev/null
+++ b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_unqover_large_news_en.md
@@ -0,0 +1,88 @@
+---
+layout: model
+title: English RobertaForQuestionAnswering Large Cased model (from tli8hf)
+author: John Snow Labs
+name: roberta_qa_unqover_large_news
+date: 2023-01-20
+tags: [en, open_source, roberta, question_answering, tensorflow]
+task: Question Answering
+language: en
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: RoBertaForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained RobertaForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `unqover-roberta-large-newsqa` is an English model originally trained by `tli8hf`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/roberta_qa_unqover_large_news_en_4.3.0_3.0_1674224676431.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/roberta_qa_unqover_large_news_en_4.3.0_3.0_1674224676431.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_unqover_large_news","en")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_unqover_large_news","en")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq("What's my name?","My name is Clara and I live in Berkeley.").toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|roberta_qa_unqover_large_news|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document, token]|
+|Output Labels:|[class]|
+|Language:|en|
+|Size:|1.3 GB|
+|Case sensitive:|true|
+|Max sentence length:|256|
+
+## References
+
+- https://huggingface.co/tli8hf/unqover-roberta-large-newsqa
\ No newline at end of file
diff --git a/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_unqover_large_squad_en.md b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_unqover_large_squad_en.md
new file mode 100644
index 00000000000000..4bcced4f018064
--- /dev/null
+++ b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_unqover_large_squad_en.md
@@ -0,0 +1,88 @@
+---
+layout: model
+title: English RobertaForQuestionAnswering Large Cased model (from tli8hf)
+author: John Snow Labs
+name: roberta_qa_unqover_large_squad
+date: 2023-01-20
+tags: [en, open_source, roberta, question_answering, tensorflow]
+task: Question Answering
+language: en
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: RoBertaForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained RobertaForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `unqover-roberta-large-squad` is an English model originally trained by `tli8hf`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/roberta_qa_unqover_large_squad_en_4.3.0_3.0_1674224834331.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/roberta_qa_unqover_large_squad_en_4.3.0_3.0_1674224834331.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_unqover_large_squad","en")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_unqover_large_squad","en")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq("What's my name?","My name is Clara and I live in Berkeley.").toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|roberta_qa_unqover_large_squad|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document, token]|
+|Output Labels:|[class]|
+|Language:|en|
+|Size:|1.3 GB|
+|Case sensitive:|true|
+|Max sentence length:|256|
+
+## References
+
+- https://huggingface.co/tli8hf/unqover-roberta-large-squad
\ No newline at end of file
diff --git a/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_xdoc_base_squad1.1_en.md b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_xdoc_base_squad1.1_en.md
new file mode 100644
index 00000000000000..9f31702ceb6528
--- /dev/null
+++ b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_xdoc_base_squad1.1_en.md
@@ -0,0 +1,89 @@
+---
+layout: model
+title: English RobertaForQuestionAnswering Base Cased model (from microsoft)
+author: John Snow Labs
+name: roberta_qa_xdoc_base_squad1.1
+date: 2023-01-20
+tags: [en, open_source, roberta, question_answering, tensorflow]
+task: Question Answering
+language: en
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: RoBertaForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained RobertaForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `xdoc-base-squad1.1` is an English model originally trained by `microsoft`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/roberta_qa_xdoc_base_squad1.1_en_4.3.0_3.0_1674224925472.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/roberta_qa_xdoc_base_squad1.1_en_4.3.0_3.0_1674224925472.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_xdoc_base_squad1.1","en")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_xdoc_base_squad1.1","en")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq("What's my name?","My name is Clara and I live in Berkeley.").toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|roberta_qa_xdoc_base_squad1.1|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document, token]|
+|Output Labels:|[class]|
+|Language:|en|
+|Size:|466.9 MB|
+|Case sensitive:|true|
+|Max sentence length:|256|
+
+## References
+
+- https://huggingface.co/microsoft/xdoc-base-squad1.1
+- https://arxiv.org/abs/2210.02849
\ No newline at end of file
diff --git a/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_xdoc_base_squad2.0_en.md b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_xdoc_base_squad2.0_en.md
new file mode 100644
index 00000000000000..a9dfd237dfc675
--- /dev/null
+++ b/docs/_posts/Damla-Gurbaz/2023-01-20-roberta_qa_xdoc_base_squad2.0_en.md
@@ -0,0 +1,89 @@
+---
+layout: model
+title: English RobertaForQuestionAnswering Base Cased model (from microsoft)
+author: John Snow Labs
+name: roberta_qa_xdoc_base_squad2.0
+date: 2023-01-20
+tags: [en, open_source, roberta, question_answering, tensorflow]
+task: Question Answering
+language: en
+edition: Spark NLP 4.3.0
+spark_version: 3.0
+supported: true
+engine: tensorflow
+annotator: RoBertaForQuestionAnswering
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained RobertaForQuestionAnswering model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `xdoc-base-squad2.0` is an English model originally trained by `microsoft`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/roberta_qa_xdoc_base_squad2.0_en_4.3.0_3.0_1674224984469.zip){:.button.button-orange}
+[Copy S3 URI](s3://auxdata.johnsnowlabs.com/public/models/roberta_qa_xdoc_base_squad2.0_en_4.3.0_3.0_1674224984469.zip){:.button.button-orange.button-orange-trans.button-icon.button-copy-s3}
+
+## How to use
+
+
+
+
+{% include programmingLanguageSelectScalaPythonNLU.html %}
+```python
+Document_Assembler = MultiDocumentAssembler()\
+ .setInputCols(["question", "context"])\
+ .setOutputCols(["document_question", "document_context"])
+
+Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_xdoc_base_squad2.0","en")\
+ .setInputCols(["document_question", "document_context"])\
+ .setOutputCol("answer")\
+ .setCaseSensitive(True)
+
+pipeline = Pipeline(stages=[Document_Assembler, Question_Answering])
+
+data = spark.createDataFrame([["What's my name?","My name is Clara and I live in Berkeley."]]).toDF("question", "context")
+
+result = pipeline.fit(data).transform(data)
+```
+```scala
+val Document_Assembler = new MultiDocumentAssembler()
+ .setInputCols(Array("question", "context"))
+ .setOutputCols(Array("document_question", "document_context"))
+
+val Question_Answering = RoBertaForQuestionAnswering.pretrained("roberta_qa_xdoc_base_squad2.0","en")
+ .setInputCols(Array("document_question", "document_context"))
+ .setOutputCol("answer")
+ .setCaseSensitive(true)
+
+val pipeline = new Pipeline().setStages(Array(Document_Assembler, Question_Answering))
+
+val data = Seq("What's my name?","My name is Clara and I live in Berkeley.").toDS.toDF("question", "context")
+
+val result = pipeline.fit(data).transform(data)
+```
+
+
+{:.model-param}
+## Model Information
+
+{:.table-model}
+|---|---|
+|Model Name:|roberta_qa_xdoc_base_squad2.0|
+|Compatibility:|Spark NLP 4.3.0+|
+|License:|Open Source|
+|Edition:|Official|
+|Input Labels:|[document_question, document_context]|
+|Output Labels:|[answer]|
+|Language:|en|
+|Size:|466.9 MB|
+|Case sensitive:|true|
+|Max sentence length:|256|
+
+## References
+
+- https://huggingface.co/microsoft/xdoc-base-squad2.0
+- https://arxiv.org/abs/2210.02849
\ No newline at end of file
diff --git a/docs/_posts/gadde5300/2023-01-13-bert_embeddings_Italian_Legal_BERT_it.md b/docs/_posts/gadde5300/2023-01-13-bert_embeddings_Italian_Legal_BERT_it.md
new file mode 100644
index 00000000000000..1b01dac775c180
--- /dev/null
+++ b/docs/_posts/gadde5300/2023-01-13-bert_embeddings_Italian_Legal_BERT_it.md
@@ -0,0 +1,74 @@
+---
+layout: model
+title: Italian BERT Embedding Cased model
+author: John Snow Labs
+name: bert_embeddings_Italian_Legal_BERT
+date: 2023-01-13
+tags: [it, open_source, embeddings, bert]
+task: Embeddings
+language: it
+edition: Spark NLP 4.2.7
+spark_version: 3.0
+supported: true
+article_header:
+ type: cover
+use_language_switcher: "Python-Scala-Java"
+---
+
+## Description
+
+Pretrained BERT Embedding model, adapted from Hugging Face and curated to provide scalability and production-readiness using Spark NLP. `Italian-Legal-BERT` is an Italian model originally trained by `dlicari`.
+
+{:.btn-box}
+
+
+[Download](https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/models/bert_embeddings_Italian_Legal_BERT_it_4.2.7_3.0_1673598434160.zip){:.button.button-orange.button-orange-trans.arr.button-icon}
+
+## How to use
+
+
+
+