diff --git a/.lift/ignoreFiles b/.lift/ignoreFiles deleted file mode 100644 index 34b53a7362a0a5..00000000000000 --- a/.lift/ignoreFiles +++ /dev/null @@ -1,341 +0,0 @@ - -# Created by https://www.gitignore.io/api/sbt,java,scala,python,eclipse,intellij,intellij+all - -**/docs/** -docs/** -**/*.min.js -**/*.js -**/*.py -python/** -**/python/** - -### Eclipse ### - -.metadata -bin/ -tmp/ -*.tmp -*.bak -*.swp -*~.nib -local.properties -.settings/ -.loadpath -.recommenders -PubMed* -*cache_pretrained* -*.crc -*.sst -_SUCCESS* -*stages* -*auxdata* -# External tool builders -.externalToolBuilders/ - -# Locally stored "Eclipse launch configurations" -*.launch - -# PyDev specific (Python IDE for Eclipse) -*.pydevproject - -# CDT-specific (C/C++ Development Tooling) -.cproject - -# Java annotation processor (APT) -.factorypath - -# PDT-specific (PHP Development Tools) -.buildpath - -# sbteclipse plugin -.target - -# Tern plugin -.tern-project - -# TeXlipse plugin -.texlipse - -# STS (Spring Tool Suite) -.springBeans - -# Code Recommenders -.recommenders/ - -# Scala IDE specific (Scala & Java development for Eclipse) -.cache-main -.scala_dependencies -.worksheet - -### Eclipse Patch ### -# Eclipse Core -.project - -# JDT-specific (Eclipse Java Development Tools) -.classpath - -### Intellij ### -# Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm, CLion, Android Studio and Webstorm -# Reference: https://intellij-support.jetbrains.com/hc/en-us/articles/206544839 - -# User-specific stuff: -.idea/**/workspace.xml -.idea/**/tasks.xml -.idea/dictionaries - -# Sensitive or high-churn files: -.idea/**/dataSources/ -.idea/**/dataSources.ids -.idea/**/dataSources.xml -.idea/**/dataSources.local.xml -.idea/**/sqlDataSources.xml -.idea/**/dynamic.xml -.idea/**/uiDesigner.xml - -# Gradle: -.idea/**/gradle.xml -.idea/**/libraries - -# CMake -cmake-build-debug/ - -# Mongo Explorer plugin: -.idea/**/mongoSettings.xml - -## File-based project format: -*.iws - -## Plugin-specific files: - -# IntelliJ -/out/ - -# mpeltonen/sbt-idea plugin -.idea_modules/ - -# JIRA plugin -atlassian-ide-plugin.xml - -# Cursive Clojure plugin -.idea/replstate.xml - -# Crashlytics plugin (for Android Studio and IntelliJ) -com_crashlytics_export_strings.xml -crashlytics.properties -crashlytics-build.properties -fabric.properties - -### Intellij Patch ### -# Comment Reason: https://github.com/joeblau/gitignore.io/issues/186#issuecomment-215987721 - -*.iml -# modules.xml -# .idea/misc.xml -# *.ipr - -# Sonarlint plugin -.idea/sonarlint - -### Intellij+all ### -# Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm, CLion, Android Studio and Webstorm -# Reference: https://intellij-support.jetbrains.com/hc/en-us/articles/206544839 - -# User-specific stuff: - -# Sensitive or high-churn files: - -# Gradle: - -# CMake - -# Mongo Explorer plugin: - -## File-based project format: - -## Plugin-specific files: - -# IntelliJ - -# mpeltonen/sbt-idea plugin - -# JIRA plugin - -# Cursive Clojure plugin - -# Crashlytics plugin (for Android Studio and IntelliJ) - -### Intellij+all Patch ### -# Ignores the whole idea folder -# See https://github.com/joeblau/gitignore.io/issues/186 and https://github.com/joeblau/gitignore.io/issues/360 - -.idea/ - -### Java ### -# Compiled class file -*.class - -# Log file -*.log - -# BlueJ files -*.ctxt - -# Mobile Tools for Java (J2ME) -.mtj.tmp/ - -# Package Files # -*.jar -*.war -*.ear -*.zip -*.tar.gz -*.rar - -# virtual machine crash logs, see 
http://www.java.com/en/download/help/error_hotspot.xml
-hs_err_pid*
-
-### Python ###
-# Byte-compiled / optimized / DLL files
-__pycache__/
-*.py[cod]
-*$py.class
-
-# C extensions
-*.so
-
-# Distribution / packaging
-.Python
-build/
-develop-eggs/
-dist/
-downloads/
-eggs/
-.eggs/
-python/lib/
-lib64/
-parts/
-sdist/
-var/
-wheels/
-*.egg-info/
-.installed.cfg
-*.egg
-
-# PyInstaller
-# Usually these files are written by a python script from a template
-# before PyInstaller builds the exe, so as to inject date/other infos into it.
-*.manifest
-*.spec
-
-# Installer logs
-pip-log.txt
-pip-delete-this-directory.txt
-
-# Unit test / coverage reports
-htmlcov/
-.tox/
-.coverage
-.coverage.*
-.cache
-nosetests.xml
-coverage.xml
-*.cover
-.hypothesis/
-
-# Translations
-*.mo
-*.pot
-
-# Django stuff:
-local_settings.py
-
-# Flask stuff:
-instance/
-.webassets-cache
-
-# Scrapy stuff:
-.scrapy
-
-# Sphinx documentation
-docs/_build/
-docs/vendor/
-
-# Frontend
-docs/_frontend/node_modules
-docs/_frontend/static
-
-# PyBuilder
-target/
-
-# Jupyter Notebook
-.ipynb_checkpoints
-
-# pyenv
-.python-version
-
-# celery beat schedule file
-celerybeat-schedule
-
-# SageMath parsed files
-*.sage.py
-
-# Environments
-.env
-.venv
-env/
-venv/
-ENV/
-env.bak/
-venv.bak/
-
-# Spyder project settings
-.spyderproject
-.spyproject
-
-# Rope project settings
-.ropeproject
-
-# mkdocs documentation
-/site
-
-# mypy
-.mypy_cache/
-
-### SBT ###
-# Simple Build Tool
-# http://www.scala-sbt.org/release/docs/Getting-Started/Directories.html#configuring-version-control
-
-dist/*
-lib_managed/
-src_managed/
-project/boot/
-project/plugins/project/
-.history
-.lib/
-
-### Scala ###
-
-# End of https://www.gitignore.io/api/sbt,java,scala,python,eclipse,intellij,intellij+all
-
-### Local ###
-tmp_pipeline/
-tmp_symspell/
-test-output-tmp/
-spark-warehouse/
-/python/python.iml
-test_crf_pipeline/
-test_*_pipeline/
-*metastore_db*
-python/src/
-python/tensorflow/bert/models/**
-**/.DS_Store
-**/tmp_*
-docs/_site/**
-docs/.sass-cache/**
-tst_shortcut_sd/
-src/*/resources/*.classes
-/word_segmenter_metrics/
-/special_class.ser
-.bsp/sbt.json
-python/docs/_build/**
-python/docs/reference/_autosummary/**
diff --git a/CHANGELOG b/CHANGELOG
index 3e611cda48316f..39e902cfcf7b85 100644
--- a/CHANGELOG
+++ b/CHANGELOG
@@ -1,3 +1,14 @@
+========
+5.0.1
+========
+----------------
+Bug Fixes & Enhancements
+----------------
+* Fix `multiLabel` param issue in `XXXForSequenceClassification` and `XXXForZeroShotClassification` annotators
+* Add the missing `threshold` param to all `XXXForSequenceClassification` annotators in Python
+* Fix issue with passing the `spark.driver.cores` config as a param to the start() function in Python and Scala
+* Add new notebooks to export BERT, DistilBERT, RoBERTa, and DeBERTa models to ONNX format
+
========
5.0.0
========
diff --git a/README.md b/README.md
index a002cf66f1c285..57c4ce0793a35a 100644
--- a/README.md
+++ b/README.md
@@ -167,7 +167,7 @@ To use Spark NLP you need the following requirements:
**GPU (optional):**
-Spark NLP 5.0.0 is built with ONNX 1.15.1 and TensorFlow 2.7.1 deep learning engines. The minimum following NVIDIA® software are only required for GPU support:
+Spark NLP 5.0.1 is built with ONNX 1.15.1 and TensorFlow 2.7.1 deep learning engines. The following NVIDIA® software is required only for GPU support:
- NVIDIA® GPU drivers version 450.80.02 or higher
- CUDA® Toolkit 11.2
@@ -183,7 +183,7 @@ $ java -version
$ conda create -n sparknlp python=3.7 -y
$ conda activate sparknlp
# spark-nlp by default is based on pyspark 3.x
-$ pip install spark-nlp==5.0.0 pyspark==3.3.1
+$ pip install spark-nlp==5.0.1 pyspark==3.3.1
```
In Python console or Jupyter `Python3` kernel:
@@ -228,7 +228,7 @@ For more examples, you can visit our dedicated [examples](https://github.com/Joh
## Apache Spark Support
-Spark NLP *5.0.0* has been built on top of Apache Spark 3.4 while fully supports Apache Spark 3.0.x, 3.1.x, 3.2.x, 3.3.x, and 3.4.x
+Spark NLP *5.0.1* has been built on top of Apache Spark 3.4 while fully supporting Apache Spark 3.0.x, 3.1.x, 3.2.x, 3.3.x, and 3.4.x
| Spark NLP | Apache Spark 2.3.x | Apache Spark 2.4.x | Apache Spark 3.0.x | Apache Spark 3.1.x | Apache Spark 3.2.x | Apache Spark 3.3.x | Apache Spark 3.4.x |
|-----------|--------------------|--------------------|--------------------|--------------------|--------------------|--------------------|--------------------|
@@ -267,7 +267,7 @@ Find out more about `Spark NLP` versions from our [release notes](https://github
## Databricks Support
-Spark NLP 5.0.0 has been tested and is compatible with the following runtimes:
+Spark NLP 5.0.1 has been tested and is compatible with the following runtimes:
**CPU:**
@@ -325,7 +325,7 @@ Spark NLP 5.0.0 has been tested and is compatible with the following runtimes:
## EMR Support
-Spark NLP 5.0.0 has been tested and is compatible with the following EMR releases:
+Spark NLP 5.0.1 has been tested and is compatible with the following EMR releases:
- emr-6.2.0
- emr-6.3.0
@@ -369,11 +369,11 @@ Spark NLP supports all major releases of Apache Spark 3.0.x, Apache Spark 3.1.x,
```sh
# CPU
-spark-shell --packages com.johnsnowlabs.nlp:spark-nlp_2.12:5.0.0
+spark-shell --packages com.johnsnowlabs.nlp:spark-nlp_2.12:5.0.1
-pyspark --packages com.johnsnowlabs.nlp:spark-nlp_2.12:5.0.0
+pyspark --packages com.johnsnowlabs.nlp:spark-nlp_2.12:5.0.1
-spark-submit --packages com.johnsnowlabs.nlp:spark-nlp_2.12:5.0.0
+spark-submit --packages com.johnsnowlabs.nlp:spark-nlp_2.12:5.0.1
```
The `spark-nlp` has been published to
@@ -382,11 +382,11 @@ the [Maven Repository](https://mvnrepository.com/artifact/com.johnsnowlabs.nlp/s
```sh
# GPU
-spark-shell --packages com.johnsnowlabs.nlp:spark-nlp-gpu_2.12:5.0.0
+spark-shell --packages com.johnsnowlabs.nlp:spark-nlp-gpu_2.12:5.0.1
-pyspark --packages com.johnsnowlabs.nlp:spark-nlp-gpu_2.12:5.0.0
+pyspark --packages com.johnsnowlabs.nlp:spark-nlp-gpu_2.12:5.0.1
-spark-submit --packages com.johnsnowlabs.nlp:spark-nlp-gpu_2.12:5.0.0
+spark-submit --packages com.johnsnowlabs.nlp:spark-nlp-gpu_2.12:5.0.1
```
@@ -396,11 +396,11 @@ the [Maven Repository](https://mvnrepository.com/artifact/com.johnsnowlabs.nlp/s
```sh
# AArch64
-spark-shell --packages com.johnsnowlabs.nlp:spark-nlp-aarch64_2.12:5.0.0
+spark-shell --packages com.johnsnowlabs.nlp:spark-nlp-aarch64_2.12:5.0.1
-pyspark --packages com.johnsnowlabs.nlp:spark-nlp-aarch64_2.12:5.0.0
+pyspark --packages com.johnsnowlabs.nlp:spark-nlp-aarch64_2.12:5.0.1
-spark-submit --packages com.johnsnowlabs.nlp:spark-nlp-aarch64_2.12:5.0.0
+spark-submit --packages com.johnsnowlabs.nlp:spark-nlp-aarch64_2.12:5.0.1
```
@@ -410,11 +410,11 @@ the [Maven Repository](https://mvnrepository.com/artifact/com.johnsnowlabs.nlp/s
```sh
# M1/M2 (Apple Silicon)
-spark-shell 
--packages com.johnsnowlabs.nlp:spark-nlp-silicon_2.12:5.0.0 +spark-shell --packages com.johnsnowlabs.nlp:spark-nlp-silicon_2.12:5.0.1 -pyspark --packages com.johnsnowlabs.nlp:spark-nlp-silicon_2.12:5.0.0 +pyspark --packages com.johnsnowlabs.nlp:spark-nlp-silicon_2.12:5.0.1 -spark-submit --packages com.johnsnowlabs.nlp:spark-nlp-silicon_2.12:5.0.0 +spark-submit --packages com.johnsnowlabs.nlp:spark-nlp-silicon_2.12:5.0.1 ``` @@ -428,7 +428,7 @@ set in your SparkSession: spark-shell \ --driver-memory 16g \ --conf spark.kryoserializer.buffer.max=2000M \ - --packages com.johnsnowlabs.nlp:spark-nlp_2.12:5.0.0 + --packages com.johnsnowlabs.nlp:spark-nlp_2.12:5.0.1 ``` ## Scala @@ -446,7 +446,7 @@ coordinates: com.johnsnowlabs.nlp spark-nlp_2.12 - 5.0.0 + 5.0.1 ``` @@ -457,7 +457,7 @@ coordinates: com.johnsnowlabs.nlp spark-nlp-gpu_2.12 - 5.0.0 + 5.0.1 ``` @@ -468,7 +468,7 @@ coordinates: com.johnsnowlabs.nlp spark-nlp-aarch64_2.12 - 5.0.0 + 5.0.1 ``` @@ -479,7 +479,7 @@ coordinates: com.johnsnowlabs.nlp spark-nlp-silicon_2.12 - 5.0.0 + 5.0.1 ``` @@ -489,28 +489,28 @@ coordinates: ```sbtshell // https://mvnrepository.com/artifact/com.johnsnowlabs.nlp/spark-nlp -libraryDependencies += "com.johnsnowlabs.nlp" %% "spark-nlp" % "5.0.0" +libraryDependencies += "com.johnsnowlabs.nlp" %% "spark-nlp" % "5.0.1" ``` **spark-nlp-gpu:** ```sbtshell // https://mvnrepository.com/artifact/com.johnsnowlabs.nlp/spark-nlp-gpu -libraryDependencies += "com.johnsnowlabs.nlp" %% "spark-nlp-gpu" % "5.0.0" +libraryDependencies += "com.johnsnowlabs.nlp" %% "spark-nlp-gpu" % "5.0.1" ``` **spark-nlp-aarch64:** ```sbtshell // https://mvnrepository.com/artifact/com.johnsnowlabs.nlp/spark-nlp-aarch64 -libraryDependencies += "com.johnsnowlabs.nlp" %% "spark-nlp-aarch64" % "5.0.0" +libraryDependencies += "com.johnsnowlabs.nlp" %% "spark-nlp-aarch64" % "5.0.1" ``` **spark-nlp-silicon:** ```sbtshell // https://mvnrepository.com/artifact/com.johnsnowlabs.nlp/spark-nlp-silicon -libraryDependencies += "com.johnsnowlabs.nlp" %% "spark-nlp-silicon" % "5.0.0" +libraryDependencies += "com.johnsnowlabs.nlp" %% "spark-nlp-silicon" % "5.0.1" ``` Maven @@ -532,7 +532,7 @@ If you installed pyspark through pip/conda, you can install `spark-nlp` through Pip: ```bash -pip install spark-nlp==5.0.0 +pip install spark-nlp==5.0.1 ``` Conda: @@ -561,7 +561,7 @@ spark = SparkSession.builder .config("spark.driver.memory", "16G") .config("spark.driver.maxResultSize", "0") .config("spark.kryoserializer.buffer.max", "2000M") - .config("spark.jars.packages", "com.johnsnowlabs.nlp:spark-nlp_2.12:5.0.0") + .config("spark.jars.packages", "com.johnsnowlabs.nlp:spark-nlp_2.12:5.0.1") .getOrCreate() ``` @@ -632,7 +632,7 @@ Use either one of the following options - Add the following Maven Coordinates to the interpreter's library list ```bash -com.johnsnowlabs.nlp:spark-nlp_2.12:5.0.0 +com.johnsnowlabs.nlp:spark-nlp_2.12:5.0.1 ``` - Add a path to pre-built jar from [here](#compiled-jars) in the interpreter's library list making sure the jar is @@ -643,7 +643,7 @@ com.johnsnowlabs.nlp:spark-nlp_2.12:5.0.0 Apart from the previous step, install the python module through pip ```bash -pip install spark-nlp==5.0.0 +pip install spark-nlp==5.0.1 ``` Or you can install `spark-nlp` from inside Zeppelin by using Conda: @@ -671,7 +671,7 @@ launch the Jupyter from the same Python environment: $ conda create -n sparknlp python=3.8 -y $ conda activate sparknlp # spark-nlp by default is based on pyspark 3.x -$ pip install spark-nlp==5.0.0 pyspark==3.3.1 jupyter +$ pip install 
spark-nlp==5.0.1 pyspark==3.3.1 jupyter $ jupyter notebook ``` @@ -688,7 +688,7 @@ export PYSPARK_PYTHON=python3 export PYSPARK_DRIVER_PYTHON=jupyter export PYSPARK_DRIVER_PYTHON_OPTS=notebook -pyspark --packages com.johnsnowlabs.nlp:spark-nlp_2.12:5.0.0 +pyspark --packages com.johnsnowlabs.nlp:spark-nlp_2.12:5.0.1 ``` Alternatively, you can mix in using `--jars` option for pyspark + `pip install spark-nlp` @@ -715,7 +715,7 @@ This script comes with the two options to define `pyspark` and `spark-nlp` versi # -s is for spark-nlp # -g will enable upgrading libcudnn8 to 8.1.0 on Google Colab for GPU usage # by default they are set to the latest -!wget https://setup.johnsnowlabs.com/colab.sh -O - | bash /dev/stdin -p 3.2.3 -s 5.0.0 +!wget https://setup.johnsnowlabs.com/colab.sh -O - | bash /dev/stdin -p 3.2.3 -s 5.0.1 ``` [Spark NLP quick start on Google Colab](https://colab.research.google.com/github/JohnSnowLabs/spark-nlp/blob/master/examples/python/quick_start_google_colab.ipynb) @@ -738,7 +738,7 @@ This script comes with the two options to define `pyspark` and `spark-nlp` versi # -s is for spark-nlp # -g will enable upgrading libcudnn8 to 8.1.0 on Kaggle for GPU usage # by default they are set to the latest -!wget https://setup.johnsnowlabs.com/colab.sh -O - | bash /dev/stdin -p 3.2.3 -s 5.0.0 +!wget https://setup.johnsnowlabs.com/colab.sh -O - | bash /dev/stdin -p 3.2.3 -s 5.0.1 ``` [Spark NLP quick start on Kaggle Kernel](https://www.kaggle.com/mozzie/spark-nlp-named-entity-recognition) is a live @@ -757,9 +757,9 @@ demo on Kaggle Kernel that performs named entity recognitions by using Spark NLP 3. In `Libraries` tab inside your cluster you need to follow these steps: - 3.1. Install New -> PyPI -> `spark-nlp==5.0.0` -> Install + 3.1. Install New -> PyPI -> `spark-nlp==5.0.1` -> Install - 3.2. Install New -> Maven -> Coordinates -> `com.johnsnowlabs.nlp:spark-nlp_2.12:5.0.0` -> Install + 3.2. Install New -> Maven -> Coordinates -> `com.johnsnowlabs.nlp:spark-nlp_2.12:5.0.1` -> Install 4. Now you can attach your notebook to the cluster and use Spark NLP! @@ -810,7 +810,7 @@ A sample of your software configuration in JSON on S3 (must be public access): "spark.kryoserializer.buffer.max": "2000M", "spark.serializer": "org.apache.spark.serializer.KryoSerializer", "spark.driver.maxResultSize": "0", - "spark.jars.packages": "com.johnsnowlabs.nlp:spark-nlp_2.12:5.0.0" + "spark.jars.packages": "com.johnsnowlabs.nlp:spark-nlp_2.12:5.0.1" } }] ``` @@ -819,7 +819,7 @@ A sample of AWS CLI to launch EMR cluster: ```.sh aws emr create-cluster \ ---name "Spark NLP 5.0.0" \ +--name "Spark NLP 5.0.1" \ --release-label emr-6.2.0 \ --applications Name=Hadoop Name=Spark Name=Hive \ --instance-type m4.4xlarge \ @@ -883,7 +883,7 @@ gcloud dataproc clusters create ${CLUSTER_NAME} \ --enable-component-gateway \ --metadata 'PIP_PACKAGES=spark-nlp spark-nlp-display google-cloud-bigquery google-cloud-storage' \ --initialization-actions gs://goog-dataproc-initialization-actions-${REGION}/python/pip-install.sh \ - --properties spark:spark.serializer=org.apache.spark.serializer.KryoSerializer,spark:spark.driver.maxResultSize=0,spark:spark.kryoserializer.buffer.max=2000M,spark:spark.jars.packages=com.johnsnowlabs.nlp:spark-nlp_2.12:5.0.0 + --properties spark:spark.serializer=org.apache.spark.serializer.KryoSerializer,spark:spark.driver.maxResultSize=0,spark:spark.kryoserializer.buffer.max=2000M,spark:spark.jars.packages=com.johnsnowlabs.nlp:spark-nlp_2.12:5.0.1 ``` 2. 
On an existing one, you need to install spark-nlp and spark-nlp-display packages from PyPI. @@ -922,7 +922,7 @@ spark = SparkSession.builder .config("spark.kryoserializer.buffer.max", "2000m") .config("spark.jsl.settings.pretrained.cache_folder", "sample_data/pretrained") .config("spark.jsl.settings.storage.cluster_tmp_dir", "sample_data/storage") - .config("spark.jars.packages", "com.johnsnowlabs.nlp:spark-nlp_2.12:5.0.0") + .config("spark.jars.packages", "com.johnsnowlabs.nlp:spark-nlp_2.12:5.0.1") .getOrCreate() ``` @@ -936,7 +936,7 @@ spark-shell \ --conf spark.kryoserializer.buffer.max=2000M \ --conf spark.jsl.settings.pretrained.cache_folder="sample_data/pretrained" \ --conf spark.jsl.settings.storage.cluster_tmp_dir="sample_data/storage" \ - --packages com.johnsnowlabs.nlp:spark-nlp_2.12:5.0.0 + --packages com.johnsnowlabs.nlp:spark-nlp_2.12:5.0.1 ``` **pyspark:** @@ -949,7 +949,7 @@ pyspark \ --conf spark.kryoserializer.buffer.max=2000M \ --conf spark.jsl.settings.pretrained.cache_folder="sample_data/pretrained" \ --conf spark.jsl.settings.storage.cluster_tmp_dir="sample_data/storage" \ - --packages com.johnsnowlabs.nlp:spark-nlp_2.12:5.0.0 + --packages com.johnsnowlabs.nlp:spark-nlp_2.12:5.0.1 ``` **Databricks:** @@ -1221,7 +1221,7 @@ spark = SparkSession.builder .config("spark.driver.memory", "16G") .config("spark.driver.maxResultSize", "0") .config("spark.kryoserializer.buffer.max", "2000M") - .config("spark.jars", "/tmp/spark-nlp-assembly-5.0.0.jar") + .config("spark.jars", "/tmp/spark-nlp-assembly-5.0.1.jar") .getOrCreate() ``` @@ -1230,7 +1230,7 @@ spark = SparkSession.builder version (3.0.x, 3.1.x, 3.2.x, 3.3.x, and 3.4.x) - If you are local, you can load the Fat JAR from your local FileSystem, however, if you are in a cluster setup you need to put the Fat JAR on a distributed FileSystem such as HDFS, DBFS, S3, etc. ( - i.e., `hdfs:///tmp/spark-nlp-assembly-5.0.0.jar`) + i.e., `hdfs:///tmp/spark-nlp-assembly-5.0.1.jar`) Example of using pretrained Models and Pipelines in offline: diff --git a/build.sbt b/build.sbt index c9e37ecd4a699e..2fdac1c421cc55 100644 --- a/build.sbt +++ b/build.sbt @@ -6,7 +6,7 @@ name := getPackageName(is_silicon, is_gpu, is_aarch64) organization := "com.johnsnowlabs.nlp" -version := "5.0.0" +version := "5.0.1" (ThisBuild / scalaVersion) := scalaVer diff --git a/docs/_layouts/landing.html b/docs/_layouts/landing.html index ad8d347e9edffb..654d6642bf3c8d 100755 --- a/docs/_layouts/landing.html +++ b/docs/_layouts/landing.html @@ -201,7 +201,7 @@

{{ _section.title }}

{% highlight bash %} # Using PyPI - $ pip install spark-nlp==4.4.4 + $ pip install spark-nlp==5.0.1 # Using Anaconda/Conda $ conda install -c johnsnowlabs spark-nlp diff --git a/docs/api/com/index.html b/docs/api/com/index.html index f541dae7aaccae..0e735e306ba9fe 100644 --- a/docs/api/com/index.html +++ b/docs/api/com/index.html @@ -3,9 +3,9 @@ - Spark NLP 5.0.0 ScalaDoc - com - - + Spark NLP 5.0.1 ScalaDoc - com + + @@ -28,7 +28,7 @@