diff --git a/README.md b/README.md
index 0a7e7c8d..53fe86d7 100644
--- a/README.md
+++ b/README.md
@@ -77,7 +77,7 @@ When writing files the API accepts several options:
* `path`: Location to write files.
* `rowTag`: The row tag of your xml files to treat as a row. For example, in `<books> <book> ... </book> </books>`, the appropriate value would be `book`. Default is `ROW`.
* `rootTag`: The root tag of your xml files to treat as the root. For example, in `<books> <book> ... </book> </books>`, the appropriate value would be `books`. It can include basic attributes by specifying a value like `books foo="bar"` (as of 0.11.0). Default is `ROWS`.
-* `declaration`: Content of XML declaration to write at the start of every output XML file, before the `rootTag`. For example, a value of `foo` causes `<?xml foo?>` to be written. Set to null or empty to suppress. Defaults to `version="1.0" encoding="UTF-8" standalone="yes"`. New in 0.14.0.
+* `declaration`: Content of XML declaration to write at the start of every output XML file, before the `rootTag`. For example, a value of `foo` causes `<?xml foo?>` to be written. Set to empty string to suppress. Defaults to `version="1.0" encoding="UTF-8" standalone="yes"`. New in 0.14.0.
* `nullValue`: The value to write for `null` fields. Default is the string `null`. When this is `null`, it does not write attributes and elements for those fields.
* `attributePrefix`: The prefix for attributes, so that attributes can be differentiated from elements. This is the prefix for field names. Default is `_`.
* `valueTag`: The tag used for the value when an element has attributes but no child elements. Default is `_VALUE`.
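
Taken together, a minimal sketch of how these write options combine, assuming a `SparkSession` named `spark` and the implicit `.xml(...)` writer extension from `com.databricks.spark.xml._` (the path and tag values here are illustrative):

```scala
import com.databricks.spark.xml._   // brings df.write...xml(path) into scope

val df = spark.createDataFrame(Seq((42, "foo"))).toDF("number", "value")

df.write
  .option("rootTag", "books")   // root element wrapping all rows
  .option("rowTag", "book")     // one <book> element per row
  .option("declaration", "")    // empty string suppresses the <?xml ...?> declaration
  .option("nullValue", "null")  // string written for null fields (the default)
  .xml("/tmp/books-xml")        // `path`: illustrative output location
```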
diff --git a/src/test/scala/com/databricks/spark/xml/XmlSuite.scala b/src/test/scala/com/databricks/spark/xml/XmlSuite.scala
index 87d56151..75f89d2d 100755
--- a/src/test/scala/com/databricks/spark/xml/XmlSuite.scala
+++ b/src/test/scala/com/databricks/spark/xml/XmlSuite.scala
@@ -1233,7 +1233,7 @@ final class XmlSuite extends AnyFunSuite with BeforeAndAfterAll {
test("rootTag with simple attributes") {
val xmlPath = getEmptyTempDir().resolve("simple_attributes")
val df = spark.createDataFrame(Seq((42, "foo"))).toDF("number", "value").repartition(1)
- df.write.option("rootTag", "root foo='bar' bing=\"baz\"").xml(xmlPath.toString)
+ df.write.option("rootTag", "root foo='bar' bing=\"baz\"").option("declaration", "").xml(xmlPath.toString)
val xmlFile =
Files.list(xmlPath).iterator.asScala.filter(_.getFileName.toString.startsWith("part-")).next()
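
For context, a rough sketch (not part of the diff) of the part file this test goes on to inspect, assuming the default `rowTag` of `ROW` and that the writer normalizes the attribute quotes:

```scala
// Hypothetical expected content, for illustration only: with the declaration
// suppressed, the file starts directly with the attributed root tag.
val expectedXml =
  """<root foo="bar" bing="baz">
    |    <ROW>
    |        <number>42</number>
    |        <value>foo</value>
    |    </ROW>
    |</root>""".stripMargin
```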