diff --git a/CHANGELOG.md b/CHANGELOG.md
index 382f7b6ecab..3c2d37b3e28 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -17,6 +17,7 @@ Note that this project **does not** adhere to [Semantic Versioning](https://semv
 - We added a duplicate checker for the Citation Relations tab. [#10414](https://github.com/JabRef/jabref/issues/10414)
 - We added tooltip on main table cells that shows cell content or cell content and entry preview if set in preferences. [10925](https://github.com/JabRef/jabref/issues/10925)
 - We added the ability to add a keyword/crossref when typing the separator character (e.g., comma) in the keywords/crossref fields. [#11178](https://github.com/JabRef/jabref/issues/11178)
+- We added an exporter and improved the importer for the EndNote XML format. [#11137](https://github.com/JabRef/jabref/issues/11137)
 
 ### Changed
 
diff --git a/src/main/java/org/jabref/logic/exporter/EndnoteXmlExporter.java b/src/main/java/org/jabref/logic/exporter/EndnoteXmlExporter.java
new file mode 100644
index 00000000000..39b2343c665
--- /dev/null
+++ b/src/main/java/org/jabref/logic/exporter/EndnoteXmlExporter.java
@@ -0,0 +1,308 @@
+package org.jabref.logic.exporter;
+
+import java.nio.charset.StandardCharsets;
+import java.nio.file.Path;
+import java.util.HashMap;
+import java.util.LinkedHashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Objects;
+import java.util.SequencedMap;
+
+import javax.xml.parsers.DocumentBuilder;
+import javax.xml.parsers.DocumentBuilderFactory;
+import javax.xml.transform.OutputKeys;
+import javax.xml.transform.Transformer;
+import javax.xml.transform.TransformerConfigurationException;
+import javax.xml.transform.TransformerFactory;
+import javax.xml.transform.dom.DOMSource;
+import javax.xml.transform.stream.StreamResult;
+
+import org.jabref.logic.util.StandardFileType;
+import org.jabref.model.database.BibDatabase;
+import org.jabref.model.database.BibDatabaseContext;
+import org.jabref.model.entry.Author;
+import org.jabref.model.entry.AuthorList;
+import org.jabref.model.entry.BibEntry;
+import org.jabref.model.entry.field.Field;
+import org.jabref.model.entry.field.StandardField;
+import org.jabref.model.entry.field.UnknownField;
+import org.jabref.model.entry.types.EntryType;
+import org.jabref.model.entry.types.IEEETranEntryType;
+import org.jabref.model.entry.types.StandardEntryType;
+import org.jabref.preferences.BibEntryPreferences;
+
+import org.w3c.dom.Document;
+import org.w3c.dom.Element;
+
+public class EndnoteXmlExporter extends Exporter {
+
+    private static final DocumentBuilderFactory DOCUMENT_BUILDER_FACTORY = DocumentBuilderFactory.newInstance();
+
+    private record EndNoteType(String name, Integer number) {
+    }
+
+    private static final Map<EntryType, EndNoteType> ENTRY_TYPE_MAPPING = new HashMap<>();
+
+    static {
+        ENTRY_TYPE_MAPPING.put(StandardEntryType.Article, new EndNoteType("Journal Article", 1));
+        ENTRY_TYPE_MAPPING.put(StandardEntryType.Book, new EndNoteType("Book", 2));
+        ENTRY_TYPE_MAPPING.put(StandardEntryType.InBook, new EndNoteType("Book Section", 3));
+        ENTRY_TYPE_MAPPING.put(StandardEntryType.InCollection, new EndNoteType("Book Section", 4));
+        ENTRY_TYPE_MAPPING.put(StandardEntryType.Proceedings, new EndNoteType("Conference Proceedings", 5));
+        ENTRY_TYPE_MAPPING.put(StandardEntryType.MastersThesis, new EndNoteType("Thesis", 6));
+        ENTRY_TYPE_MAPPING.put(StandardEntryType.PhdThesis, new EndNoteType("Thesis", 7));
+        ENTRY_TYPE_MAPPING.put(StandardEntryType.TechReport, new EndNoteType("Report", 8));
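+        // Note: the "name" attribute is the label EndNote displays for a reference type;
+        // the element text is the numeric id written by mapEntryType below.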
+        ENTRY_TYPE_MAPPING.put(StandardEntryType.Unpublished, new EndNoteType("Manuscript", 9));
+        ENTRY_TYPE_MAPPING.put(StandardEntryType.InProceedings, new EndNoteType("Conference Paper", 10));
+        ENTRY_TYPE_MAPPING.put(StandardEntryType.Conference, new EndNoteType("Conference", 11));
+        ENTRY_TYPE_MAPPING.put(IEEETranEntryType.Patent, new EndNoteType("Patent", 12));
+        ENTRY_TYPE_MAPPING.put(StandardEntryType.Online, new EndNoteType("Web Page", 13));
+        ENTRY_TYPE_MAPPING.put(IEEETranEntryType.Electronic, new EndNoteType("Electronic Article", 14));
+        ENTRY_TYPE_MAPPING.put(StandardEntryType.Misc, new EndNoteType("Generic", 15));
+    }
+
+    // Contains the mapping of all fields not explicitly handled by mapX methods
+    // We need a fixed order here, so we use a SequencedMap
+    private static final SequencedMap<Field, String> STANDARD_FIELD_MAPPING = new LinkedHashMap<>();
+
+    static {
+        STANDARD_FIELD_MAPPING.put(StandardField.PAGES, "pages");
+        STANDARD_FIELD_MAPPING.put(StandardField.VOLUME, "volume");
+        STANDARD_FIELD_MAPPING.put(StandardField.PUBLISHER, "publisher");
+        STANDARD_FIELD_MAPPING.put(StandardField.ISBN, "isbn");
+        STANDARD_FIELD_MAPPING.put(StandardField.DOI, "electronic-resource-num");
+        STANDARD_FIELD_MAPPING.put(StandardField.ABSTRACT, "abstract");
+        STANDARD_FIELD_MAPPING.put(StandardField.BOOKTITLE, "secondary-title");
+        STANDARD_FIELD_MAPPING.put(StandardField.EDITION, "edition");
+        STANDARD_FIELD_MAPPING.put(StandardField.SERIES, "tertiary-title");
+        STANDARD_FIELD_MAPPING.put(StandardField.NUMBER, "number");
+        STANDARD_FIELD_MAPPING.put(StandardField.ISSUE, "issue");
+        STANDARD_FIELD_MAPPING.put(StandardField.LOCATION, "pub-location");
+        STANDARD_FIELD_MAPPING.put(StandardField.CHAPTER, "section");
+        STANDARD_FIELD_MAPPING.put(StandardField.HOWPUBLISHED, "work-type");
+        STANDARD_FIELD_MAPPING.put(StandardField.ISSN, "issn");
+        STANDARD_FIELD_MAPPING.put(StandardField.ADDRESS, "auth-address");
+        STANDARD_FIELD_MAPPING.put(StandardField.PAGETOTAL, "page-total");
+        STANDARD_FIELD_MAPPING.put(StandardField.NOTE, "notes");
+        STANDARD_FIELD_MAPPING.put(StandardField.LABEL, "label");
+        STANDARD_FIELD_MAPPING.put(StandardField.LANGUAGE, "language");
+        STANDARD_FIELD_MAPPING.put(StandardField.KEY, "foreign-keys");
+        STANDARD_FIELD_MAPPING.put(new UnknownField("accession-num"), "accession-num");
+    }
+
+    private static final EndNoteType DEFAULT_TYPE = new EndNoteType("Generic", 15);
+
+    private final BibEntryPreferences bibEntryPreferences;
+
+    public EndnoteXmlExporter(BibEntryPreferences bibEntryPreferences) {
+        super("endnote", "EndNote XML", StandardFileType.XML);
+        this.bibEntryPreferences = bibEntryPreferences;
+    }
+
+    @Override
+    public void export(BibDatabaseContext databaseContext, Path file, List<BibEntry> entries) throws Exception {
+        Objects.requireNonNull(databaseContext);
+        Objects.requireNonNull(file);
+        Objects.requireNonNull(entries);
+
+        if (entries.isEmpty()) {
+            return;
+        }
+
+        DocumentBuilder dBuilder = DOCUMENT_BUILDER_FACTORY.newDocumentBuilder();
+        Document document = dBuilder.newDocument();
+
+        Element rootElement = document.createElement("xml");
+        document.appendChild(rootElement);
+
+        Element recordsElement = document.createElement("records");
+        rootElement.appendChild(recordsElement);
+
+        for (BibEntry entry : entries) {
+            Element recordElement = document.createElement("record");
+            recordsElement.appendChild(recordElement);
+
+            mapEntryType(entry, document, recordElement);
+            createMetaInformationElements(databaseContext, document, recordElement);
+            mapAuthorAndEditor(entry, document, recordElement);
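+            // The map* calls handle fields that need nested XML structure; everything
+            // else is written flat via STANDARD_FIELD_MAPPING in the loop below.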
+            mapTitle(entry, document, recordElement);
+            mapJournalTitle(entry, document, recordElement);
+            mapKeywords(databaseContext.getDatabase(), entry, document, recordElement);
+            mapDates(entry, document, recordElement);
+            mapUrls(entry, document, recordElement);
+
+            for (Map.Entry<Field, String> fieldMapping : STANDARD_FIELD_MAPPING.entrySet()) {
+                Field field = fieldMapping.getKey();
+                String xmlElement = fieldMapping.getValue();
+
+                entry.getField(field).ifPresent(value -> {
+                    Element fieldElement = document.createElement(xmlElement);
+                    fieldElement.setTextContent(value);
+                    recordElement.appendChild(fieldElement);
+                });
+            }
+        }
+
+        Transformer transformer = createTransformer();
+        DOMSource source = new DOMSource(document);
+        StreamResult result = new StreamResult(file.toFile());
+        transformer.transform(source, result);
+    }
+
+    private static void mapTitle(BibEntry entry, Document document, Element recordElement) {
+        entry.getFieldOrAlias(StandardField.TITLE).ifPresent(title -> {
+            Element titlesElement = document.createElement("titles");
+
+            Element titleElement = document.createElement("title");
+            titleElement.setTextContent(title);
+            titlesElement.appendChild(titleElement);
+
+            entry.getField(new UnknownField("alt-title")).ifPresent(altTitle -> {
+                Element altTitleElement = document.createElement("alt-title");
+                altTitleElement.setTextContent(altTitle);
+                titlesElement.appendChild(altTitleElement);
+            });
+
+            entry.getField(StandardField.BOOKTITLE).ifPresent(secondaryTitle -> {
+                Element secondaryTitleElement = document.createElement("secondary-title");
+                secondaryTitleElement.setTextContent(secondaryTitle);
+                titlesElement.appendChild(secondaryTitleElement);
+            });
+
+            recordElement.appendChild(titlesElement);
+        });
+    }
+
+    private static void mapJournalTitle(BibEntry entry, Document document, Element recordElement) {
+        entry.getFieldOrAlias(StandardField.JOURNAL).ifPresent(journalTitle -> {
+            Element periodicalElement = document.createElement("periodical");
+            Element fullTitleElement = document.createElement("full-title");
+            fullTitleElement.setTextContent(journalTitle);
+            periodicalElement.appendChild(fullTitleElement);
+            recordElement.appendChild(periodicalElement);
+        });
+    }
+
+    private void mapKeywords(BibDatabase bibDatabase, BibEntry entry, Document document, Element recordElement) {
+        entry.getFieldOrAlias(StandardField.KEYWORDS).ifPresent(keywords -> {
+            Element keywordsElement = document.createElement("keywords");
+            entry.getResolvedKeywords(bibEntryPreferences.getKeywordSeparator(), bibDatabase).forEach(keyword -> {
+                Element keywordElement = document.createElement("keyword");
+                // Hierarchical keywords are separated by the '>' character. See {@link org.jabref.model.entry.Keyword} for details.
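+                // Each resolved keyword becomes its own keyword element in the exported record.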
+                keywordElement.setTextContent(keyword.get());
+                keywordsElement.appendChild(keywordElement);
+            });
+            recordElement.appendChild(keywordsElement);
+        });
+    }
+
+    private static void mapUrls(BibEntry entry, Document document, Element recordElement) {
+        Element urlsElement = document.createElement("urls");
+
+        entry.getFieldOrAlias(StandardField.FILE).ifPresent(fileField -> {
+            Element pdfUrlsElement = document.createElement("pdf-urls");
+            Element urlElement = document.createElement("url");
+            urlElement.setTextContent(fileField);
+            pdfUrlsElement.appendChild(urlElement);
+            urlsElement.appendChild(pdfUrlsElement);
+        });
+
+        entry.getFieldOrAlias(StandardField.URL).ifPresent(url -> {
+            Element webUrlsElement = document.createElement("web-urls");
+            Element urlElement = document.createElement("url");
+            urlElement.setTextContent(url);
+            webUrlsElement.appendChild(urlElement);
+            urlsElement.appendChild(webUrlsElement);
+        });
+
+        if (urlsElement.hasChildNodes()) {
+            recordElement.appendChild(urlsElement);
+        }
+    }
+
+    private static void mapDates(BibEntry entry, Document document, Element recordElement) {
+        Element datesElement = document.createElement("dates");
+        entry.getFieldOrAlias(StandardField.YEAR).ifPresent(year -> {
+            Element yearElement = document.createElement("year");
+            yearElement.setTextContent(year);
+            datesElement.appendChild(yearElement);
+        });
+        entry.getFieldOrAlias(StandardField.MONTH).ifPresent(month -> {
+            Element monthElement = document.createElement("month");
+            monthElement.setTextContent(month);
+            datesElement.appendChild(monthElement);
+        });
+        entry.getFieldOrAlias(StandardField.DAY).ifPresent(day -> {
+            Element dayElement = document.createElement("day");
+            dayElement.setTextContent(day);
+            datesElement.appendChild(dayElement);
+        });
+        // We need to use getField here - getFieldOrAlias for Date tries to convert year, month, and day to a date, which we do not want
+        entry.getField(StandardField.DATE).ifPresent(date -> {
+            Element pubDatesElement = document.createElement("pub-dates");
+            Element dateElement = document.createElement("date");
+            dateElement.setTextContent(date);
+            pubDatesElement.appendChild(dateElement);
+            datesElement.appendChild(pubDatesElement);
+        });
+        if (datesElement.hasChildNodes()) {
+            recordElement.appendChild(datesElement);
+        }
+    }
+
+    private static void mapEntryType(BibEntry entry, Document document, Element recordElement) {
+        EntryType entryType = entry.getType();
+        EndNoteType endNoteType = ENTRY_TYPE_MAPPING.getOrDefault(entryType, DEFAULT_TYPE);
+        Element refTypeElement = document.createElement("ref-type");
+        refTypeElement.setAttribute("name", endNoteType.name());
+        refTypeElement.setTextContent(endNoteType.number().toString());
+        recordElement.appendChild(refTypeElement);
+    }
+
+    private static void createMetaInformationElements(BibDatabaseContext databaseContext, Document document, Element recordElement) {
+        Element databaseElement = document.createElement("database");
+        databaseElement.setAttribute("name", "MyLibrary");
+        String name = databaseContext.getDatabasePath().map(Path::getFileName).map(Path::toString).orElse("MyLibrary");
+        databaseElement.setTextContent(name);
+        recordElement.appendChild(databaseElement);
+
+        Element sourceAppElement = document.createElement("source-app");
+        sourceAppElement.setAttribute("name", "JabRef");
+        sourceAppElement.setTextContent("JabRef");
+        recordElement.appendChild(sourceAppElement);
+    }
+
+    private static void mapAuthorAndEditor(BibEntry entry, Document document, Element recordElement) {
+        Element
contributorsElement = document.createElement("contributors"); + entry.getField(StandardField.AUTHOR).ifPresent(authors -> { + addPersons(authors, document, contributorsElement, "authors"); + }); + entry.getField(StandardField.EDITOR).ifPresent(editors -> { + addPersons(editors, document, contributorsElement, "secondary-authors"); + }); + if (contributorsElement.hasChildNodes()) { + recordElement.appendChild(contributorsElement); + } + } + + private static void addPersons(String authors, Document document, Element contributorsElement, String wrapTagName) { + Element container = document.createElement(wrapTagName); + AuthorList parsedPersons = AuthorList.parse(authors).latexFree(); + for (Author person : parsedPersons) { + Element authorElement = document.createElement("author"); + authorElement.setTextContent(person.getFamilyGiven(false)); + container.appendChild(authorElement); + } + contributorsElement.appendChild(container); + } + + private static Transformer createTransformer() throws TransformerConfigurationException { + TransformerFactory transformerFactory = TransformerFactory.newInstance(); + Transformer transformer = transformerFactory.newTransformer(); + transformer.setOutputProperty(OutputKeys.ENCODING, StandardCharsets.UTF_8.name()); + transformer.setOutputProperty(OutputKeys.INDENT, "yes"); + transformer.setOutputProperty("{http://xml.apache.org/xslt}indent-amount", "2"); + return transformer; + } +} diff --git a/src/main/java/org/jabref/logic/exporter/ExporterFactory.java b/src/main/java/org/jabref/logic/exporter/ExporterFactory.java index 23c374c3b87..786cd33c333 100644 --- a/src/main/java/org/jabref/logic/exporter/ExporterFactory.java +++ b/src/main/java/org/jabref/logic/exporter/ExporterFactory.java @@ -63,6 +63,7 @@ public static ExporterFactory create(PreferencesService preferencesService, exporters.add(new XmpPdfExporter(xmpPreferences)); exporters.add(new EmbeddedBibFilePdfExporter(bibDatabaseMode, entryTypesManager, fieldPreferences)); exporters.add(new CffExporter()); + exporters.add(new EndnoteXmlExporter(preferencesService.getBibEntryPreferences())); // Now add custom export formats exporters.addAll(customFormats); diff --git a/src/main/java/org/jabref/logic/importer/fileformat/EndnoteXmlImporter.java b/src/main/java/org/jabref/logic/importer/fileformat/EndnoteXmlImporter.java index 0e05618632a..5c7f3aa6777 100644 --- a/src/main/java/org/jabref/logic/importer/fileformat/EndnoteXmlImporter.java +++ b/src/main/java/org/jabref/logic/importer/fileformat/EndnoteXmlImporter.java @@ -4,16 +4,14 @@ import java.io.IOException; import java.io.InputStream; import java.io.InputStreamReader; -import java.net.MalformedURLException; -import java.net.URL; import java.nio.charset.StandardCharsets; import java.util.ArrayList; import java.util.Collections; -import java.util.HashMap; import java.util.List; import java.util.Locale; import java.util.Map; import java.util.Objects; +import java.util.StringJoiner; import javax.xml.stream.XMLInputFactory; import javax.xml.stream.XMLStreamException; @@ -28,33 +26,77 @@ import org.jabref.logic.util.StandardFileType; import org.jabref.model.entry.BibEntry; import org.jabref.model.entry.KeywordList; -import org.jabref.model.entry.LinkedFile; import org.jabref.model.entry.field.Field; import org.jabref.model.entry.field.StandardField; import org.jabref.model.entry.field.UnknownField; import org.jabref.model.entry.types.EntryType; import org.jabref.model.entry.types.IEEETranEntryType; import org.jabref.model.entry.types.StandardEntryType; -import 
org.jabref.model.strings.StringUtil;
 
-import com.google.common.base.Joiner;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-/**
- * Importer for the Endnote XML format.
- * <p>
- * Based on dtd scheme downloaded from Article #122577 in http://kbportal.thomson.com.
- */
 public class EndnoteXmlImporter extends Importer implements Parser {
 
     private static final Logger LOGGER = LoggerFactory.getLogger(EndnoteXmlImporter.class);
+
+    private static final Map<EntryType, String> ENTRY_TYPE_MAPPING = Map.ofEntries(
+            Map.entry(StandardEntryType.Article, "Journal Article"),
+            Map.entry(StandardEntryType.Book, "Book"),
+            Map.entry(StandardEntryType.InBook, "Book Section"),
+            Map.entry(StandardEntryType.InCollection, "Book Section"),
+            Map.entry(StandardEntryType.Proceedings, "Conference Proceedings"),
+            Map.entry(StandardEntryType.MastersThesis, "Thesis"),
+            Map.entry(StandardEntryType.PhdThesis, "Thesis"),
+            Map.entry(StandardEntryType.TechReport, "Report"),
+            Map.entry(StandardEntryType.Unpublished, "Manuscript"),
+            Map.entry(StandardEntryType.InProceedings, "Conference Paper"),
+            Map.entry(StandardEntryType.Conference, "Conference"),
+            Map.entry(IEEETranEntryType.Patent, "Patent"),
+            Map.entry(StandardEntryType.Online, "Web Page"),
+            Map.entry(IEEETranEntryType.Electronic, "Electronic Article"),
+            Map.entry(StandardEntryType.Misc, "Generic")
+    );
+
+    private static final Map<Field, String> FIELD_MAPPING = Map.ofEntries(
+            Map.entry(StandardField.TITLE, "title"),
+            Map.entry(StandardField.AUTHOR, "authors"),
+            Map.entry(StandardField.EDITOR, "secondary-authors"),
+            Map.entry(StandardField.BOOKTITLE, "secondary-title"),
+            Map.entry(StandardField.EDITION, "edition"),
+            Map.entry(StandardField.SERIES, "tertiary-title"),
+            Map.entry(StandardField.VOLUME, "volume"),
+            Map.entry(StandardField.NUMBER, "number"),
+            Map.entry(StandardField.ISSUE, "issue"),
+            Map.entry(StandardField.PAGES, "pages"),
+            Map.entry(StandardField.LOCATION, "pub-location"),
+            Map.entry(StandardField.CHAPTER, "section"),
+            Map.entry(StandardField.HOWPUBLISHED, "work-type"),
+            Map.entry(StandardField.PUBLISHER, "publisher"),
+            Map.entry(StandardField.ISBN, "isbn"),
+            Map.entry(StandardField.ISSN, "issn"),
+            Map.entry(StandardField.DOI, "electronic-resource-num"),
+            Map.entry(StandardField.URL, "web-urls"),
+            Map.entry(StandardField.FILE, "pdf-urls"),
+            Map.entry(StandardField.ABSTRACT, "abstract"),
+            Map.entry(StandardField.KEYWORDS, "keywords"),
+            Map.entry(StandardField.PAGETOTAL, "page-total"),
+            Map.entry(StandardField.NOTE, "notes"),
+            // Map.entry(StandardField.LABEL, "label"), // We omit this field
+            Map.entry(StandardField.LANGUAGE, "language"),
+            // Map.entry(StandardField.KEY, "foreign-keys"), // We omit this field
+            Map.entry(StandardField.ADDRESS, "auth-address")
+    );
+
+    private static final UnknownField FIELD_ALT_TITLE = new UnknownField("alt-title");
+
     private final ImportFormatPreferences preferences;
+
     private final XMLInputFactory xmlInputFactory;
 
     public EndnoteXmlImporter(ImportFormatPreferences preferences) {
         this.preferences = preferences;
         xmlInputFactory = XMLInputFactory.newInstance();
+
         // prevent xxe (https://rules.sonarsource.com/java/RSPEC-2755)
         // not supported by aalto-xml
         // xmlInputFactory.setProperty(XMLConstants.ACCESS_EXTERNAL_SCHEMA, "");
@@ -62,10 +104,6 @@ public EndnoteXmlImporter(ImportFormatPreferences preferences) {
         xmlInputFactory.setProperty(XMLInputFactory.IS_COALESCING, true);
     }
 
-    private static String join(List<String> list, String string) {
-        return Joiner.on(string).join(list);
-    }
-
     @Override
     public String getName() {
         return "EndNote XML";
@@ -94,7 +132,6 @@ public boolean isRecognizedFormat(BufferedReader reader) throws IOException {
             if (str.toLowerCase(Locale.ENGLISH).contains("<records>")) {
                 return true;
             }
-
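+            // Only a limited number of leading lines is checked for the records marker before giving up.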
             i++;
         }
         return false;
@@ -111,11 +148,9 @@ public ParserResult importDatabase(BufferedReader input) throws IOException {
             while (reader.hasNext()) {
                 reader.next();
 
-                if (isStartXMLEvent(reader)) {
-                    String elementName = reader.getName().getLocalPart();
-                    if ("record".equals(elementName)) {
-                        parseRecord(reader, bibItems, elementName);
-                    }
+                if (isStartElement(reader, "record")) {
+                    BibEntry entry = parseRecord(reader);
+                    bibItems.add(entry);
                 }
             }
         } catch (XMLStreamException e) {
@@ -125,366 +160,300 @@ public ParserResult importDatabase(BufferedReader input) throws IOException {
         return new ParserResult(bibItems);
     }
 
-    private void parseRecord(XMLStreamReader reader, List<BibEntry> bibItems, String startElement)
-            throws XMLStreamException {
-
-        Map<Field, String> fields = new HashMap<>();
-        EntryType entryType = StandardEntryType.Article;
-
-        KeywordList keywordList = new KeywordList();
-        List<LinkedFile> linkedFiles = new ArrayList<>();
+    private BibEntry parseRecord(XMLStreamReader reader) throws XMLStreamException {
+        BibEntry entry = new BibEntry();
 
         while (reader.hasNext()) {
             reader.next();
-            if (isStartXMLEvent(reader)) {
+            if (isEndElement(reader, "record")) {
+                break;
+            }
+
+            if (isStartElement(reader)) {
                 String elementName = reader.getName().getLocalPart();
                 switch (elementName) {
                     case "ref-type" -> {
-                        String type = reader.getAttributeValue(null, "name");
-                        entryType = convertRefNameToType(type);
+                        String refType = reader.getAttributeValue(null, "name");
+                        EntryType entryType = ENTRY_TYPE_MAPPING.entrySet().stream()
+                                                                .filter(e -> e.getValue().equals(refType))
+                                                                .map(Map.Entry::getKey)
+                                                                .findFirst()
+                                                                .orElse(StandardEntryType.Misc);
+                        entry.setType(entryType);
                     }
                     case "contributors" -> {
-                        handleAuthorList(reader, fields, elementName);
+                        parseContributors(reader, entry);
                     }
                     case "titles" -> {
-                        handleTitles(reader, fields, elementName);
-                    }
-                    case "pages" -> {
-                        parseStyleContent(reader, fields, StandardField.PAGES, elementName);
-                    }
-                    case "volume" -> {
-                        parseStyleContent(reader, fields, StandardField.VOLUME, elementName);
-                    }
-                    case "number" -> {
-                        parseStyleContent(reader, fields, StandardField.NUMBER, elementName);
-                    }
-                    case "dates" -> {
-                        parseYear(reader, fields);
-                    }
-                    case "notes" -> {
-                        parseStyleContent(reader, fields, StandardField.NOTE, elementName);
+                        parseTitles(reader, entry);
                     }
-                    case "urls" -> {
-                        handleUrlList(reader, fields, linkedFiles);
+                    case "periodical" -> {
+                        parsePeriodical(reader, entry);
                     }
                     case "keywords" -> {
-                        handleKeywordsList(reader, keywordList, elementName);
-                    }
-                    case "abstract" -> {
-                        parseStyleContent(reader, fields, StandardField.ABSTRACT, elementName);
-                    }
-                    case "isbn" -> {
-                        parseStyleContent(reader, fields, StandardField.ISBN, elementName);
+                        parseKeywords(reader, entry);
                    }
-                    case "electronic-resource-num" -> {
-                        parseStyleContent(reader, fields, StandardField.DOI, elementName);
+                    case "urls" -> {
+                        parseUrls(reader, entry);
                     }
-                    case "publisher" -> {
-                        parseStyleContent(reader, fields, StandardField.PUBLISHER, elementName);
+                    case "dates" -> {
+                        parseDates(reader, entry);
                     }
-                    case "label" -> {
-                        parseStyleContent(reader, fields, new UnknownField("endnote-label"), elementName);
+                    // TODO: Left for future work -- test files need to be adapted
+                    // case "accession-num" -> {
+                    //     String accessionNumber = parseElementContent(reader, "accession-num");
+                    //     entry.setField(new UnknownField("accession-num"), accessionNumber);
+                    // }
+                    default -> {
+                        Field field = FIELD_MAPPING.entrySet().stream()
+                                                   .filter(e -> e.getValue().equals(elementName))
+                                                   .map(Map.Entry::getKey)
+                                                   .findFirst()
+                                                   .orElse(null);
+                        if (field != null) {
+                            String value = parseElementContent(reader, elementName);
+                            entry.setField(field, value);
+                        }
                     }
                 }
             }
-            if (isEndXMLEvent(reader) && reader.getName().getLocalPart().equals(startElement)) {
-                break;
-            }
         }
 
-        BibEntry entry = new BibEntry(entryType);
-        entry.putKeywords(keywordList, preferences.bibEntryPreferences().getKeywordSeparator());
-
-        entry.setField(fields);
-        entry.setFiles(linkedFiles);
-        bibItems.add(entry);
-    }
+        // Cleanup: Remove alt-title if it matches the journal
+        String journalOrBooktitle = entry.getField(StandardField.JOURNAL).or(() -> entry.getField(StandardField.BOOKTITLE)).orElse("");
+        if (entry.hasField(FIELD_ALT_TITLE)) {
+            String altTitle = entry.getField(FIELD_ALT_TITLE).orElse("");
+            if (journalOrBooktitle.equals(altTitle)) {
+                entry.clearField(FIELD_ALT_TITLE);
+            }
+        }
 
-    private static EntryType convertRefNameToType(String refName) {
-        return switch (refName.toLowerCase().trim()) {
-            case "artwork", "generic" -> StandardEntryType.Misc;
-            case "electronic article" -> IEEETranEntryType.Electronic;
-            case "book section" -> StandardEntryType.InBook;
-            case "book" -> StandardEntryType.Book;
-            case "report" -> StandardEntryType.Report;
-            // case "journal article" -> StandardEntryType.Article;
-            default -> StandardEntryType.Article;
-        };
+        return entry;
     }
 
-    private void handleAuthorList(XMLStreamReader reader, Map<Field, String> fields, String startElement) throws XMLStreamException {
-        List<String> authorNames = new ArrayList<>();
-
+    private void parseContributors(XMLStreamReader reader, BibEntry entry) throws XMLStreamException {
         while (reader.hasNext()) {
             reader.next();
-            if (isStartXMLEvent(reader)) {
-                String elementName = reader.getName().getLocalPart();
-                switch (elementName) {
-                    case "author" -> {
-                        parseAuthor(reader, authorNames);
-                    }
-                }
-            }
-
-            if (isEndXMLEvent(reader) && reader.getName().getLocalPart().equals(startElement)) {
+            if (isEndElement(reader, "contributors")) {
                 break;
             }
+            extractPersons(reader, "authors", entry, StandardField.AUTHOR);
+            extractPersons(reader, "secondary-authors", entry, StandardField.EDITOR);
         }
-        fields.put(StandardField.AUTHOR, join(authorNames, " and "));
     }
 
-    private void parseAuthor(XMLStreamReader reader, List<String> authorNames) throws XMLStreamException {
-
-        while (reader.hasNext()) {
-            reader.next();
-            if (isStartXMLEvent(reader)) {
-                String elementName = reader.getName().getLocalPart();
-                switch (elementName) {
-                    case "style" -> {
-                        reader.next();
-                        if (isCharacterXMLEvent(reader)) {
-                            authorNames.add(reader.getText());
-                        }
+    private void extractPersons(XMLStreamReader reader, String elementName, BibEntry entry, StandardField author) throws XMLStreamException {
+        if (isStartElement(reader, elementName)) {
+            StringJoiner persons = new StringJoiner(" and ");
+            while (reader.hasNext()) {
+                reader.next();
+                if (isEndElement(reader, elementName)) {
+                    break;
+                }
+                if (isStartElement(reader, "author")) {
+                    String person = parseElementContent(reader, "author");
+                    if (!person.isEmpty()) {
+                        persons.add(person);
                     }
                 }
             }
-
-            if (isEndXMLEvent(reader) && "author".equals(reader.getName().getLocalPart())) {
-                break;
-            }
+            entry.setField(author, persons.toString());
         }
     }
 
-    private void parseStyleContent(XMLStreamReader reader, Map<Field, String> fields, Field field, String elementName) throws XMLStreamException {
+    private void parseTitles(XMLStreamReader reader, BibEntry entry) throws XMLStreamException {
         while (reader.hasNext()) {
             reader.next();
-            if (isStartXMLEvent(reader)) {
-                String tag = reader.getName().getLocalPart();
("style".equals(tag)) { - reader.next(); - if (isCharacterXMLEvent(reader)) { - if ("abstract".equals(elementName) || "electronic-resource-num".equals(elementName) || "notes".equals(elementName)) { - putIfValueNotNull(fields, field, reader.getText().trim()); - } else if ("isbn".equals(elementName) || "secondary-title".equals(elementName)) { - putIfValueNotNull(fields, field, clean(reader.getText())); + if (isEndElement(reader, "titles")) { + break; + } + + if (isStartElement(reader)) { + String elementName = reader.getName().getLocalPart(); + switch (elementName) { + case "title" -> { + String title = parseElementContent(reader, "title"); + entry.setField(StandardField.TITLE, title); + } + case "secondary-title" -> { + String secondaryTitle = parseElementContent(reader, "secondary-title"); + if (entry.getType().equals(StandardEntryType.Article)) { + entry.setField(StandardField.JOURNAL, secondaryTitle); } else { - putIfValueNotNull(fields, field, reader.getText()); + entry.setField(StandardField.BOOKTITLE, secondaryTitle); } } + case "alt-title" -> { + String altTitle = parseElementContent(reader, "alt-title"); + entry.setField(FIELD_ALT_TITLE, altTitle); + } } } - if (isEndXMLEvent(reader) && reader.getName().getLocalPart().equals(elementName)) { - break; - } } } - private void parseYear(XMLStreamReader reader, Map fields) throws XMLStreamException { + private void parsePeriodical(XMLStreamReader reader, BibEntry entry) throws XMLStreamException { while (reader.hasNext()) { reader.next(); - if (isStartXMLEvent(reader)) { - String elementName = reader.getName().getLocalPart(); - switch (elementName) { - case "style" -> { - reader.next(); - if (isCharacterXMLEvent(reader)) { - putIfValueNotNull(fields, StandardField.YEAR, reader.getText()); - } - } - } + if (isEndElement(reader, "periodical")) { + break; } - if (isEndXMLEvent(reader) && "year".equals(reader.getName().getLocalPart())) { - break; + if (isStartElement(reader)) { + parseJournalOrBookTitle(reader, entry); } } } - private void handleKeywordsList(XMLStreamReader reader, KeywordList keywordList, String startElement) throws XMLStreamException { - - while (reader.hasNext()) { - reader.next(); - if (isStartXMLEvent(reader)) { - String elementName = reader.getName().getLocalPart(); - switch (elementName) { - case "keyword" -> { - parseKeyword(reader, keywordList); - } + private void parseJournalOrBookTitle(XMLStreamReader reader, BibEntry entry) throws XMLStreamException { + String elementName = reader.getName().getLocalPart(); + switch (elementName) { + case "full-title", "abbr-2", "abbr-1", "abbr-3" -> { + String title = parseElementContent(reader, elementName); + if (entry.getType().equals(StandardEntryType.Article)) { + entry.setField(StandardField.JOURNAL, title); + } else { + entry.setField(StandardField.BOOKTITLE, title); } } - if (isEndXMLEvent(reader) && reader.getName().getLocalPart().equals(startElement)) { - break; - } } } - private void parseKeyword(XMLStreamReader reader, KeywordList keywordList) throws XMLStreamException { - + private void parseKeywords(XMLStreamReader reader, BibEntry entry) throws XMLStreamException { + KeywordList keywordList = new KeywordList(); while (reader.hasNext()) { reader.next(); - if (isStartXMLEvent(reader)) { - String elementName = reader.getName().getLocalPart(); - switch (elementName) { - case "style" -> { - reader.next(); - if (isCharacterXMLEvent(reader)) { - if (reader.getText() != null) { - keywordList.add(reader.getText()); - } - } - } - } + if (isEndElement(reader, "keywords")) { + 
+                break;
+            }
-            if (isEndXMLEvent(reader) && "keyword".equals(reader.getName().getLocalPart())) {
-                break;
+            if (isStartElement(reader, "keyword")) {
+                String keyword = parseElementContent(reader, "keyword");
+                if (!keyword.isEmpty()) {
+                    keywordList.add(keyword);
+                }
             }
         }
+        if (!keywordList.isEmpty()) {
+            entry.putKeywords(keywordList, preferences.bibEntryPreferences().getKeywordSeparator());
+        }
     }
 
-    private void handleTitles(XMLStreamReader reader, Map<Field, String> fields, String startElement) throws XMLStreamException {
-
+    private void parseUrls(XMLStreamReader reader, BibEntry entry) throws XMLStreamException {
         while (reader.hasNext()) {
             reader.next();
-            if (isStartXMLEvent(reader)) {
+            if (isEndElement(reader, "urls")) {
+                break;
+            }
+
+            if (isStartElement(reader)) {
                 String elementName = reader.getName().getLocalPart();
                 switch (elementName) {
-                    case "title" -> {
-                        List<String> titleStyleContent = new ArrayList<>();
+                    case "web-urls" -> {
                         while (reader.hasNext()) {
                             reader.next();
-                            if (isStartXMLEvent(reader)) {
-                                String tag = reader.getName().getLocalPart();
-                                if ("style".equals(tag)) {
-                                    reader.next();
-                                    if (isCharacterXMLEvent(reader)) {
-                                        if (reader.getText() != null) {
-                                            titleStyleContent.add((reader.getText()));
-                                        }
-                                    }
-                                }
+                            if (isEndElement(reader, "web-urls")) {
+                                break;
                             }
-                            if (isEndXMLEvent(reader) && reader.getName().getLocalPart().equals(elementName)) {
+                            if (isStartElement(reader, "url")) {
+                                String url = parseElementContent(reader, "url");
+                                entry.setField(StandardField.URL, url);
+                            }
+                        }
+                    }
+                    case "pdf-urls" -> {
+                        while (reader.hasNext()) {
+                            reader.next();
+                            if (isEndElement(reader, "pdf-urls")) {
                                 break;
                             }
+                            if (isStartElement(reader, "url")) {
+                                String file = parseElementContent(reader, "url");
+                                entry.setField(StandardField.FILE, file);
+                            }
                         }
-                        putIfValueNotNull(fields, StandardField.TITLE, clean(join(titleStyleContent, "")));
                     }
-                    case "secondary-title" -> {
-                        parseStyleContent(reader, fields, StandardField.JOURNAL, elementName);
+                    case "related-urls" -> {
+                        while (reader.hasNext()) {
+                            reader.next();
+                            if (isEndElement(reader, "related-urls")) {
+                                break;
+                            }
+                            if (isStartElement(reader, "url")) {
+                                String url = clean(parseElementContent(reader, "url"));
+                                entry.setField(StandardField.URL, url);
+                            }
+                        }
                     }
                 }
             }
-
-            if (isEndXMLEvent(reader) && reader.getName().getLocalPart().equals(startElement)) {
-                break;
-            }
         }
     }
 
-    private void handleUrlList(XMLStreamReader reader, Map<Field, String> fields, List<LinkedFile> linkedFiles) throws XMLStreamException {
         while (reader.hasNext()) {
             reader.next();
-            if (isStartXMLEvent(reader)) {
-                String elementName = reader.getName().getLocalPart();
-                switch (elementName) {
-                    case "related-urls" -> {
-                        parseRelatedUrls(reader, fields);
-                    }
-                    case "pdf-urls" -> {
-                        parsePdfUrls(reader, fields, linkedFiles);
-                    }
-                }
-            }
-
-            if (isEndXMLEvent(reader) && "urls".equals(reader.getName().getLocalPart())) {
+    private void parseDates(XMLStreamReader reader, BibEntry entry) throws XMLStreamException {
+        while (reader.hasNext()) {
+            reader.next();
+            if (isEndElement(reader, "dates")) {
                 break;
             }
-        }
-    }
 
-    private void parseRelatedUrls(XMLStreamReader reader, Map<Field, String> fields) throws XMLStreamException {
-
-        while (reader.hasNext()) {
-            reader.next();
-            if (isStartXMLEvent(reader)) {
+            if (isStartElement(reader)) {
                 String elementName = reader.getName().getLocalPart();
-                if ("style".equals(elementName)) {
-                    reader.next();
-                    if (isCharacterXMLEvent(reader)) {
-                        putIfValueNotNull(fields, StandardField.URL, reader.getText());
+                switch (elementName) {
+                    case "year", "month", "day" -> {
+                        String date = parseElementContent(reader, elementName);
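+                        // "year", "month", and "day" are valid StandardField names, so the Optional below can be unwrapped safely.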
+                        entry.setField(StandardField.fromName(elementName).get(), date);
+                    }
+                    case "pub-dates" -> {
+                        while (reader.hasNext()) {
+                            reader.next();
+                            if (isEndElement(reader, "pub-dates")) {
+                                break;
+                            }
+                            if (isStartElement(reader, "date")) {
+                                String pubDate = parseElementContent(reader, "date");
+                                entry.setField(StandardField.DATE, pubDate);
+                            }
+                        }
+                    }
                 }
-            } else if (isCharacterXMLEvent(reader)) {
-                String value = clean(reader.getText());
-                if (value.length() > 0) {
-                    putIfValueNotNull(fields, StandardField.URL, clean(value));
-                }
-            }
-
-            if (isEndXMLEvent(reader) && "related-urls".equals(reader.getName().getLocalPart())) {
-                break;
-            }
             }
         }
     }
 
-    private void parsePdfUrls(XMLStreamReader reader, Map<Field, String> fields, List<LinkedFile> linkedFiles) throws XMLStreamException {
-
+    private String parseElementContent(XMLStreamReader reader, String elementName) throws XMLStreamException {
+        StringBuilder content = new StringBuilder();
         while (reader.hasNext()) {
             reader.next();
-            if (isStartXMLEvent(reader)) {
-                String elementName = reader.getName().getLocalPart();
-                if ("url".equals(elementName)) {
-                    reader.next();
-                    if (isStartXMLEvent(reader)) {
-                        String tagName = reader.getName().getLocalPart();
-                        if ("style".equals(tagName)) {
-                            reader.next();
-                            if (isCharacterXMLEvent(reader)) {
-                                try {
-                                    linkedFiles.add(new LinkedFile(new URL(reader.getText()), "PDF"));
-                                } catch (MalformedURLException e) {
-                                    LOGGER.info("Unable to parse {}", reader.getText());
-                                }
-                            }
-                        }
-                    }
-                }
-            }
-            if (isCharacterXMLEvent(reader)) {
-                try {
-                    linkedFiles.add(new LinkedFile(new URL(reader.getText()), "PDF"));
-                } catch (MalformedURLException e) {
-                    LOGGER.info("Unable to parse {}", reader.getText());
-                }
-            }
-            if (isEndXMLEvent(reader) && "pdf-urls".equals(reader.getName().getLocalPart())) {
+            if (isEndElement(reader, elementName)) {
                 break;
             }
+            if (isStartElement(reader, "style")) {
+                content.append(reader.getElementText()).append(" ");
+            } else if (reader.getEventType() == XMLEvent.CHARACTERS) {
+                content.append(reader.getText());
+            }
         }
+        return clean(content.toString());
     }
 
     private String clean(String input) {
-        return StringUtil.unifyLineBreaks(input, " ")
-                .trim()
-                .replaceAll(" +", " ");
+        return input.trim().replaceAll("\\s+", " ");
     }
 
-    private void putIfValueNotNull(Map<Field, String> fields, Field field, String value) {
-        if (value != null) {
-            fields.put(field, value);
-        }
+    private boolean isStartElement(XMLStreamReader reader, String elementName) {
+        return isStartElement(reader) && reader.getName().getLocalPart().equals(elementName);
     }
 
-    private boolean isCharacterXMLEvent(XMLStreamReader reader) {
-        return reader.getEventType() == XMLEvent.CHARACTERS;
+    private boolean isStartElement(XMLStreamReader reader) {
+        return reader.getEventType() == XMLEvent.START_ELEMENT;
     }
 
-    private boolean isStartXMLEvent(XMLStreamReader reader) {
-        return reader.getEventType() == XMLEvent.START_ELEMENT;
+    private boolean isEndElement(XMLStreamReader reader, String elementName) {
+        return isEndElement(reader) && reader.getName().getLocalPart().equals(elementName);
     }
 
-    private boolean isEndXMLEvent(XMLStreamReader reader) {
+    private boolean isEndElement(XMLStreamReader reader) {
         return reader.getEventType() == XMLEvent.END_ELEMENT;
     }
 
@@ -494,13 +463,8 @@ public List<BibEntry> parseEntries(InputStream inputStream) throws ParseException
             return importDatabase(
                     new BufferedReader(new InputStreamReader(inputStream, StandardCharsets.UTF_8))).getDatabase().getEntries();
         } catch (IOException e) {
-            LOGGER.error(e.getLocalizedMessage(), e);
+            LOGGER.error("Could not import file", e);
         }
         return Collections.emptyList();
     }
 }
-
-
-
-
-
diff --git a/src/main/java/org/jabref/migrations/MergeReviewIntoCommentMigration.java b/src/main/java/org/jabref/migrations/MergeReviewIntoCommentMigration.java
index e599b463564..f7d86408070 100644
--- a/src/main/java/org/jabref/migrations/MergeReviewIntoCommentMigration.java
+++ b/src/main/java/org/jabref/migrations/MergeReviewIntoCommentMigration.java
@@ -55,7 +55,7 @@ private static boolean hasReviewField(BibEntry entry) {
 
     private String mergeCommentFieldIfPresent(BibEntry entry, String review) {
         if (entry.getField(StandardField.COMMENT).isPresent()) {
-            LOGGER.info("Both Comment and Review fields are present in %s! Merging them into the comment field.".formatted(entry.getAuthorTitleYear(150)));
+            LOGGER.info("Both Comment and Review fields are present in {}. Merging them into the comment field.", entry.getCitationKey().orElse(entry.getAuthorTitleYear(150)));
             return "%s\n%s:\n%s".formatted(entry.getField(StandardField.COMMENT).get().trim(), Localization.lang("Review"), review.trim());
         }
         return review;
diff --git a/src/main/java/org/jabref/model/entry/AuthorList.java b/src/main/java/org/jabref/model/entry/AuthorList.java
index 17b68e02fbe..092d04de61c 100644
--- a/src/main/java/org/jabref/model/entry/AuthorList.java
+++ b/src/main/java/org/jabref/model/entry/AuthorList.java
@@ -1,6 +1,7 @@
 package org.jabref.model.entry;
 
 import java.util.Collections;
+import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
 import java.util.Objects;
@@ -117,7 +118,7 @@
  *
  */
 @AllowedToUseLogic("because it needs access to AuthorList parser")
-public class AuthorList {
+public class AuthorList implements Iterable<Author> {
 
     private static final Map<String, AuthorList> AUTHOR_CACHE = Collections.synchronizedMap(new WeakHashMap<>());
     private final List<Author> authors;
@@ -279,7 +280,7 @@ public Author getAuthor(int i) {
     }
 
     /**
-     * Returns the a list of Author objects.
+     * Returns the list of Author objects.
     *
     * @return the List<Author> object.
     */
@@ -484,4 +485,9 @@ public String getForAlphabetization() {
                .map(Author::getNameForAlphabetization)
                .collect(Collectors.joining(" and "));
     }
+
+    @Override
+    public Iterator<Author> iterator() {
+        return authors.iterator();
+    }
 }
diff --git a/src/main/java/org/jabref/model/entry/BibEntry.java b/src/main/java/org/jabref/model/entry/BibEntry.java
index 3e26335a11e..19b33f257fb 100644
--- a/src/main/java/org/jabref/model/entry/BibEntry.java
+++ b/src/main/java/org/jabref/model/entry/BibEntry.java
@@ -392,6 +392,9 @@ public BibEntry withCitationKey(String newKey) {
         return this;
     }
 
+    /**
+     * If not present, {@link BibEntry#getAuthorTitleYear(int)} can be used
+     */
     public Optional<String> getCitationKey() {
         String key = fields.get(InternalField.KEY_FIELD);
         if (StringUtil.isBlank(key)) {
diff --git a/src/test/java/org/jabref/logic/exporter/EndnoteXmlExporterFilesTest.java b/src/test/java/org/jabref/logic/exporter/EndnoteXmlExporterFilesTest.java
new file mode 100644
index 00000000000..38838b388eb
--- /dev/null
+++ b/src/test/java/org/jabref/logic/exporter/EndnoteXmlExporterFilesTest.java
@@ -0,0 +1,96 @@
+package org.jabref.logic.exporter;
+
+import java.io.IOException;
+import java.net.URISyntaxException;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.util.List;
+import java.util.stream.Stream;
+
+import org.jabref.logic.bibtex.BibEntryAssert;
+import org.jabref.logic.importer.ImportFormatPreferences;
+import org.jabref.logic.importer.fileformat.BibtexImporter;
+import org.jabref.logic.importer.fileformat.EndnoteXmlImporter;
+import org.jabref.model.database.BibDatabaseContext;
+import org.jabref.model.entry.BibEntry;
+import org.jabref.model.util.DummyFileUpdateMonitor;
+import org.jabref.preferences.BibEntryPreferences;
+
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.io.TempDir;
+import org.junit.jupiter.params.ParameterizedTest;
+import org.junit.jupiter.params.provider.MethodSource;
+import org.mockito.Answers;
+import org.xmlunit.diff.DefaultNodeMatcher;
+import org.xmlunit.diff.ElementSelectors;
+
+import static org.hamcrest.MatcherAssert.assertThat;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
+import static org.xmlunit.matchers.CompareMatcher.isSimilarTo;
+
+public class EndnoteXmlExporterFilesTest {
+
+    private Exporter exporter;
+    private BibDatabaseContext databaseContext;
+    private Path exportFile;
+    private Path bibFileToExport;
+    private BibtexImporter bibtexImporter;
+    private EndnoteXmlImporter endnoteXmlImporter;
+
+    @BeforeEach
+    public void setUp(@TempDir Path testFolder) throws Exception {
+        ImportFormatPreferences importFormatPreferences = mock(ImportFormatPreferences.class, Answers.RETURNS_DEEP_STUBS);
+        when(importFormatPreferences.bibEntryPreferences()).thenReturn(mock(BibEntryPreferences.class));
+        when(importFormatPreferences.bibEntryPreferences().getKeywordSeparator()).thenReturn(',');
+
+        databaseContext = new BibDatabaseContext();
+        exporter = new EndnoteXmlExporter(new BibEntryPreferences(','));
+        endnoteXmlImporter = new EndnoteXmlImporter(importFormatPreferences);
+        bibtexImporter = new BibtexImporter(importFormatPreferences, new DummyFileUpdateMonitor());
+        exportFile = testFolder.resolve("exported-endnote.xml").toAbsolutePath();
+    }
+
+    static Stream<String> fileNames() throws IOException, URISyntaxException {
+        // we have to point it to one existing file, otherwise it will return the default class path
+        Path resourceDir = Path.of(EndnoteXmlExporterFilesTest.class.getResource("EndnoteXmlExportTestSingleBookEntry.bib").toURI()).getParent();
+        try (Stream<Path> stream = Files.list(resourceDir)) {
+            return stream.map(n -> n.getFileName().toString())
+                         .filter(n -> n.endsWith(".bib"))
+                         .filter(n -> n.startsWith("EndnoteXml"))
+                         // mapping required, because we get "source already consumed or closed" otherwise
+                         .toList().stream();
+        }
+    }
+
+    @ParameterizedTest
+    @MethodSource("fileNames")
+    public final void performExport(String filename) throws Exception {
+        bibFileToExport = Path.of(EndnoteXmlExporterFilesTest.class.getResource(filename).toURI());
+        List<BibEntry> entries = bibtexImporter.importDatabase(bibFileToExport).getDatabase().getEntries();
+        exporter.export(databaseContext, exportFile, entries);
+        String actual = String.join("\n", Files.readAllLines(exportFile));
+
+        String xmlFileName = filename.replace(".bib", ".xml");
+        Path expectedFile = Path.of(EndnoteXmlExporterFilesTest.class.getResource(xmlFileName).toURI());
+        String expected = String.join("\n", Files.readAllLines(expectedFile));
+
+        // The order of the XML elements may vary; since it does not matter semantically, we ignore it.
+        // Source: https://stackoverflow.com/a/16540679/873282
+        assertThat(actual, isSimilarTo(expected)
+                .ignoreWhitespace()
+                .normalizeWhitespace()
+                .withNodeMatcher(new DefaultNodeMatcher(ElementSelectors.byNameAndText)));
+    }
+
+    @ParameterizedTest
+    @MethodSource("fileNames")
+    public final void exportAsEndnoteAndThenImportAsEndnote(String filename) throws Exception {
+        bibFileToExport = Path.of(EndnoteXmlExporterFilesTest.class.getResource(filename).toURI());
+        List<BibEntry> entries = bibtexImporter.importDatabase(bibFileToExport).getDatabase().getEntries();
+
+        exporter.export(databaseContext, exportFile, entries);
+        BibEntryAssert.assertEquals(entries, exportFile, endnoteXmlImporter);
+    }
+}
diff --git a/src/test/java/org/jabref/logic/exporter/EndnoteXmlExporterTest.java b/src/test/java/org/jabref/logic/exporter/EndnoteXmlExporterTest.java
new file mode 100644
index 00000000000..383a32e02e1
--- /dev/null
+++ b/src/test/java/org/jabref/logic/exporter/EndnoteXmlExporterTest.java
@@ -0,0 +1,79 @@
+package org.jabref.logic.exporter;
+
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.util.Collections;
+
+import org.jabref.logic.importer.ImportFormatPreferences;
+import org.jabref.model.database.BibDatabaseContext;
+import org.jabref.model.entry.BibEntry;
+import org.jabref.model.entry.field.StandardField;
+import org.jabref.model.entry.types.StandardEntryType;
+import org.jabref.preferences.BibEntryPreferences;
+
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.io.TempDir;
+import org.mockito.Answers;
+
+import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertThrows;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
+
+public class EndnoteXmlExporterTest {
+
+    private Exporter exporter;
+    private BibDatabaseContext databaseContext;
+    private BibEntry bookEntry;
+
+    @BeforeEach
+    public void setUp() throws Exception {
+        ImportFormatPreferences importFormatPreferences = mock(ImportFormatPreferences.class, Answers.RETURNS_DEEP_STUBS);
+        when(importFormatPreferences.bibEntryPreferences()).thenReturn(mock(BibEntryPreferences.class));
+        when(importFormatPreferences.bibEntryPreferences().getKeywordSeparator()).thenReturn(',');
+
+        databaseContext = new BibDatabaseContext();
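+        // A minimal BibEntryPreferences is sufficient here: the exporter only reads the keyword separator from it.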
exporter = new EndnoteXmlExporter(new BibEntryPreferences(',')); + + bookEntry = new BibEntry(StandardEntryType.Book) + .withCitationKey("Bhattacharyya2013") + .withField(StandardField.EDITOR, "Bhattacharyya, R. and McCormick, M. E.") + .withField(StandardField.PUBLISHER, "Elsevier Science") + .withField(StandardField.TITLE, "Wave Energy Conversion") + .withField(StandardField.YEAR, "2013") + .withField(StandardField.ISBN, "9780080442129") + .withField(StandardField.FILE, "/home/mfg/acad/ext/arts/waves/water/[R._Bhattacharyya_and_M.E._McCormick_(Eds.)]_Wave_(z-lib.org).pdf") + .withField(StandardField.KEYWORDS, "waves, agua"); + } + + @Test + public void exportForEmptyEntryList(@TempDir Path tempDir) throws Exception { + Path file = tempDir.resolve("EmptyFile.xml"); + + exporter.export(databaseContext, file, Collections.emptyList()); + assertFalse(Files.exists(file)); + } + + @Test + public void exportForNullDBThrowsException(@TempDir Path tempDir) { + Path file = tempDir.resolve("NullDB"); + + assertThrows(NullPointerException.class, () -> + exporter.export(null, file, Collections.singletonList(bookEntry))); + } + + @Test + public void exportForNullExportPathThrowsException(@TempDir Path tempDir) { + assertThrows(NullPointerException.class, () -> + exporter.export(databaseContext, null, Collections.singletonList(bookEntry))); + } + + @Test + public void exportForNullEntryListThrowsException(@TempDir Path tempDir) { + Path file = tempDir.resolve("EntryNull"); + + assertThrows(NullPointerException.class, () -> + exporter.export(databaseContext, file, null)); + } +} diff --git a/src/test/resources/org/jabref/logic/exporter/EndnoteXmlExportTestMultipleEntries.bib b/src/test/resources/org/jabref/logic/exporter/EndnoteXmlExportTestMultipleEntries.bib new file mode 100644 index 00000000000..963813f43e6 --- /dev/null +++ b/src/test/resources/org/jabref/logic/exporter/EndnoteXmlExportTestMultipleEntries.bib @@ -0,0 +1,30 @@ +% Encoding: UTF-8 + +@conference{, + abstract = {An episode rule of associating two episodes represents a temporal implication of the antecedent episode to the consequent episode. Episode-rule mining is a task of extracting useful patterns/episodes from large event databases. We present an episode-rule mining algorithm for finding frequent and confident serial-episode rules via first local-maximum confidence in yielding ideal window widths, if exist, in event sequences based on minimal occurrences constrained by a constant maximum gap. Results from our preliminary empirical study confirm the applicability of the episode-rule mining algorithm for Web-site traversal-pattern discovery, and show that the first local maximization yielding ideal window widths exists in real data but rarely in synthetic random data sets.}, + address = {New York, NY, USA}, + author = {Dai, H. K.}, + booktitle = {Proceedings of the 9th International Symposium on Information and Communication Technology}, + doi = {10.1145/3287921.3287982}, + isbn = {9781450365390}, + keywords = {Web-site traversal pattern, episode-rule mining, first local maximization}, + month = {12}, + pages = {130--136}, + publisher = {Association for Computing Machinery}, + series = {SoICT '18}, + title = {Episode-Rule Mining with Minimal Occurrences via First Local Maximization in Confidence}, + year = {2018}, +} + +@book{, + editor = {Bhattacharyya, R. and McCormick, M. 
E.},
+  file = {/home/mfg/acad/ext/arts/waves/water/[R._Bhattacharyya_and_M.E._McCormick_(Eds.)]_Wave_(z-lib.org).pdf},
+  isbn = {9780080442129},
+  keywords = {waves, agua},
+  publisher = {Elsevier Science},
+  title = {Wave Energy Conversion},
+  year = {2013},
+}
+
+@Comment{jabref-meta: databaseType:bibtex;}
+
diff --git a/src/test/resources/org/jabref/logic/exporter/EndnoteXmlExportTestMultipleEntries.xml b/src/test/resources/org/jabref/logic/exporter/EndnoteXmlExportTestMultipleEntries.xml
new file mode 100644
index 00000000000..ec211bdcf1c
--- /dev/null
+++ b/src/test/resources/org/jabref/logic/exporter/EndnoteXmlExportTestMultipleEntries.xml
@@ -0,0 +1,64 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<xml>
+  <records>
+    <record>
+      <ref-type name="Conference">11</ref-type>
+      <database name="MyLibrary">MyLibrary</database>
+      <source-app name="JabRef">JabRef</source-app>
+      <contributors>
+        <authors>
+          <author>Dai, H. K.</author>
+        </authors>
+      </contributors>
+      <titles>
+        <title>Episode-Rule Mining with Minimal Occurrences via First Local Maximization in Confidence</title>
+        <secondary-title>Proceedings of the 9th International Symposium on Information and Communication Technology</secondary-title>
+      </titles>
+      <keywords>
+        <keyword>Web-site traversal pattern</keyword>
+        <keyword>episode-rule mining</keyword>
+        <keyword>first local maximization</keyword>
+      </keywords>
+      <dates>
+        <year>2018</year>
+        <month>12</month>
+      </dates>
+      <pages>130--136</pages>
+      <publisher>Association for Computing Machinery</publisher>
+      <isbn>9781450365390</isbn>
+      <electronic-resource-num>10.1145/3287921.3287982</electronic-resource-num>
+      <abstract>An episode rule of associating two episodes represents a temporal implication of the antecedent episode to the consequent episode. Episode-rule mining is a task of extracting useful patterns/episodes from large event databases. We present an episode-rule mining algorithm for finding frequent and confident serial-episode rules via first local-maximum confidence in yielding ideal window widths, if exist, in event sequences based on minimal occurrences constrained by a constant maximum gap. Results from our preliminary empirical study confirm the applicability of the episode-rule mining algorithm for Web-site traversal-pattern discovery, and show that the first local maximization yielding ideal window widths exists in real data but rarely in synthetic random data sets.</abstract>
+      <secondary-title>Proceedings of the 9th International Symposium on Information and Communication Technology</secondary-title>
+      <tertiary-title>SoICT '18</tertiary-title>
+      <auth-address>New York, NY, USA</auth-address>
+    </record>
+    <record>
+      <ref-type name="Book">2</ref-type>
+      <database name="MyLibrary">MyLibrary</database>
+      <source-app name="JabRef">JabRef</source-app>
+      <contributors>
+        <secondary-authors>
+          <author>Bhattacharyya, R.</author>
+          <author>McCormick, M. E.</author>
+        </secondary-authors>
+      </contributors>
+      <titles>
+        <title>Wave Energy Conversion</title>
+      </titles>
+      <keywords>
+        <keyword>waves</keyword>
+        <keyword>agua</keyword>
+      </keywords>
+      <dates>
+        <year>2013</year>
+      </dates>
+      <urls>
+        <pdf-urls>
+          <url>/home/mfg/acad/ext/arts/waves/water/[R._Bhattacharyya_and_M.E._McCormick_(Eds.)]_Wave_(z-lib.org).pdf</url>
+        </pdf-urls>
+      </urls>
+      <publisher>Elsevier Science</publisher>
+      <isbn>9780080442129</isbn>
+    </record>
+  </records>
+</xml>
diff --git a/src/test/resources/org/jabref/logic/exporter/EndnoteXmlExportTestSingleBookEntry.bib b/src/test/resources/org/jabref/logic/exporter/EndnoteXmlExportTestSingleBookEntry.bib
new file mode 100644
index 00000000000..374c04c234e
--- /dev/null
+++ b/src/test/resources/org/jabref/logic/exporter/EndnoteXmlExportTestSingleBookEntry.bib
@@ -0,0 +1,13 @@
+% Encoding: UTF-8
+
+@book{,
+  editor = {Bhattacharyya, R. and McCormick, M. E.},
+  file = {/home/mfg/acad/ext/arts/waves/water/[R._Bhattacharyya_and_M.E._McCormick_(Eds.)]_Wave_(z-lib.org).pdf},
+  isbn = {9780080442129},
+  keywords = {waves, agua},
+  publisher = {Elsevier Science},
+  title = {Wave Energy Conversion},
+  year = {2013},
+}
+
+@Comment{jabref-meta: databaseType:bibtex;}
diff --git a/src/test/resources/org/jabref/logic/exporter/EndnoteXmlExportTestSingleBookEntry.xml b/src/test/resources/org/jabref/logic/exporter/EndnoteXmlExportTestSingleBookEntry.xml
new file mode 100644
index 00000000000..7a22b58b1ed
--- /dev/null
+++ b/src/test/resources/org/jabref/logic/exporter/EndnoteXmlExportTestSingleBookEntry.xml
@@ -0,0 +1,33 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<xml>
+  <records>
+    <record>
+      <ref-type name="Book">2</ref-type>
+      <database name="MyLibrary">MyLibrary</database>
+      <source-app name="JabRef">JabRef</source-app>
+      <contributors>
+        <secondary-authors>
+          <author>Bhattacharyya, R.</author>
+          <author>McCormick, M. E.</author>
+        </secondary-authors>
+      </contributors>
+      <titles>
+        <title>Wave Energy Conversion</title>
+      </titles>
+      <keywords>
+        <keyword>waves</keyword>
+        <keyword>agua</keyword>
+      </keywords>
+      <dates>
+        <year>2013</year>
+      </dates>
+      <urls>
+        <pdf-urls>
+          <url>/home/mfg/acad/ext/arts/waves/water/[R._Bhattacharyya_and_M.E._McCormick_(Eds.)]_Wave_(z-lib.org).pdf</url>
+        </pdf-urls>
+      </urls>
+      <publisher>Elsevier Science</publisher>
+      <isbn>9780080442129</isbn>
+    </record>
+  </records>
+</xml>
diff --git a/src/test/resources/org/jabref/logic/importer/fileformat/EndnoteXmlImporterTestArticle.bib b/src/test/resources/org/jabref/logic/importer/fileformat/EndnoteXmlImporterTestArticle.bib
index f93199db1af..2a6e553cf7a 100644
--- a/src/test/resources/org/jabref/logic/importer/fileformat/EndnoteXmlImporterTestArticle.bib
+++ b/src/test/resources/org/jabref/logic/importer/fileformat/EndnoteXmlImporterTestArticle.bib
@@ -1,16 +1,18 @@
 @article{,
- abstract = {test abstract},
- author = {Ahmad, AS and Ormiston-Smith, N and Sasieni, PD},
- doi = {10.1038/bjc.2014.606},
- file = {:file\://localhost/Users/user/Documents/Bookends/Attachments/Ahmad%20et%20al%202015.pdf:PDF},
- isbn = {1532-1827 (Electronic) 0007-0920 (Linking)},
- keywords = {Age Factors; Aged; Aged, 80 and over; Female; Great Britain/epidemiology; Humans; Male; Middle Aged; Models, Statistical; Neoplasms/*epidemiology; Risk Assessment; Risk Factors; Sex Characteristics},
- number = {5},
- pages = {943-7},
- title = {Trends in the lifetime risk of developing cancer in Great Britain: comparison of risk for those born from 1930 to 1960},
- volume = {112},
- year = {2015},
- journal = {Br J Cancer},
- note = {some notes},
- url = {http://www.ncbi.nlm.nih.gov/pubmed/25647015},
+ abstract = {test abstract},
+ address = {Queen Mary University of London, Centre for Cancer Prevention, Wolfson Institute of Preventive Medicine, Charterhouse Square, London EC1M 6BQ, UK. Cancer Research UK, Head of Statistical Information, Angel Building, 407 St John Street, London EC1V 4AD, UK.},
+ author = {Ahmad, AS and Ormiston-Smith, N and Sasieni, PD},
+ date = {Mar 3},
+ doi = {10.1038/bjc.2014.606},
+ file = {file://localhost/Users/user/Documents/Bookends/Attachments/Ahmad%20et%20al%202015.pdf},
+ isbn = {1532-1827 (Electronic) 0007-0920 (Linking)},
+ journal = {Br J Cancer},
+ keywords = {Age Factors; Aged; Aged, 80 and over; Female; Great Britain/epidemiology; Humans; Male; Middle Aged; Models, Statistical; Neoplasms/*epidemiology; Risk Assessment; Risk Factors; Sex Characteristics},
+ note = {some notes},
+ number = {5},
+ pages = {943-7},
+ title = {Trends in the lifetime risk of developing cancer in Great Britain: comparison of risk for those born from 1930 to 1960},
+ url = {http://www.ncbi.nlm.nih.gov/pubmed/25647015},
+ volume = {112},
+ year = {2015}
 }
diff --git a/src/test/resources/org/jabref/logic/importer/fileformat/EndnoteXmlImporterTestArticle.xml b/src/test/resources/org/jabref/logic/importer/fileformat/EndnoteXmlImporterTestArticle.xml
index c3a8bb5de98..6f70289a154 100644
--- a/src/test/resources/org/jabref/logic/importer/fileformat/EndnoteXmlImporterTestArticle.xml
+++ b/src/test/resources/org/jabref/logic/importer/fileformat/EndnoteXmlImporterTestArticle.xml
@@ -100,8 +100,7 @@
-
+
diff --git a/src/test/resources/org/jabref/logic/importer/fileformat/EndnoteXmlImporterTestArticle2.bib b/src/test/resources/org/jabref/logic/importer/fileformat/EndnoteXmlImporterTestArticle2.bib
index a7293b79f14..72f00bd8af9 100644
--- a/src/test/resources/org/jabref/logic/importer/fileformat/EndnoteXmlImporterTestArticle2.bib
+++ b/src/test/resources/org/jabref/logic/importer/fileformat/EndnoteXmlImporterTestArticle2.bib
@@ -1,24 +1,30 @@
 @article{,
- author = {Chapman, A.
-  isbn = {0012-9658},
-  number = {1},
-  pages = {93-105},
-  title = {An ecological basis for reforestation of submariginal lands in the Central Hardwood Region},
-  volume = {18},
-  year = {1937},
-  journal = {Ecology},
-  note = {some notes},
-  url = {://000200148800007}
+  address = {Cent States Forest Expt Stn, Columbus, OH USA},
+  author = {Chapman, A. G.},
+  date = {Jan},
+  isbn = {0012-9658},
+  journal = {Ecology},
+  language = {English},
+  note = {some notes},
+  number = {1},
+  pages = {93-105},
+  title = {An ecological basis for reforestation of submariginal lands in the Central Hardwood Region},
+  url = {://000200148800007},
+  volume = {18},
+  year = {1937}
 }
 @book{,
-  author = {Strohecker, H. F.},
-  isbn = {0012-9658},
-  number = {1},
-  journal = {Ecology},
-  note = {some other notes},
-  pages = {162-168},
-  title = {A survey of soil temperatures in the Chicago area},
-  volume = {18},
-  url = {://000200148800014},
-  year = {1937}
+  address = {Univ Chicago, Chicago, IL USA},
+  booktitle = {Ecology},
+  author = {Strohecker, H. F.},
+  date = {Jan},
+  isbn = {0012-9658},
+  language = {English},
+  note = {some other notes},
+  number = {1},
+  pages = {162-168},
+  title = {A survey of soil temperatures in the Chicago area},
+  url = {://000200148800014},
+  volume = {18},
+  year = {1937}
 }
diff --git a/src/test/resources/org/jabref/logic/importer/fileformat/EndnoteXmlImporterTestArticle2.xml b/src/test/resources/org/jabref/logic/importer/fileformat/EndnoteXmlImporterTestArticle2.xml
index e2ff36845c1..56a7936e07f 100644
--- a/src/test/resources/org/jabref/logic/importer/fileformat/EndnoteXmlImporterTestArticle2.xml
+++ b/src/test/resources/org/jabref/logic/importer/fileformat/EndnoteXmlImporterTestArticle2.xml
@@ -77,8 +77,7 @@
-
+
@@ -166,8 +165,7 @@
-
+
diff --git a/src/test/resources/org/jabref/logic/importer/fileformat/EndnoteXmlImporterTestLabelAndMultiTitle.bib b/src/test/resources/org/jabref/logic/importer/fileformat/EndnoteXmlImporterTestLabelAndMultiTitle.bib
index d07d2d77638..d8d124007b0 100644
--- a/src/test/resources/org/jabref/logic/importer/fileformat/EndnoteXmlImporterTestLabelAndMultiTitle.bib
+++ b/src/test/resources/org/jabref/logic/importer/fileformat/EndnoteXmlImporterTestLabelAndMultiTitle.bib
@@ -1,33 +1,38 @@
 @article{,
-  author = {Chapman, A. G.},
-  isbn = {0012-9658},
-  number = {1},
-  pages = {93-105},
-  title = {An ecological basis for reforestation of submariginal lands in the Central Hardwood Region},
-  volume = {18},
-  year = {1937},
-  journal = {Ecology},
-  note = {some notes},
-  url = {://000200148800007}
+  address = {Cent States Forest Expt Stn, Columbus, OH USA},
+  author = {Chapman, A. G.},
+  date = {Jan},
+  isbn = {0012-9658},
+  journal = {Ecology},
+  language = {English},
+  note = {some notes},
+  number = {1},
+  pages = {93-105},
+  title = {An ecological basis for reforestation of submariginal lands in the Central Hardwood Region},
+  url = {://000200148800007},
+  volume = {18},
+  year = {1937}
 }
 @book{,
-  author = {Strohecker, H. F.},
-  isbn = {0012-9658},
-  number = {1},
-  journal = {Ecology},
-  note = {some other notes},
-  pages = {162-168},
-  title = {A survey of soil temperatures in the Chicago area},
-  volume = {18},
-  url = {://000200148800014},
-  year = {1937},
+  address = {Univ Chicago, Chicago, IL USA},
+  booktitle = {Ecology},
+  author = {Strohecker, H. F.},
+  date = {Jan},
+  isbn = {0012-9658},
+  language = {English},
+  note = {some other notes},
+  number = {1},
+  pages = {162-168},
+  title = {A survey of soil temperatures in the Chicago area},
+  url = {://000200148800014},
+  volume = {18},
+  year = {1937}
 }
 @article{,
-  author = {Ainley, D.G. and Ribic, C.A. and Wodd, R.C.},
-  endnote-label = {B4},
-  journal = {Journal of Animal Ecology},
-  pages = {1-20},
-  title = {A demographic study of the south polar skua Catharacta maccormicki at Cape Crozier},
-  volume = {59},
-  year = {1990},
+  author = {Ainley, D.G. and Ribic, C.A. and Wodd, R.C.},
+  journal = {Journal of Animal Ecology},
+  pages = {1-20},
+  title = {A demographic study of the south polar skua Catharacta maccormicki at Cape Crozier},
+  volume = {59},
+  year = {1990}
 }
diff --git a/src/test/resources/org/jabref/logic/importer/fileformat/EndnoteXmlImporterTestReport.bib b/src/test/resources/org/jabref/logic/importer/fileformat/EndnoteXmlImporterTestReport.bib
index f8e46826c01..aebaaae8bba 100644
--- a/src/test/resources/org/jabref/logic/importer/fileformat/EndnoteXmlImporterTestReport.bib
+++ b/src/test/resources/org/jabref/logic/importer/fileformat/EndnoteXmlImporterTestReport.bib
@@ -1,6 +1,5 @@
-@report{,
+@techreport{,
   author = {Wiggins, D. A.},
-  endnote-label = {B1040},
   pages = {Available: http://www.fs.fed.us/r2/projects/scp/assessments/purplemartin.pdf},
   publisher = {USDA Forest Service, Rocky Mountain Region},
   title = {Purple Martin (Progne subis): a technical conservation assessment},
diff --git a/src/test/resources/org/jabref/logic/importer/fileformat/EndnoteXmlImporterTest_EmptyKeywordStyle.bib b/src/test/resources/org/jabref/logic/importer/fileformat/EndnoteXmlImporterTest_EmptyKeywordStyle.bib
index 8f3f4d58339..d88e8fd9422 100644
--- a/src/test/resources/org/jabref/logic/importer/fileformat/EndnoteXmlImporterTest_EmptyKeywordStyle.bib
+++ b/src/test/resources/org/jabref/logic/importer/fileformat/EndnoteXmlImporterTest_EmptyKeywordStyle.bib
@@ -1,17 +1,18 @@
 % Encoding: UTF-8
 
 @Article{,
-  author = {Lim, RCH},
-  title = {Painless Laser Acupuncture for Smoking Cessation.},
-  doi = {10.1089/acu.2018.1295},
-  endnote-label = {29937971},
-  note = {FFT available, not read on 7/2/18},
-  number = {3},
-  pages = {159-162},
-  url = {https://www.ncbi.nlm.nih.gov/pubmed/29937971},
-  volume = {30},
-  isbn = {1933-6586},
-  journal = {Med Acupunct},
-  keywords = {anxiety; craving; dependency; destress; health restoration},
-  year = {2018},
+  address = {Laser Acupuncture Centre, Singapore.},
+  author = {Lim, RCH},
+  date = {Jun 01},
+  doi = {10.1089/acu.2018.1295},
+  isbn = {1933-6586},
+  journal = {Med Acupunct},
+  keywords = {anxiety; craving; dependency; destress; health restoration},
+  note = {FFT available, not read on 7/2/18},
+  number = {3},
+  pages = {159-162},
+  title = {Painless Laser Acupuncture for Smoking Cessation.},
+  url = {https://www.ncbi.nlm.nih.gov/pubmed/29937971},
+  volume = {30},
+  year = {2018},
 }
diff --git a/src/test/resources/org/jabref/logic/importer/fileformat/EndnoteXmlImporterTest_WithoutUrlStyle.bib b/src/test/resources/org/jabref/logic/importer/fileformat/EndnoteXmlImporterTest_WithoutUrlStyle.bib
index f93199db1af..2a6e553cf7a 100644
--- a/src/test/resources/org/jabref/logic/importer/fileformat/EndnoteXmlImporterTest_WithoutUrlStyle.bib
+++ b/src/test/resources/org/jabref/logic/importer/fileformat/EndnoteXmlImporterTest_WithoutUrlStyle.bib
@@ -1,16 +1,18 @@
 @article{,
-  abstract = {test abstract},
-  author = {Ahmad, AS and Ormiston-Smith, N and Sasieni, PD},
-  doi = {10.1038/bjc.2014.606},
-  file = {:file\://localhost/Users/user/Documents/Bookends/Attachments/Ahmad%20et%20al%202015.pdf:PDF},
-  isbn = {1532-1827 (Electronic) 0007-0920 (Linking)},
-  keywords = {Age Factors; Aged; Aged, 80 and over; Female; Great Britain/epidemiology; Humans; Male; Middle Aged; Models, Statistical; Neoplasms/*epidemiology; Risk Assessment; Risk Factors; Sex Characteristics},
-  number = {5},
-  pages = {943-7},
-  title = {Trends in the lifetime risk of developing cancer in Great Britain: comparison of risk for those born from 1930 to 1960},
-  volume = {112},
-  year = {2015},
-  journal = {Br J Cancer},
-  note = {some notes},
-  url = {http://www.ncbi.nlm.nih.gov/pubmed/25647015},
+  abstract = {test abstract},
+  address = {Queen Mary University of London, Centre for Cancer Prevention, Wolfson Institute of Preventive Medicine, Charterhouse Square, London EC1M 6BQ, UK. Cancer Research UK, Head of Statistical Information, Angel Building, 407 St John Street, London EC1V 4AD, UK.},
+  author = {Ahmad, AS and Ormiston-Smith, N and Sasieni, PD},
+  date = {Mar 3},
+  doi = {10.1038/bjc.2014.606},
+  file = {file://localhost/Users/user/Documents/Bookends/Attachments/Ahmad%20et%20al%202015.pdf},
+  isbn = {1532-1827 (Electronic) 0007-0920 (Linking)},
+  journal = {Br J Cancer},
+  keywords = {Age Factors; Aged; Aged, 80 and over; Female; Great Britain/epidemiology; Humans; Male; Middle Aged; Models, Statistical; Neoplasms/*epidemiology; Risk Assessment; Risk Factors; Sex Characteristics},
+  note = {some notes},
+  number = {5},
+  pages = {943-7},
+  title = {Trends in the lifetime risk of developing cancer in Great Britain: comparison of risk for those born from 1930 to 1960},
+  url = {http://www.ncbi.nlm.nih.gov/pubmed/25647015},
+  volume = {112},
+  year = {2015}
 }
diff --git a/src/test/resources/org/jabref/logic/importer/fileformat/EndnoteXmlImporterTest_WithoutUrlStyle.xml b/src/test/resources/org/jabref/logic/importer/fileformat/EndnoteXmlImporterTest_WithoutUrlStyle.xml
index 768ba5af391..e2cccc24a8e 100644
--- a/src/test/resources/org/jabref/logic/importer/fileformat/EndnoteXmlImporterTest_WithoutUrlStyle.xml
+++ b/src/test/resources/org/jabref/logic/importer/fileformat/EndnoteXmlImporterTest_WithoutUrlStyle.xml
@@ -100,8 +100,7 @@
-
+
diff --git a/src/test/resources/org/jabref/logic/importer/fileformat/EndnoteXmlImporterTest_WithoutUrlStyle2.bib b/src/test/resources/org/jabref/logic/importer/fileformat/EndnoteXmlImporterTest_WithoutUrlStyle2.bib
index a7293b79f14..72f00bd8af9 100644
--- a/src/test/resources/org/jabref/logic/importer/fileformat/EndnoteXmlImporterTest_WithoutUrlStyle2.bib
+++ b/src/test/resources/org/jabref/logic/importer/fileformat/EndnoteXmlImporterTest_WithoutUrlStyle2.bib
@@ -1,24 +1,30 @@
 @article{,
-  author = {Chapman, A. G.},
-  isbn = {0012-9658},
-  number = {1},
-  pages = {93-105},
-  title = {An ecological basis for reforestation of submariginal lands in the Central Hardwood Region},
-  volume = {18},
-  year = {1937},
-  journal = {Ecology},
-  note = {some notes},
-  url = {://000200148800007}
+  address = {Cent States Forest Expt Stn, Columbus, OH USA},
+  author = {Chapman, A. G.},
+  date = {Jan},
+  isbn = {0012-9658},
+  journal = {Ecology},
+  language = {English},
+  note = {some notes},
+  number = {1},
+  pages = {93-105},
+  title = {An ecological basis for reforestation of submariginal lands in the Central Hardwood Region},
+  url = {://000200148800007},
+  volume = {18},
+  year = {1937}
 }
 @book{,
-  author = {Strohecker, H. F.},
-  isbn = {0012-9658},
-  number = {1},
-  journal = {Ecology},
-  note = {some other notes},
-  pages = {162-168},
-  title = {A survey of soil temperatures in the Chicago area},
-  volume = {18},
-  url = {://000200148800014},
-  year = {1937}
+  address = {Univ Chicago, Chicago, IL USA},
+  booktitle = {Ecology},
+  author = {Strohecker, H. F.},
+  date = {Jan},
+  isbn = {0012-9658},
+  language = {English},
+  note = {some other notes},
+  number = {1},
+  pages = {162-168},
+  title = {A survey of soil temperatures in the Chicago area},
+  url = {://000200148800014},
+  volume = {18},
+  year = {1937}
 }
diff --git a/src/test/resources/org/jabref/logic/importer/fileformat/EndnoteXmlImporterTest_WithoutUrlStyle2.xml b/src/test/resources/org/jabref/logic/importer/fileformat/EndnoteXmlImporterTest_WithoutUrlStyle2.xml
index 1050edd3f0f..4dd31030ab1 100644
--- a/src/test/resources/org/jabref/logic/importer/fileformat/EndnoteXmlImporterTest_WithoutUrlStyle2.xml
+++ b/src/test/resources/org/jabref/logic/importer/fileformat/EndnoteXmlImporterTest_WithoutUrlStyle2.xml
@@ -77,8 +77,7 @@
-
+
@@ -166,8 +165,7 @@
-
+
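The fixtures above pair each `.bib` input with the XML the new exporter is expected to produce. A comparison test along the following lines exercises that pairing — this is a minimal sketch, not the actual exporter test shipped with this change; the `BibEntryPreferences` stub (including `getKeywordSeparator()`) and the relative fixture path are assumptions:

```java
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.List;

import org.jabref.logic.exporter.EndnoteXmlExporter;
import org.jabref.model.database.BibDatabaseContext;
import org.jabref.model.entry.BibEntry;
import org.jabref.model.entry.field.StandardField;
import org.jabref.model.entry.types.StandardEntryType;
import org.jabref.preferences.BibEntryPreferences;

import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.io.TempDir;
import org.mockito.Mockito;

import static org.junit.jupiter.api.Assertions.assertEquals;

class EndnoteXmlExporterSketchTest {

    @Test
    void exportsSingleBookEntryLikeTheFixture(@TempDir Path tempDir) throws Exception {
        // Rebuild the entry from EndnoteXmlExportTestSingleBookEntry.bib in code.
        BibEntry entry = new BibEntry(StandardEntryType.Book)
                .withField(StandardField.EDITOR, "Bhattacharyya, R. and McCormick, M. E.")
                .withField(StandardField.TITLE, "Wave Energy Conversion")
                .withField(StandardField.PUBLISHER, "Elsevier Science")
                .withField(StandardField.ISBN, "9780080442129")
                .withField(StandardField.KEYWORDS, "waves, agua")
                .withField(StandardField.YEAR, "2013");

        // Assumption: the exporter asks BibEntryPreferences for the keyword
        // separator, so the stub must return one for the keywords to be split.
        BibEntryPreferences preferences = Mockito.mock(BibEntryPreferences.class);
        Mockito.when(preferences.getKeywordSeparator()).thenReturn(',');

        EndnoteXmlExporter exporter = new EndnoteXmlExporter(preferences);
        Path actual = tempDir.resolve("EndnoteExport.xml");
        exporter.export(new BibDatabaseContext(), actual, List.of(entry));

        // Fixture path assumed relative to the repository root; an exact string
        // comparison may additionally need whitespace normalization.
        Path expected = Path.of("src/test/resources/org/jabref/logic/exporter/EndnoteXmlExportTestSingleBookEntry.xml");
        assertEquals(Files.readString(expected), Files.readString(actual));
    }
}
```

Comparing against the checked-in XML keeps the expected output reviewable next to its `.bib` source, which is why the fixtures are committed in pairs.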