repo stringlengths 1 191 ⌀ | file stringlengths 23 351 | code stringlengths 0 5.32M | file_length int64 0 5.32M | avg_line_length float64 0 2.9k | max_line_length int64 0 288k | extension_type stringclasses 1 value |
|---|---|---|---|---|---|---|
null | jabref-main/src/main/java/org/jabref/logic/importer/fetcher/GvkFetcher.java | package org.jabref.logic.importer.fetcher;
import java.net.MalformedURLException;
import java.net.URISyntaxException;
import java.net.URL;
import java.util.Arrays;
import java.util.Collection;
import java.util.Optional;
import org.jabref.logic.help.HelpFile;
import org.jabref.logic.importer.FetcherException;
import org.jabref.logic.importer.Parser;
import org.jabref.logic.importer.SearchBasedParserFetcher;
import org.jabref.logic.importer.fetcher.transformers.GVKQueryTransformer;
import org.jabref.logic.importer.fileformat.PicaXmlParser;
import org.apache.http.client.utils.URIBuilder;
import org.apache.lucene.queryparser.flexible.core.nodes.QueryNode;
public class GvkFetcher implements SearchBasedParserFetcher {

    private static final String URL_PATTERN = "http://sru.gbv.de/gvk?";

    /**
     * Search keys understood by the GVK SRU endpoint, e.g. "tit" for "title".
     * A query without an explicit search key falls back to the default key "all".
     */
    private final Collection<String> searchKeys = Arrays.asList("all", "tit", "per", "thm", "slw", "txt", "num", "kon", "ppn", "bkl", "erj");

    @Override
    public String getName() {
        return "GVK";
    }

    @Override
    public Optional<HelpFile> getHelpPage() {
        return Optional.of(HelpFile.FETCHER_GVK);
    }

    /**
     * Builds the SRU "searchRetrieve" request URL for the given query.
     * Results are requested as PICA XML, at most 50 records, sorted by year.
     */
    @Override
    public URL getURLForQuery(QueryNode luceneQuery) throws URISyntaxException, MalformedURLException, FetcherException {
        String query = new GVKQueryTransformer().transformLuceneQuery(luceneQuery).orElse("");
        URIBuilder builder = new URIBuilder(URL_PATTERN)
                .addParameter("version", "1.1")
                .addParameter("operation", "searchRetrieve")
                .addParameter("query", query)
                .addParameter("maximumRecords", "50")
                .addParameter("recordSchema", "picaxml")
                .addParameter("sortKeys", "Year,,1");
        return builder.build().toURL();
    }

    @Override
    public Parser getParser() {
        return new PicaXmlParser();
    }
}
| 2,051 | 35 | 141 | java |
null | jabref-main/src/main/java/org/jabref/logic/importer/fetcher/IEEE.java | package org.jabref.logic.importer.fetcher;
import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStreamReader;
import java.net.MalformedURLException;
import java.net.URISyntaxException;
import java.net.URL;
import java.util.ArrayList;
import java.util.List;
import java.util.Objects;
import java.util.Optional;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import java.util.stream.Collectors;
import org.jabref.logic.help.HelpFile;
import org.jabref.logic.importer.FetcherException;
import org.jabref.logic.importer.FulltextFetcher;
import org.jabref.logic.importer.ImportFormatPreferences;
import org.jabref.logic.importer.ImporterPreferences;
import org.jabref.logic.importer.PagedSearchBasedParserFetcher;
import org.jabref.logic.importer.Parser;
import org.jabref.logic.importer.fetcher.transformers.IEEEQueryTransformer;
import org.jabref.logic.net.URLDownload;
import org.jabref.logic.preferences.FetcherApiKey;
import org.jabref.logic.util.BuildInfo;
import org.jabref.logic.util.OS;
import org.jabref.model.entry.BibEntry;
import org.jabref.model.entry.LinkedFile;
import org.jabref.model.entry.field.StandardField;
import org.jabref.model.entry.identifier.DOI;
import org.jabref.model.entry.types.StandardEntryType;
import kong.unirest.json.JSONArray;
import kong.unirest.json.JSONObject;
import org.apache.http.client.utils.URIBuilder;
import org.apache.lucene.queryparser.flexible.core.nodes.QueryNode;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* Class for finding PDF URLs for entries on IEEE.
* Will first look for URLs of the type <code>https://ieeexplore.ieee.org/stamp/stamp.jsp?[tp=&]arnumber=...</code>.
* If not found, will resolve the DOI, if it starts with 10.1109, and try to find a similar link on the HTML page.
*
* @see <a href="https://developer.ieee.org/docs">API documentation</a>
*/
public class IEEE implements FulltextFetcher, PagedSearchBasedParserFetcher, CustomizableKeyFetcher {
    public static final String FETCHER_NAME = "IEEEXplore";

    private static final Logger LOGGER = LoggerFactory.getLogger(IEEE.class);

    private static final String STAMP_BASE_STRING_DOCUMENT = "/stamp/stamp.jsp?tp=&arnumber=";
    private static final Pattern STAMP_PATTERN = Pattern.compile("(/stamp/stamp.jsp\\?t?p?=?&?arnumber=[0-9]+)");
    private static final Pattern DOCUMENT_PATTERN = Pattern.compile("document/([0-9]+)/");
    private static final Pattern PDF_PATTERN = Pattern.compile("\"(https://ieeexplore.ieee.org/ielx[0-9/]+\\.pdf[^\"]+)\"");
    private static final String IEEE_DOI = "10.1109";
    private static final String BASE_URL = "https://ieeexplore.ieee.org";
    private static final String API_KEY = new BuildInfo().ieeeAPIKey;
    private static final String TEST_URL_WITHOUT_API_KEY = "https://ieeexploreapi.ieee.org/api/v1/search/articles?max_records=0&apikey=";

    private final ImportFormatPreferences importFormatPreferences;
    private final ImporterPreferences importerPreferences;

    // Kept between getURLForQuery(...) and getParser(): the parser filters the fetched
    // entries by the start/end year recorded by the most recent transformer, because
    // the API does not do that filtering for us.
    private IEEEQueryTransformer transformer;

    public IEEE(ImportFormatPreferences importFormatPreferences, ImporterPreferences importerPreferences) {
        this.importFormatPreferences = Objects.requireNonNull(importFormatPreferences);
        this.importerPreferences = Objects.requireNonNull(importerPreferences);
    }

    /**
     * Converts one article object of an IEEE Xplore API response into a {@link BibEntry}.
     *
     * @param jsonEntry        one element of the "articles" array of the API response
     * @param keywordSeparator separator used when accumulating keywords on the entry
     * @implNote <a href="https://developer.ieee.org/docs/read/Metadata_API_responses">documentation</a>
     */
    private static BibEntry parseJsonResponse(JSONObject jsonEntry, Character keywordSeparator) {
        BibEntry entry = new BibEntry();
        switch (jsonEntry.optString("content_type")) {
            case "Books" -> entry.setType(StandardEntryType.Book);
            case "Conferences" -> entry.setType(StandardEntryType.InProceedings);
            case "Courses" -> entry.setType(StandardEntryType.Misc);
            default -> entry.setType(StandardEntryType.Article);
        }
        entry.setField(StandardField.ABSTRACT, jsonEntry.optString("abstract"));
        final List<String> authors = new ArrayList<>();
        JSONObject authorsContainer = jsonEntry.optJSONObject("authors");
        // optJSONObject returns null when the key is absent; guard to avoid a
        // NullPointerException (the keyword handling below already uses this pattern)
        if (authorsContainer != null) {
            authorsContainer.getJSONArray("authors").forEach(authorPure -> {
                JSONObject author = (JSONObject) authorPure;
                authors.add(author.optString("full_name"));
            });
        }
        entry.setField(StandardField.AUTHOR, String.join(" and ", authors));
        entry.setField(StandardField.LOCATION, jsonEntry.optString("conference_location"));
        entry.setField(StandardField.DOI, jsonEntry.optString("doi"));
        entry.setField(StandardField.YEAR, jsonEntry.optString("publication_year"));
        entry.setField(StandardField.PAGES, jsonEntry.optString("start_page") + "--" + jsonEntry.optString("end_page"));
        JSONObject keywordsContainer = jsonEntry.optJSONObject("index_terms");
        if (keywordsContainer != null) {
            if (keywordsContainer.has("ieee_terms")) {
                keywordsContainer.getJSONObject("ieee_terms").getJSONArray("terms").forEach(data -> {
                    String keyword = (String) data;
                    entry.addKeyword(keyword, keywordSeparator);
                });
            }
            if (keywordsContainer.has("author_terms")) {
                keywordsContainer.getJSONObject("author_terms").getJSONArray("terms").forEach(data -> {
                    String keyword = (String) data;
                    entry.addKeyword(keyword, keywordSeparator);
                });
            }
        }
        entry.setField(StandardField.ISBN, jsonEntry.optString("isbn"));
        entry.setField(StandardField.ISSN, jsonEntry.optString("issn"));
        entry.setField(StandardField.ISSUE, jsonEntry.optString("issue"));
        try {
            entry.addFile(new LinkedFile(new URL(jsonEntry.optString("pdf_url")), "PDF"));
        } catch (MalformedURLException e) {
            LOGGER.error("Fetched PDF URL String is malformed.");
        }
        entry.setField(StandardField.JOURNALTITLE, jsonEntry.optString("publication_title"));
        entry.setField(StandardField.DATE, jsonEntry.optString("publication_date"));
        entry.setField(StandardField.EVENTTITLEADDON, jsonEntry.optString("conference_location"));
        entry.setField(StandardField.EVENTDATE, jsonEntry.optString("conference_dates"));
        entry.setField(StandardField.PUBLISHER, jsonEntry.optString("publisher"));
        entry.setField(StandardField.TITLE, jsonEntry.optString("title"));
        entry.setField(StandardField.VOLUME, jsonEntry.optString("volume"));
        return entry;
    }

    /**
     * Tries to locate the full-text PDF: first via a stamp link embedded in the entry's URL,
     * then by resolving an IEEE DOI (10.1109/...) and scraping the resolved landing page.
     *
     * @throws IOException if one of the downloads fails
     */
    @Override
    public Optional<URL> findFullText(BibEntry entry) throws IOException {
        Objects.requireNonNull(entry);
        String stampString = "";
        // Try URL first -- will primarily work for entries from the old IEEE search
        Optional<String> urlString = entry.getField(StandardField.URL);
        if (urlString.isPresent()) {
            Matcher documentUrlMatcher = DOCUMENT_PATTERN.matcher(urlString.get());
            if (documentUrlMatcher.find()) {
                String docId = documentUrlMatcher.group(1);
                stampString = STAMP_BASE_STRING_DOCUMENT + docId;
            }
            // You get this url if you export bibtex from IEEE
            Matcher stampMatcher = STAMP_PATTERN.matcher(urlString.get());
            if (stampMatcher.find()) {
                // Found it
                stampString = stampMatcher.group(1);
            }
        }
        // If not, try DOI
        if (stampString.isEmpty()) {
            Optional<DOI> doi = entry.getField(StandardField.DOI).flatMap(DOI::parse);
            if (doi.isPresent() && doi.get().getDOI().startsWith(IEEE_DOI) && doi.get().getExternalURI().isPresent()) {
                // Download the HTML page from IEEE
                URLDownload urlDownload = new URLDownload(doi.get().getExternalURI().get().toURL());
                // We don't need to modify the cookies, but we need support for them
                urlDownload.getCookieFromUrl();
                String resolvedDOIPage = urlDownload.asString();
                // Try to find the link
                Matcher matcher = STAMP_PATTERN.matcher(resolvedDOIPage);
                if (matcher.find()) {
                    // Found it
                    stampString = matcher.group(1);
                }
            }
        }
        // Any success?
        if (stampString.isEmpty()) {
            return Optional.empty();
        }
        // Download the HTML page containing a frame with the PDF
        URLDownload urlDownload = new URLDownload(BASE_URL + stampString);
        // We don't need to modify the cookies, but we need support for them
        urlDownload.getCookieFromUrl();
        String framePage = urlDownload.asString();
        // Try to find the direct PDF link
        Matcher matcher = PDF_PATTERN.matcher(framePage);
        if (matcher.find()) {
            // The PDF was found
            LOGGER.debug("Full text document found on IEEE Xplore");
            return Optional.of(new URL(matcher.group(1)));
        }
        return Optional.empty();
    }

    @Override
    public TrustLevel getTrustLevel() {
        return TrustLevel.PUBLISHER;
    }

    /**
     * Parses the JSON of the "articles" endpoint and filters the result by the year range
     * recorded by the most recent {@link IEEEQueryTransformer}, since the API does not
     * apply that filter itself.
     */
    @Override
    public Parser getParser() {
        return inputStream -> {
            String response = new BufferedReader(new InputStreamReader(inputStream)).lines().collect(Collectors.joining(OS.NEWLINE));
            JSONObject jsonObject = new JSONObject(response);
            List<BibEntry> entries = new ArrayList<>();
            if (jsonObject.has("articles")) {
                JSONArray results = jsonObject.getJSONArray("articles");
                for (int i = 0; i < results.length(); i++) {
                    JSONObject jsonEntry = results.getJSONObject(i);
                    BibEntry entry = parseJsonResponse(jsonEntry, importFormatPreferences.bibEntryPreferences().getKeywordSeparator());
                    boolean addEntry;
                    // In case entry has no year, add it
                    // In case an entry has a year, check if it is in the year range
                    if (entry.hasField(StandardField.YEAR)) {
                        addEntry = entry.getField(StandardField.YEAR).filter(year -> {
                            int yearAsInteger = Integer.parseInt(year);
                            return transformer.getStartYear().map(startYear -> yearAsInteger >= startYear).orElse(true) &&
                                    transformer.getEndYear().map(endYear -> yearAsInteger <= endYear).orElse(true);
                        }).isPresent();
                    } else {
                        addEntry = true;
                    }
                    if (addEntry) {
                        entries.add(entry);
                    }
                }
            }
            return entries;
        };
    }

    @Override
    public String getName() {
        return FETCHER_NAME;
    }

    @Override
    public Optional<HelpFile> getHelpPage() {
        return Optional.of(HelpFile.FETCHER_IEEEXPLORE);
    }

    /**
     * @return the user-configured API key if one is present and enabled, otherwise the built-in key
     */
    private String getApiKey() {
        return importerPreferences.getApiKeys()
                                  .stream()
                                  .filter(key -> key.getName().equalsIgnoreCase(this.getName()))
                                  .filter(FetcherApiKey::shouldUse)
                                  .findFirst()
                                  .map(FetcherApiKey::getKey)
                                  .orElse(API_KEY);
    }

    @Override
    public String getTestUrl() {
        return TEST_URL_WITHOUT_API_KEY;
    }

    /**
     * Builds the query URL for a given page of results.
     *
     * @param pageNumber zero-based page index
     */
    @Override
    public URL getURLForQuery(QueryNode luceneQuery, int pageNumber) throws URISyntaxException, MalformedURLException, FetcherException {
        // transformer is stored globally, because we need to filter out the bib entries by the year manually
        // the transformer stores the min and max year
        transformer = new IEEEQueryTransformer();
        String transformedQuery = transformer.transformLuceneQuery(luceneQuery).orElse("");
        URIBuilder uriBuilder = new URIBuilder("https://ieeexploreapi.ieee.org/api/v1/search/articles");
        uriBuilder.addParameter("apikey", getApiKey());
        if (!transformedQuery.isBlank()) {
            uriBuilder.addParameter("querytext", transformedQuery);
        }
        uriBuilder.addParameter("max_records", String.valueOf(getPageSize()));
        // Currently not working as part of the query string
        if (transformer.getJournal().isPresent()) {
            uriBuilder.addParameter("publication_title", transformer.getJournal().get());
        }
        if (transformer.getStartYear().isPresent()) {
            uriBuilder.addParameter("start_year", String.valueOf(transformer.getStartYear().get()));
        }
        if (transformer.getEndYear().isPresent()) {
            uriBuilder.addParameter("end_year", String.valueOf(transformer.getEndYear().get()));
        }
        if (transformer.getArticleNumber().isPresent()) {
            uriBuilder.addParameter("article_number", transformer.getArticleNumber().get());
        }
        // The API indexes records starting at 1.
        // Bug fix: the "+ 1" must be applied arithmetically inside String.valueOf; previously it was
        // string-concatenated, so page 1 with page size 20 produced start_record "201" instead of "21".
        uriBuilder.addParameter("start_record", String.valueOf(getPageSize() * pageNumber + 1));
        return uriBuilder.build().toURL();
    }
}
| 13,687 | 44.932886 | 137 | java |
null | jabref-main/src/main/java/org/jabref/logic/importer/fetcher/INSPIREFetcher.java | package org.jabref.logic.importer.fetcher;
import java.io.IOException;
import java.net.MalformedURLException;
import java.net.URISyntaxException;
import java.net.URL;
import java.util.ArrayList;
import java.util.List;
import java.util.Optional;
import org.jabref.logic.cleanup.FieldFormatterCleanup;
import org.jabref.logic.formatter.bibtexfields.ClearFormatter;
import org.jabref.logic.formatter.bibtexfields.RemoveBracesFormatter;
import org.jabref.logic.help.HelpFile;
import org.jabref.logic.importer.EntryBasedFetcher;
import org.jabref.logic.importer.FetcherException;
import org.jabref.logic.importer.ImportFormatPreferences;
import org.jabref.logic.importer.ParseException;
import org.jabref.logic.importer.Parser;
import org.jabref.logic.importer.SearchBasedParserFetcher;
import org.jabref.logic.importer.fetcher.transformers.DefaultLuceneQueryTransformer;
import org.jabref.logic.importer.fileformat.BibtexParser;
import org.jabref.logic.importer.util.MediaTypes;
import org.jabref.logic.layout.format.LatexToUnicodeFormatter;
import org.jabref.logic.net.URLDownload;
import org.jabref.model.entry.BibEntry;
import org.jabref.model.entry.field.StandardField;
import org.jabref.model.entry.field.UnknownField;
import org.apache.http.client.utils.URIBuilder;
import org.apache.lucene.queryparser.flexible.core.nodes.QueryNode;
/**
* Fetches data from the INSPIRE database.
*/
public class INSPIREFetcher implements SearchBasedParserFetcher, EntryBasedFetcher {
private static final String INSPIRE_HOST = "https://inspirehep.net/api/literature/";
private static final String INSPIRE_DOI_HOST = "https://inspirehep.net/api/doi/";
private static final String INSPIRE_ARXIV_HOST = "https://inspirehep.net/api/arxiv/";
private final ImportFormatPreferences importFormatPreferences;
public INSPIREFetcher(ImportFormatPreferences preferences) {
this.importFormatPreferences = preferences;
}
@Override
public String getName() {
return "INSPIRE";
}
@Override
public Optional<HelpFile> getHelpPage() {
return Optional.of(HelpFile.FETCHER_INSPIRE);
}
@Override
public URL getURLForQuery(QueryNode luceneQuery) throws URISyntaxException, MalformedURLException, FetcherException {
URIBuilder uriBuilder = new URIBuilder(INSPIRE_HOST);
uriBuilder.addParameter("q", new DefaultLuceneQueryTransformer().transformLuceneQuery(luceneQuery).orElse(""));
return uriBuilder.build().toURL();
}
@Override
public URLDownload getUrlDownload(URL url) {
URLDownload download = new URLDownload(url);
download.addHeader("Accept", MediaTypes.APPLICATION_BIBTEX);
return download;
}
@Override
public void doPostCleanup(BibEntry entry) {
// Remove strange "SLACcitation" field
new FieldFormatterCleanup(new UnknownField("SLACcitation"), new ClearFormatter()).cleanup(entry);
// Remove braces around content of "title" field
new FieldFormatterCleanup(StandardField.TITLE, new RemoveBracesFormatter()).cleanup(entry);
new FieldFormatterCleanup(StandardField.TITLE, new LatexToUnicodeFormatter()).cleanup(entry);
}
@Override
public Parser getParser() {
return new BibtexParser(importFormatPreferences);
}
@Override
public List<BibEntry> performSearch(BibEntry entry) throws FetcherException {
List<BibEntry> results = new ArrayList<>();
Optional<String> doi = entry.getField(StandardField.DOI);
Optional<String> archiveprefix = entry.getFieldOrAlias(StandardField.ARCHIVEPREFIX);
Optional<String> eprint = entry.getField(StandardField.EPRINT);
String url;
if ("arXiv".equals(archiveprefix.get()) && !eprint.isEmpty()) {
url = INSPIRE_ARXIV_HOST + eprint.get();
} else if (!doi.isEmpty()) {
url = INSPIRE_DOI_HOST + doi.get();
} else {
return results;
}
try {
URLDownload download = getUrlDownload(new URL(url));
results = getParser().parseEntries(download.asInputStream());
results.forEach(this::doPostCleanup);
return results;
} catch (IOException | ParseException e) {
throw new FetcherException("Error occurred during fetching", e);
}
}
}
| 4,355 | 37.210526 | 121 | java |
null | jabref-main/src/main/java/org/jabref/logic/importer/fetcher/IacrEprintFetcher.java | package org.jabref.logic.importer.fetcher;
import java.io.IOException;
import java.net.URL;
import java.util.Objects;
import java.util.Optional;
import java.util.function.Predicate;
import java.util.regex.Pattern;
import org.jabref.logic.importer.FetcherException;
import org.jabref.logic.importer.FulltextFetcher;
import org.jabref.logic.importer.IdBasedFetcher;
import org.jabref.logic.importer.ImportFormatPreferences;
import org.jabref.logic.importer.ParseException;
import org.jabref.logic.importer.fileformat.BibtexParser;
import org.jabref.logic.l10n.Localization;
import org.jabref.logic.net.URLDownload;
import org.jabref.model.entry.BibEntry;
import org.jabref.model.entry.field.StandardField;
import org.jabref.model.strings.StringUtil;
public class IacrEprintFetcher implements FulltextFetcher, IdBasedFetcher {

    public static final String NAME = "IACR eprints";

    // Strips everything except digits and "/" so that e.g. "eprint 2019/1234" normalizes to "2019/1234"
    private static final Pattern WITHOUT_LETTERS_SPACE = Pattern.compile("[^0-9/]");
    // Valid IACR identifiers look like "<year>/<number>", e.g. "2019/1234"
    private static final Predicate<String> IDENTIFIER_PREDICATE = Pattern.compile("\\d{4}/\\d{3,5}").asPredicate();

    private static final String CITATION_URL_PREFIX = "https://eprint.iacr.org/";
    private static final String DESCRIPTION_URL_PREFIX = "https://eprint.iacr.org/";
    private static final String FULLTEXT_URL_PREFIX = "https://eprint.iacr.org/";
    private static final String VERSION_URL_PREFIX = "https://eprint.iacr.org/archive/versions/";

    private final ImportFormatPreferences prefs;

    public IacrEprintFetcher(ImportFormatPreferences prefs) {
        this.prefs = prefs;
    }

    /**
     * Fetches the BibTeX citation for the given IACR identifier and enriches it with
     * abstract, date, and (for reports from 2000 onwards) version information.
     *
     * @param identifier an IACR identifier, possibly with surrounding text, e.g. "eprint 2019/1234"
     * @throws FetcherException if the identifier is invalid or the data cannot be retrieved/parsed
     */
    @Override
    public Optional<BibEntry> performSearchById(String identifier) throws FetcherException {
        String identifierWithoutLettersAndSpaces = WITHOUT_LETTERS_SPACE.matcher(identifier).replaceAll(" ").trim();
        if (!IDENTIFIER_PREDICATE.test(identifierWithoutLettersAndSpaces)) {
            throw new FetcherException(Localization.lang("Invalid identifier: '%0'.", identifier));
        }
        Optional<BibEntry> entry = createEntryFromIacrCitation(identifierWithoutLettersAndSpaces);
        if (entry.isPresent()) {
            setAdditionalFields(entry.get(), identifierWithoutLettersAndSpaces);
        }
        return entry;
    }

    // Downloads the BibTeX snippet embedded in the report's citation page and parses it.
    private Optional<BibEntry> createEntryFromIacrCitation(String validIdentifier) throws FetcherException {
        String bibtexCitationHtml = getHtml(CITATION_URL_PREFIX + validIdentifier);
        if (bibtexCitationHtml.contains("No such report found")) {
            throw new FetcherException(Localization.lang("No results found."));
        }
        String actualEntry = getRequiredValueBetween("<pre id=\"bibtex\">", "</pre>", bibtexCitationHtml);
        try {
            return BibtexParser.singleFromString(actualEntry, prefs);
        } catch (ParseException e) {
            throw new FetcherException(Localization.lang("Entry from %0 could not be parsed.", "IACR"), e);
        }
    }

    // Scrapes abstract, date, and (where available) version info from the report's description page.
    private void setAdditionalFields(BibEntry entry, String identifier) throws FetcherException {
        String entryUrl = DESCRIPTION_URL_PREFIX + identifier;
        String descriptiveHtml = getHtml(entryUrl);
        entry.setField(StandardField.ABSTRACT, getAbstract(descriptiveHtml));
        entry.setField(StandardField.DATE, getDate(descriptiveHtml));
        // Version information only exists for entries from year 2000 onwards
        if (isFromOrAfterYear2000(entry)) {
            String entryVersion = VERSION_URL_PREFIX + identifier;
            String versionHtml = getHtml(entryVersion);
            String version = getVersion(identifier, versionHtml);
            entry.setField(StandardField.VERSION, version);
            entry.setField(StandardField.URL, entryUrl + "/" + version);
        }
    }

    private String getVersion(String identifier, String versionHtml) throws FetcherException {
        String startOfVersionString = "<li><a href=\"/archive/" + identifier + "/";
        String version = getRequiredValueBetween(startOfVersionString, "\">", versionHtml);
        return version;
    }

    private String getAbstract(String descriptiveHtml) throws FetcherException {
        String startOfAbstractString = "<h5 class=\"mt-3\">Abstract</h5>\n <p style=\"white-space: pre-wrap;\">";
        String abstractText = getRequiredValueBetween(startOfAbstractString, "</p>", descriptiveHtml);
        return abstractText;
    }

    private String getDate(String descriptiveHtml) throws FetcherException {
        String startOfHistoryString = "<dt>History</dt>\n \n \n <dd>";
        String dateStringAsInHtml = getRequiredValueBetween(startOfHistoryString, ":", descriptiveHtml);
        return dateStringAsInHtml;
    }

    // Thin wrapper around URLDownload that converts I/O failures into FetcherExceptions.
    private String getHtml(String url) throws FetcherException {
        try {
            URLDownload download = new URLDownload(url);
            return download.asString();
        } catch (IOException e) {
            throw new FetcherException(Localization.lang("Could not retrieve entry data from '%0'.", url), e);
        }
    }

    // Returns the substring between the markers, or fails loudly if the page layout changed.
    private String getRequiredValueBetween(String from, String to, String haystack) throws FetcherException {
        String value = StringUtil.substringBetween(haystack, from, to);
        if (value == null) {
            throw new FetcherException(Localization.lang("Entry from %0 could not be parsed.", "IACR"));
        } else {
            return value;
        }
    }

    /**
     * @return true if the entry's year is 2000 or later
     * @throws FetcherException if the entry has no year field
     */
    private boolean isFromOrAfterYear2000(BibEntry entry) throws FetcherException {
        Optional<String> yearField = entry.getField(StandardField.YEAR);
        if (yearField.isPresent()) {
            // Bug fix: the method name promises "from or after" 2000, so the comparison
            // must be inclusive; previously ">" excluded reports from the year 2000 itself.
            return Integer.parseInt(yearField.get()) >= 2000;
        }
        throw new FetcherException(Localization.lang("Entry from %0 could not be parsed.", "IACR"));
    }

    @Override
    public String getName() {
        return NAME;
    }

    /**
     * Finds the full-text PDF link on the report's description page (given via the URL field).
     */
    @Override
    public Optional<URL> findFullText(BibEntry entry) throws IOException, FetcherException {
        Objects.requireNonNull(entry);
        Optional<String> urlField = entry.getField(StandardField.URL);
        if (urlField.isPresent()) {
            String descriptiveHtml = getHtml(urlField.get());
            String startOfFulltextLink = "<a class=\"btn btn-sm btn-outline-dark\"";
            String fulltextLinkAsInHtml = getRequiredValueBetween(startOfFulltextLink, ".pdf", descriptiveHtml);
            // There is an additional "\n href=\"/archive/" we have to remove - and for some reason,
            // getRequiredValueBetween refuses to match across the line break.
            fulltextLinkAsInHtml = fulltextLinkAsInHtml.replaceFirst(".*href=\"/", "").trim();
            String fulltextLink = FULLTEXT_URL_PREFIX + fulltextLinkAsInHtml + ".pdf";
            return Optional.of(new URL(fulltextLink));
        }
        return Optional.empty();
    }

    @Override
    public TrustLevel getTrustLevel() {
        return TrustLevel.PREPRINT;
    }
}
| 6,953 | 42.4625 | 116 | java |
null | jabref-main/src/main/java/org/jabref/logic/importer/fetcher/JstorFetcher.java | package org.jabref.logic.importer.fetcher;
import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStreamReader;
import java.net.MalformedURLException;
import java.net.URISyntaxException;
import java.net.URL;
import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.List;
import java.util.Optional;
import java.util.stream.Collectors;
import org.jabref.logic.importer.FetcherException;
import org.jabref.logic.importer.FulltextFetcher;
import org.jabref.logic.importer.IdBasedParserFetcher;
import org.jabref.logic.importer.ImportFormatPreferences;
import org.jabref.logic.importer.ParseException;
import org.jabref.logic.importer.Parser;
import org.jabref.logic.importer.SearchBasedParserFetcher;
import org.jabref.logic.importer.fetcher.transformers.JstorQueryTransformer;
import org.jabref.logic.importer.fileformat.BibtexParser;
import org.jabref.logic.net.URLDownload;
import org.jabref.model.entry.BibEntry;
import org.jabref.model.entry.field.StandardField;
import org.apache.http.client.utils.URIBuilder;
import org.apache.lucene.queryparser.flexible.core.nodes.QueryNode;
import org.jsoup.Jsoup;
import org.jsoup.nodes.Document;
import org.jsoup.nodes.Element;
/**
* Fetcher for jstor.org
**/
public class JstorFetcher implements SearchBasedParserFetcher, FulltextFetcher, IdBasedParserFetcher {

    private static final String HOST = "https://www.jstor.org";
    private static final String SEARCH_HOST = HOST + "/open/search";
    private static final String CITE_HOST = HOST + "/citation/text/";
    // Matches everything after a "?" so query strings can be stripped from pasted URLs
    private static final String URL_QUERY_REGEX = "(?<=\\?).*";

    private final ImportFormatPreferences importFormatPreferences;

    public JstorFetcher(ImportFormatPreferences importFormatPreferences) {
        this.importFormatPreferences = importFormatPreferences;
    }

    @Override
    public URL getURLForQuery(QueryNode luceneQuery) throws URISyntaxException, MalformedURLException, FetcherException {
        URIBuilder uriBuilder = new URIBuilder(SEARCH_HOST);
        uriBuilder.addParameter("Query", new JstorQueryTransformer().transformLuceneQuery(luceneQuery).orElse(""));
        return uriBuilder.build().toURL();
    }

    /**
     * Maps a stable id, DOI, or pasted jstor.org URL to the citation-text endpoint.
     *
     * @param identifier a JSTOR stable id (e.g. "26494167"), a DOI, or a jstor.org/stable URL
     * @throws FetcherException if no valid URL can be constructed
     */
    @Override
    public URL getUrlForIdentifier(String identifier) throws FetcherException {
        if (identifier.startsWith("http")) {
            identifier = identifier.replace("https://www.jstor.org/stable", "");
            identifier = identifier.replace("http://www.jstor.org/stable", "");
        }
        // Drop any query string (e.g. "?seq=1") left over from a pasted URL
        identifier = identifier.replaceAll(URL_QUERY_REGEX, "");
        try {
            if (identifier.contains("/")) {
                // if identifier links to a entry with a valid doi
                return new URL(CITE_HOST + identifier);
            }
            // else use default doi start.
            return new URL(CITE_HOST + "10.2307/" + identifier);
        } catch (MalformedURLException e) {
            throw new FetcherException("could not construct url for jstor", e);
        }
    }

    /**
     * Parses either a BibTeX payload directly or a JSTOR search-result HTML page, in which
     * case the citation text of every hit is downloaded and parsed.
     */
    @Override
    public Parser getParser() {
        return inputStream -> {
            BibtexParser parser = new BibtexParser(importFormatPreferences);
            String text = new BufferedReader(
                    new InputStreamReader(inputStream, StandardCharsets.UTF_8)).lines().collect(Collectors.joining());
            // does the input stream contain bibtex ?
            if (text.startsWith("@")) {
                return parser.parseEntries(text);
            }
            // input stream contains html
            List<BibEntry> entries;
            try {
                Document doc = Jsoup.parse(inputStream, null, HOST);
                StringBuilder stringBuilder = new StringBuilder();
                List<Element> elements = doc.body().getElementsByClass("cite-this-item");
                for (Element element : elements) {
                    String id = element.attr("href").replace("citation/info/", "");
                    String data = new URLDownload(CITE_HOST + id).asString();
                    stringBuilder.append(data);
                }
                entries = new ArrayList<>(parser.parseEntries(stringBuilder.toString()));
            } catch (IOException e) {
                throw new ParseException("Could not download data from jstor.org", e);
            }
            return entries;
        };
    }

    @Override
    public String getName() {
        return "JSTOR";
    }

    /**
     * Looks for a full-text link by scraping the entry's JSTOR page for a unique
     * element carrying a "data-doi" attribute.
     */
    @Override
    public Optional<URL> findFullText(BibEntry entry) throws IOException {
        if (entry.getField(StandardField.URL).isEmpty()) {
            return Optional.empty();
        }
        String page = new URLDownload(entry.getField(StandardField.URL).get()).asString();
        Document doc = Jsoup.parse(page);
        List<Element> elements = doc.getElementsByAttribute("data-doi");
        if (elements.size() != 1) {
            return Optional.empty();
        }
        String url = elements.get(0).attr("href");
        return Optional.of(new URL(url));
    }

    @Override
    public TrustLevel getTrustLevel() {
        return TrustLevel.META_SEARCH;
    }

    @Override
    public void doPostCleanup(BibEntry entry) {
        // do nothing
    }
}
| 5,287 | 35.722222 | 121 | java |
null | jabref-main/src/main/java/org/jabref/logic/importer/fetcher/LibraryOfCongress.java | package org.jabref.logic.importer.fetcher;
import java.net.MalformedURLException;
import java.net.URISyntaxException;
import java.net.URL;
import org.jabref.logic.importer.FetcherException;
import org.jabref.logic.importer.IdBasedParserFetcher;
import org.jabref.logic.importer.ImportFormatPreferences;
import org.jabref.logic.importer.Parser;
import org.jabref.logic.importer.fileformat.ModsImporter;
import org.apache.http.client.utils.URIBuilder;
/**
* Fetcher for the Library of Congress Control Number (LCCN) using https://lccn.loc.gov/
*/
public class LibraryOfCongress implements IdBasedParserFetcher {

    private final ImportFormatPreferences importFormatPreferences;

    public LibraryOfCongress(ImportFormatPreferences importFormatPreferences) {
        this.importFormatPreferences = importFormatPreferences;
    }

    @Override
    public String getName() {
        return "Library of Congress";
    }

    /**
     * Builds the MODS record URL for the given LCCN.
     * The MODS XML of a record lives at https://lccn.loc.gov/&lt;lccn&gt;/mods
     */
    @Override
    public URL getUrlForIdentifier(String identifier) throws URISyntaxException, MalformedURLException, FetcherException {
        return new URIBuilder("https://lccn.loc.gov/" + identifier + "/mods").build().toURL();
    }

    @Override
    public Parser getParser() {
        return new ModsImporter(this.importFormatPreferences);
    }
}
| 1,325 | 30.571429 | 122 | java |
null | jabref-main/src/main/java/org/jabref/logic/importer/fetcher/MathSciNet.java | package org.jabref.logic.importer.fetcher;
import java.io.BufferedReader;
import java.io.InputStreamReader;
import java.net.MalformedURLException;
import java.net.URISyntaxException;
import java.net.URL;
import java.util.ArrayList;
import java.util.List;
import java.util.Objects;
import java.util.Optional;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import java.util.stream.Collectors;
import org.jabref.logic.cleanup.DoiCleanup;
import org.jabref.logic.cleanup.FieldFormatterCleanup;
import org.jabref.logic.cleanup.MoveFieldCleanup;
import org.jabref.logic.formatter.bibtexfields.ClearFormatter;
import org.jabref.logic.importer.EntryBasedParserFetcher;
import org.jabref.logic.importer.FetcherException;
import org.jabref.logic.importer.IdBasedParserFetcher;
import org.jabref.logic.importer.ImportFormatPreferences;
import org.jabref.logic.importer.Parser;
import org.jabref.logic.importer.SearchBasedParserFetcher;
import org.jabref.logic.importer.fetcher.transformers.DefaultQueryTransformer;
import org.jabref.logic.importer.fileformat.BibtexParser;
import org.jabref.logic.util.OS;
import org.jabref.model.entry.BibEntry;
import org.jabref.model.entry.field.AMSField;
import org.jabref.model.entry.field.StandardField;
import org.jabref.model.entry.field.UnknownField;
import org.apache.http.client.utils.URIBuilder;
import org.apache.lucene.queryparser.flexible.core.nodes.QueryNode;
/**
* Fetches data from the MathSciNet (http://www.ams.org/mathscinet)
*/
public class MathSciNet implements SearchBasedParserFetcher, EntryBasedParserFetcher, IdBasedParserFetcher {
private final ImportFormatPreferences preferences;
/**
 * @param preferences import preferences used by the BibTeX parser; must not be null
 */
public MathSciNet(ImportFormatPreferences preferences) {
    this.preferences = Objects.requireNonNull(preferences);
}
/** @return the human-readable name of this fetcher */
@Override
public String getName() {
    return "MathSciNet";
}
/**
* We use MR Lookup (http://www.ams.org/mrlookup) instead of the usual search since this tool is also available
* without subscription and, moreover, is optimized for finding a publication based on partial information.
*/
@Override
public URL getURLForEntry(BibEntry entry) throws URISyntaxException, MalformedURLException, FetcherException {
    Optional<String> mrNumber = entry.getField(StandardField.MR_NUMBER);
    if (mrNumber.isPresent()) {
        // The MR number already identifies the record uniquely, so query by id directly.
        return getUrlForIdentifier(mrNumber.get());
    }
    // Otherwise fall back to MR Lookup with whatever partial metadata the entry offers.
    URIBuilder builder = new URIBuilder("https://mathscinet.ams.org/mrlookup");
    builder.addParameter("format", "bibtex");
    entry.getFieldOrAlias(StandardField.TITLE).ifPresent(value -> builder.addParameter("ti", value));
    entry.getFieldOrAlias(StandardField.AUTHOR).ifPresent(value -> builder.addParameter("au", value));
    entry.getFieldOrAlias(StandardField.JOURNAL).ifPresent(value -> builder.addParameter("jrnl", value));
    entry.getFieldOrAlias(StandardField.YEAR).ifPresent(value -> builder.addParameter("year", value));
    return builder.build().toURL();
}
@Override
public URL getURLForQuery(QueryNode luceneQuery) throws URISyntaxException, MalformedURLException, FetcherException {
URIBuilder uriBuilder = new URIBuilder("https://mathscinet.ams.org/mathscinet/search/publications.html");
uriBuilder.addParameter("pg7", "ALLF"); // search all fields
uriBuilder.addParameter("s7", new DefaultQueryTransformer().transformLuceneQuery(luceneQuery).orElse("")); // query
uriBuilder.addParameter("r", "1"); // start index
uriBuilder.addParameter("extend", "1"); // should return up to 100 items (instead of default 10)
uriBuilder.addParameter("fmt", "bibtex"); // BibTeX format
return uriBuilder.build().toURL();
}
@Override
public URL getUrlForIdentifier(String identifier) throws URISyntaxException, MalformedURLException, FetcherException {
URIBuilder uriBuilder = new URIBuilder("https://mathscinet.ams.org/mathscinet/search/publications.html");
uriBuilder.addParameter("pg1", "MR"); // search MR number
uriBuilder.addParameter("s1", identifier); // identifier
uriBuilder.addParameter("fmt", "bibtex"); // BibTeX format
return uriBuilder.build().toURL();
}
@Override
public Parser getParser() {
// MathSciNet returns the BibTeX result embedded in HTML
// So we extract the BibTeX string from the <pre>bibtex</pre> tags and pass the content to the BibTeX parser
return inputStream -> {
String response = new BufferedReader(new InputStreamReader(inputStream)).lines().collect(Collectors.joining(OS.NEWLINE));
List<BibEntry> entries = new ArrayList<>();
BibtexParser bibtexParser = new BibtexParser(preferences);
Pattern pattern = Pattern.compile("<pre>(?s)(.*)</pre>");
Matcher matcher = pattern.matcher(response);
while (matcher.find()) {
String bibtexEntryString = matcher.group();
entries.addAll(bibtexParser.parseEntries(bibtexEntryString));
}
return entries;
};
}
@Override
public void doPostCleanup(BibEntry entry) {
new MoveFieldCleanup(AMSField.FJOURNAL, StandardField.JOURNAL).cleanup(entry);
new MoveFieldCleanup(new UnknownField("mrclass"), StandardField.KEYWORDS).cleanup(entry);
new FieldFormatterCleanup(new UnknownField("mrreviewer"), new ClearFormatter()).cleanup(entry);
new DoiCleanup().cleanup(entry);
new FieldFormatterCleanup(StandardField.URL, new ClearFormatter()).cleanup(entry);
// Remove comments: MathSciNet prepends a <pre> html tag
entry.setCommentsBeforeEntry("");
}
}
| 5,861 | 45.15748 | 133 | java |
package org.jabref.logic.importer.fetcher;
import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStreamReader;
import java.net.MalformedURLException;
import java.net.URISyntaxException;
import java.net.URL;
import java.net.URLConnection;
import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Optional;
import javax.xml.stream.XMLInputFactory;
import javax.xml.stream.XMLStreamConstants;
import javax.xml.stream.XMLStreamException;
import javax.xml.stream.XMLStreamReader;
import org.jabref.logic.cleanup.FieldFormatterCleanup;
import org.jabref.logic.formatter.bibtexfields.ClearFormatter;
import org.jabref.logic.formatter.bibtexfields.NormalizeMonthFormatter;
import org.jabref.logic.formatter.bibtexfields.NormalizeNamesFormatter;
import org.jabref.logic.help.HelpFile;
import org.jabref.logic.importer.FetcherException;
import org.jabref.logic.importer.IdBasedParserFetcher;
import org.jabref.logic.importer.Parser;
import org.jabref.logic.importer.ParserResult;
import org.jabref.logic.importer.SearchBasedFetcher;
import org.jabref.logic.importer.fetcher.transformers.MedlineQueryTransformer;
import org.jabref.logic.importer.fileformat.MedlineImporter;
import org.jabref.logic.l10n.Localization;
import org.jabref.model.entry.BibEntry;
import org.jabref.model.entry.field.StandardField;
import org.jabref.model.entry.field.UnknownField;
import org.apache.http.client.utils.URIBuilder;
import org.apache.lucene.queryparser.flexible.core.nodes.QueryNode;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * Fetch or search from PubMed <a href="http://www.ncbi.nlm.nih.gov/sites/entrez/">www.ncbi.nlm.nih.gov</a>
 * The MedlineFetcher fetches the entries from the PubMed database.
 * See <a href="https://docs.jabref.org/import-export/medlineris">docs.jabref.org</a> for a detailed documentation of the available fields.
 */
public class MedlineFetcher implements IdBasedParserFetcher, SearchBasedFetcher {

    private static final Logger LOGGER = LoggerFactory.getLogger(MedlineFetcher.class);

    // Upper bound on the number of search hits downloaded per query (esearch "retmax").
    private static final int NUMBER_TO_FETCH = 50;
    private static final String ID_URL = "https://eutils.ncbi.nlm.nih.gov/entrez/eutils/efetch.fcgi";
    private static final String SEARCH_URL = "https://eutils.ncbi.nlm.nih.gov/entrez/eutils/esearch.fcgi";

    // Total hit count reported by the last esearch call (may exceed NUMBER_TO_FETCH).
    // NOTE(review): mutable instance state written during performSearch -- presumably not thread-safe; confirm.
    private int numberOfResultsFound;

    /**
     * When using 'esearch.fcgi?db=<database>&term=<query>' we will get a list of IDs matching the query.
     * Input: Any text query (&term)
     * Output: List of UIDs matching the query
     *
     * @see <a href="https://www.ncbi.nlm.nih.gov/books/NBK25500/">www.ncbi.nlm.nih.gov/books/NBK25500/</a>
     */
    private List<String> getPubMedIdsFromQuery(String query) throws FetcherException {
        // Streaming XML walk. fetchIDs is true while we are inside <IdList>;
        // firstOccurrenceOfCount is set by a <Count> start tag so the following
        // CHARACTERS event can be parsed as the total hit count.
        boolean fetchIDs = false;
        boolean firstOccurrenceOfCount = false;
        List<String> idList = new ArrayList<>();
        try {
            URL ncbi = createSearchUrl(query);
            XMLInputFactory inputFactory = XMLInputFactory.newFactory();
            XMLStreamReader streamReader = inputFactory.createXMLStreamReader(ncbi.openStream());
            fetchLoop:
            while (streamReader.hasNext()) {
                int event = streamReader.getEventType();
                switch (event) {
                    case XMLStreamConstants.START_ELEMENT:
                        if ("Count".equals(streamReader.getName().toString())) {
                            firstOccurrenceOfCount = true;
                        }
                        if ("IdList".equals(streamReader.getName().toString())) {
                            fetchIDs = true;
                        }
                        break;
                    case XMLStreamConstants.CHARACTERS:
                        if (firstOccurrenceOfCount) {
                            numberOfResultsFound = Integer.parseInt(streamReader.getText());
                            firstOccurrenceOfCount = false;
                        }
                        if (fetchIDs) {
                            // Text nodes inside <IdList> are the PubMed UIDs.
                            idList.add(streamReader.getText());
                        }
                        break;
                    case XMLStreamConstants.END_ELEMENT:
                        // Everything relevant is listed before the IdList. So we break the loop right after the IdList tag closes.
                        if ("IdList".equals(streamReader.getName().toString())) {
                            break fetchLoop;
                        }
                }
                streamReader.next();
            }
            streamReader.close();
            return idList;
        } catch (IOException | URISyntaxException e) {
            throw new FetcherException("Unable to get PubMed IDs", Localization.lang("Unable to get PubMed IDs"), e);
        } catch (XMLStreamException e) {
            throw new FetcherException("Error while parsing ID list", Localization.lang("Error while parsing ID list"),
                    e);
        }
    }

    @Override
    public String getName() {
        return "Medline/PubMed";
    }

    @Override
    public Optional<HelpFile> getHelpPage() {
        return Optional.of(HelpFile.FETCHER_MEDLINE);
    }

    /**
     * Builds the efetch URL for one identifier (or a comma-separated list of identifiers).
     */
    @Override
    public URL getUrlForIdentifier(String identifier) throws URISyntaxException, MalformedURLException, FetcherException {
        URIBuilder uriBuilder = new URIBuilder(ID_URL);
        uriBuilder.addParameter("db", "pubmed");
        uriBuilder.addParameter("retmode", "xml");
        uriBuilder.addParameter("id", identifier);
        return uriBuilder.build().toURL();
    }

    @Override
    public Parser getParser() {
        return new MedlineImporter();
    }

    @Override
    public void doPostCleanup(BibEntry entry) {
        // Drop PubMed-only bookkeeping fields and normalize month/author formatting.
        new FieldFormatterCleanup(new UnknownField("journal-abbreviation"), new ClearFormatter()).cleanup(entry);
        new FieldFormatterCleanup(new UnknownField("status"), new ClearFormatter()).cleanup(entry);
        new FieldFormatterCleanup(new UnknownField("copyright"), new ClearFormatter()).cleanup(entry);
        new FieldFormatterCleanup(StandardField.MONTH, new NormalizeMonthFormatter()).cleanup(entry);
        new FieldFormatterCleanup(StandardField.AUTHOR, new NormalizeNamesFormatter()).cleanup(entry);
    }

    // Builds the esearch URL used by getPubMedIdsFromQuery.
    private URL createSearchUrl(String query) throws URISyntaxException, MalformedURLException {
        URIBuilder uriBuilder = new URIBuilder(SEARCH_URL);
        uriBuilder.addParameter("db", "pubmed");
        uriBuilder.addParameter("sort", "relevance");
        uriBuilder.addParameter("retmax", String.valueOf(NUMBER_TO_FETCH));
        uriBuilder.addParameter("term", query); // already lucene query
        return uriBuilder.build().toURL();
    }

    /**
     * Fetch and parse an medline item from eutils.ncbi.nlm.nih.gov.
     * The E-utilities generate a huge XML file containing all entries for the ids
     *
     * @param ids A list of IDs to search for.
     * @return Will return an empty list on error.
     */
    private List<BibEntry> fetchMedline(List<String> ids) throws FetcherException {
        try {
            // Separate the IDs with a comma to search multiple entries
            URL fetchURL = getUrlForIdentifier(String.join(",", ids));
            URLConnection data = fetchURL.openConnection();
            ParserResult result = new MedlineImporter().importDatabase(
                    new BufferedReader(new InputStreamReader(data.getInputStream(), StandardCharsets.UTF_8)));
            if (result.hasWarnings()) {
                LOGGER.warn(result.getErrorMessage());
            }
            List<BibEntry> resultList = result.getDatabase().getEntries();
            resultList.forEach(this::doPostCleanup);
            return resultList;
        } catch (URISyntaxException | MalformedURLException e) {
            throw new FetcherException("Error while generating fetch URL",
                    Localization.lang("Error while generating fetch URL"), e);
        } catch (IOException e) {
            throw new FetcherException("Error while fetching from Medline",
                    Localization.lang("Error while fetching from %0", "Medline"), e);
        }
    }

    /**
     * Two-step search: esearch resolves the query to PubMed IDs, efetch downloads the entries.
     */
    @Override
    public List<BibEntry> performSearch(QueryNode luceneQuery) throws FetcherException {
        List<BibEntry> entryList;
        MedlineQueryTransformer transformer = new MedlineQueryTransformer();
        Optional<String> transformedQuery = transformer.transformLuceneQuery(luceneQuery);
        if (transformedQuery.isEmpty() || transformedQuery.get().isBlank()) {
            return Collections.emptyList();
        } else {
            // searching for pubmed ids matching the query
            List<String> idList = getPubMedIdsFromQuery(transformedQuery.get());
            if (idList.isEmpty()) {
                LOGGER.info("No results found.");
                return Collections.emptyList();
            }
            if (numberOfResultsFound > NUMBER_TO_FETCH) {
                // NOTE(review): message hardcodes "50"; keep in sync with NUMBER_TO_FETCH.
                LOGGER.info(
                        numberOfResultsFound + " results found. Only 50 relevant results will be fetched by default.");
            }
            // pass the list of ids to fetchMedline to download them. like a id fetcher for mutliple ids
            entryList = fetchMedline(idList);
            return entryList;
        }
    }
}
| 9,429 | 42.256881 | 139 | java |
package org.jabref.logic.importer.fetcher;
import java.net.MalformedURLException;
import java.net.URISyntaxException;
import java.net.URL;
import java.util.Collections;
import java.util.Optional;
import java.util.stream.IntStream;
import org.jabref.logic.cleanup.DoiCleanup;
import org.jabref.logic.importer.FetcherException;
import org.jabref.logic.importer.IdBasedParserFetcher;
import org.jabref.logic.importer.ParseException;
import org.jabref.logic.importer.Parser;
import org.jabref.logic.importer.util.JsonReader;
import org.jabref.logic.importer.util.MediaTypes;
import org.jabref.logic.net.URLDownload;
import org.jabref.model.entry.Author;
import org.jabref.model.entry.AuthorList;
import org.jabref.model.entry.BibEntry;
import org.jabref.model.entry.field.StandardField;
import org.jabref.model.entry.types.EntryType;
import org.jabref.model.entry.types.StandardEntryType;
import kong.unirest.json.JSONArray;
import kong.unirest.json.JSONException;
import kong.unirest.json.JSONObject;
/**
 * A class for fetching DOIs from Medra
 *
 * @see <a href="https://data.medra.org">mEDRA Content Negotiation API</a> for an overview of the API
 * <p>
 * It requires "Accept" request Header attribute to be set to desired content-type.
 */
public class Medra implements IdBasedParserFetcher {

    public static final String API_URL = "https://data.medra.org";

    @Override
    public String getName() {
        return "mEDRA";
    }

    @Override
    public Parser getParser() {
        return inputStream -> {
            JSONObject json = JsonReader.toJsonObject(inputStream);
            if (json.isEmpty()) {
                return Collections.emptyList();
            }
            BibEntry entry = jsonItemToBibEntry(json);
            return Collections.singletonList(entry);
        };
    }

    /**
     * Maps one mEDRA citation-JSON object onto a {@link BibEntry}.
     *
     * @throws ParseException if a mandatory key (type, title, DOI) is missing or has the wrong type
     */
    private BibEntry jsonItemToBibEntry(JSONObject item) throws ParseException {
        try {
            // "issued" -> "date-parts" is a nested array; the first element of the first part is the year.
            String year = Optional.ofNullable(item.optJSONObject("issued"))
                                  .map(issued -> issued.optJSONArray("date-parts"))
                                  .map(dateParts -> dateParts.optJSONArray(0))
                                  .map(firstPart -> firstPart.optInt(0))
                                  .map(y -> Integer.toString(y))
                                  .orElse("");
            return new BibEntry(convertType(item.getString("type")))
                    .withField(StandardField.TITLE, item.getString("title"))
                    .withField(StandardField.AUTHOR, toAuthors(item.optJSONArray("author")))
                    .withField(StandardField.YEAR, year)
                    .withField(StandardField.DOI, item.getString("DOI"))
                    .withField(StandardField.PAGES, item.optString("page"))
                    .withField(StandardField.ISSN, item.optString("ISSN"))
                    .withField(StandardField.JOURNAL, item.optString("container-title"))
                    .withField(StandardField.PUBLISHER, item.optString("publisher"))
                    .withField(StandardField.URL, item.optString("URL"))
                    .withField(StandardField.VOLUME, item.optString("volume"));
        } catch (JSONException exception) {
            throw new ParseException("mEdRA API JSON format has changed", exception);
        }
    }

    // Journal articles map to @Article; everything else to @Misc. Null-safe on purpose.
    private EntryType convertType(String type) {
        if ("article-journal".equals(type)) {
            return StandardEntryType.Article;
        }
        return StandardEntryType.Misc;
    }

    /**
     * Joins the JSON author list into a single "First Last and ..." author string.
     *
     * @param authors the "author" array from the response; may be null
     * @return the formatted author string, or "" when no authors are given
     */
    private String toAuthors(JSONArray authors) {
        if (authors == null) {
            return "";
        }
        // input: list of {"literal":"A."}
        return IntStream.range(0, authors.length())
                        .mapToObj(authors::getJSONObject)
                        .map(Medra::toAuthor)
                        .collect(AuthorList.collect())
                        .getAsFirstLastNamesWithAnd();
    }

    // Converts one author object; a "literal" name is routed through unchanged.
    private static Author toAuthor(JSONObject author) {
        if (author.has("literal")) {
            return new Author(author.getString("literal"), "", "", "", "");
        }
        return new Author(author.optString("given", ""), "", "", author.optString("family", ""), "");
    }

    @Override
    public URLDownload getUrlDownload(URL url) {
        URLDownload citationDownload = new URLDownload(url);
        // mEDRA performs content negotiation: ask explicitly for the citation JSON flavour.
        citationDownload.addHeader("Accept", MediaTypes.CITATIONSTYLES_JSON);
        return citationDownload;
    }

    @Override
    public URL getUrlForIdentifier(String identifier) throws URISyntaxException, MalformedURLException, FetcherException {
        return new URL(String.join("/", API_URL, identifier));
    }

    @Override
    public void doPostCleanup(BibEntry entry) {
        new DoiCleanup().cleanup(entry);
    }
}
| 4,639 | 39.347826 | 122 | java |
package org.jabref.logic.importer.fetcher;
import java.io.IOException;
import java.net.URI;
import java.net.URISyntaxException;
import java.util.ArrayList;
import java.util.Calendar;
import java.util.List;
import java.util.Optional;
import org.jabref.logic.importer.EntryBasedFetcher;
import org.jabref.logic.importer.FetcherException;
import org.jabref.logic.importer.ParserResult;
import org.jabref.logic.importer.fileformat.MrDLibImporter;
import org.jabref.logic.l10n.Localization;
import org.jabref.logic.net.URLDownload;
import org.jabref.logic.util.Version;
import org.jabref.model.database.BibDatabase;
import org.jabref.model.entry.BibEntry;
import org.jabref.model.entry.field.StandardField;
import org.jabref.preferences.MrDlibPreferences;
import org.apache.http.client.utils.URIBuilder;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * This class is responsible for getting the recommendations from Mr. DLib
 */
public class MrDLibFetcher implements EntryBasedFetcher {

    private static final Logger LOGGER = LoggerFactory.getLogger(MrDLibFetcher.class);

    private static final String NAME = "MDL_FETCHER";
    // Identifies JabRef as partner application towards the Mr. DLib service.
    private static final String MDL_JABREF_PARTNER_ID = "1";
    private static final String MDL_URL = "api.mr-dlib.org";
    private static final String DEFAULT_MRDLIB_ERROR_MESSAGE = Localization.lang("Error while fetching recommendations from Mr.DLib.");

    private final String LANGUAGE;
    private final Version VERSION;
    // Metadata extracted from the most recent successful server response.
    private String heading;
    private String description;
    private String recommendationSetId;
    private final MrDlibPreferences preferences;

    public MrDLibFetcher(String language, Version version, MrDlibPreferences preferences) {
        LANGUAGE = language;
        VERSION = version;
        this.preferences = preferences;
    }

    @Override
    public String getName() {
        return NAME;
    }

    /**
     * Requests recommendations for the given entry from Mr. DLib, keyed by the entry's title.
     *
     * @param entry the entry to find related documents for
     * @return the recommended entries; empty if the entry has no title
     * @throws FetcherException if the server request or the parsing of its response fails
     */
    @Override
    public List<BibEntry> performSearch(BibEntry entry) throws FetcherException {
        Optional<String> title = entry.getLatexFreeField(StandardField.TITLE);
        if (title.isPresent()) {
            String response = makeServerRequest(title.get());
            MrDLibImporter importer = new MrDLibImporter();
            ParserResult parserResult;
            try {
                if (importer.isRecognizedFormat(response)) {
                    parserResult = importer.importDatabase(response);
                    heading = importer.getRecommendationsHeading();
                    description = importer.getRecommendationsDescription();
                    recommendationSetId = importer.getRecommendationSetId();
                } else {
                    // For displaying An ErrorMessage
                    description = DEFAULT_MRDLIB_ERROR_MESSAGE;
                    BibDatabase errorBibDataBase = new BibDatabase();
                    parserResult = new ParserResult(errorBibDataBase);
                }
            } catch (IOException e) {
                LOGGER.error(e.getMessage(), e);
                // Preserve the original cause so callers can diagnose the parse failure.
                throw new FetcherException("JSON Parser IOException.", e);
            }
            return parserResult.getDatabase().getEntries();
        } else {
            // without a title there is no reason to ask MrDLib
            return new ArrayList<>(0);
        }
    }

    /** @return the heading of the last recommendation set, or null before the first successful search */
    public String getHeading() {
        return heading;
    }

    /** @return the description of the last recommendation set, or an error message on failure */
    public String getDescription() {
        return description;
    }

    /**
     * Contact the server with the title of the selected item
     *
     * @param queryByTitle the query holds the title of the selected entry. Used to make a query to the MDL Server
     * @return Returns the server response. This is an XML document as a String.
     */
    private String makeServerRequest(String queryByTitle) throws FetcherException {
        try {
            URLDownload urlDownload = new URLDownload(constructQuery(queryByTitle));
            String response = urlDownload.asString();
            // The server escapes the XML payload; convert &gt; and &lt; back to angle brackets.
            // Plain replace() is used: the needles are literals, not regular expressions.
            response = response.replace("&gt;", ">");
            response = response.replace("&lt;", "<");
            return response;
        } catch (IOException e) {
            throw new FetcherException("Problem downloading", e);
        }
    }

    /**
     * Constructs the query based on title of the BibEntry. Adds statistical stuff to the url.
     *
     * @param queryWithTitle the title of the bib entry.
     * @return the string used to make the query at mdl server, or "" if the URI could not be built
     */
    private String constructQuery(String queryWithTitle) {
        // The encoding does not work for / so we convert them by our own
        queryWithTitle = queryWithTitle.replace("/", " ");
        URIBuilder builder = new URIBuilder();
        builder.setScheme("http");
        builder.setHost(MDL_URL);
        builder.setPath("/v2/documents/" + queryWithTitle + "/related_documents");
        builder.addParameter("partner_id", MDL_JABREF_PARTNER_ID);
        builder.addParameter("app_id", "jabref_desktop");
        builder.addParameter("app_version", VERSION.getFullVersion());
        // Optional telemetry, only sent when the user agreed to it in the preferences.
        if (preferences.shouldSendLanguage()) {
            builder.addParameter("app_lang", LANGUAGE);
        }
        if (preferences.shouldSendOs()) {
            builder.addParameter("os", System.getProperty("os.name"));
        }
        if (preferences.shouldSendTimezone()) {
            builder.addParameter("timezone", Calendar.getInstance().getTimeZone().getID());
        }
        try {
            URI uri = builder.build();
            LOGGER.trace("Request: {}", uri);
            return uri.toString();
        } catch (URISyntaxException e) {
            LOGGER.error(e.getMessage(), e);
        }
        return "";
    }
}
| 5,734 | 37.489933 | 135 | java |
package org.jabref.logic.importer.fetcher;
import java.io.IOException;
import java.net.MalformedURLException;
import java.net.URL;
import java.util.Objects;
import java.util.Optional;
import org.jabref.logic.importer.FulltextFetcher;
import org.jabref.model.entry.BibEntry;
import org.jabref.model.entry.field.StandardField;
import org.jabref.model.entry.identifier.DOI;
import kong.unirest.HttpResponse;
import kong.unirest.JsonNode;
import kong.unirest.Unirest;
import kong.unirest.UnirestException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * A fulltext fetcher that uses <a href="https://oadoi.org/">oaDOI</a>.
 *
 * API is documented at http://unpaywall.org/api/v2
 */
public class OpenAccessDoi implements FulltextFetcher {
    private static final Logger LOGGER = LoggerFactory.getLogger(FulltextFetcher.class);

    private static final String API_URL = "https://api.oadoi.org/v2/";

    /**
     * Resolves the entry's DOI (if any) to an open-access full-text URL via oaDOI.
     *
     * @param entry the entry whose DOI field is consulted
     * @return the full-text URL, or empty when the entry has no parsable DOI or no OA location exists
     * @throws IOException when the oaDOI request fails
     */
    @Override
    public Optional<URL> findFullText(BibEntry entry) throws IOException {
        Objects.requireNonNull(entry);

        Optional<DOI> doi = entry.getField(StandardField.DOI).flatMap(DOI::parse);
        if (doi.isEmpty()) {
            return Optional.empty();
        }
        try {
            return findFullText(doi.get());
        } catch (UnirestException e) {
            throw new IOException(e);
        }
    }

    @Override
    public TrustLevel getTrustLevel() {
        return TrustLevel.META_SEARCH;
    }

    /**
     * Queries oaDOI for the "best open access location" of the given DOI.
     */
    public Optional<URL> findFullText(DOI doi) throws UnirestException {
        HttpResponse<JsonNode> response = Unirest.get(API_URL + doi.getDOI() + "?email=developers@jabref.org")
                                                 .header("accept", "application/json")
                                                 .asJson();
        // Walk response -> body -> JSON root -> best_oa_location -> url; any missing step yields empty.
        return Optional.ofNullable(response.getBody())
                       .map(JsonNode::getObject)
                       .map(root -> root.optJSONObject("best_oa_location"))
                       .map(location -> location.optString("url"))
                       .flatMap(OpenAccessDoi::parseUrl);
    }

    // Turns the raw URL string into a URL, logging (not propagating) malformed values.
    private static Optional<URL> parseUrl(String url) {
        try {
            return Optional.of(new URL(url));
        } catch (MalformedURLException e) {
            LOGGER.debug("Could not determine URL to fetch full text from", e);
            return Optional.empty();
        }
    }
}
| 2,671 | 33.701299 | 109 | java |
package org.jabref.logic.importer.fetcher;
import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStreamReader;
import java.net.URISyntaxException;
import java.net.URL;
import java.util.ArrayList;
import java.util.List;
import java.util.Objects;
import java.util.Optional;
import java.util.stream.Collectors;
import org.jabref.logic.importer.EntryBasedFetcher;
import org.jabref.logic.importer.FetcherException;
import org.jabref.logic.importer.FulltextFetcher;
import org.jabref.logic.importer.ImportFormatPreferences;
import org.jabref.logic.importer.ParseException;
import org.jabref.logic.importer.SearchBasedFetcher;
import org.jabref.logic.importer.fetcher.transformers.DefaultQueryTransformer;
import org.jabref.logic.importer.fileformat.BibtexParser;
import org.jabref.logic.layout.format.RTFChars;
import org.jabref.logic.net.URLDownload;
import org.jabref.logic.util.OS;
import org.jabref.model.entry.BibEntry;
import org.jabref.model.entry.field.StandardField;
import org.jabref.model.entry.identifier.DOI;
import org.apache.http.client.utils.URIBuilder;
import org.apache.lucene.queryparser.flexible.core.nodes.QueryNode;
import org.jsoup.Connection;
import org.jsoup.Jsoup;
import org.jsoup.nodes.Document;
import org.jsoup.select.Elements;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * Fetcher that scrapes ResearchGate: finds full-text PDFs, searches by query, and looks up entries,
 * partly by routing searches through Google (site:researchgate.net).
 */
public class ResearchGate implements FulltextFetcher, EntryBasedFetcher, SearchBasedFetcher {

    private static final Logger LOGGER = LoggerFactory.getLogger(ResearchGate.class);
    private static final String HOST = "https://www.researchgate.net/";
    private static final String GOOGLE_SEARCH = "https://www.google.com/search?q=";
    private static final String GOOGLE_SITE = "%20site:researchgate.net";
    private static final String SEARCH = "https://www.researchgate.net/search.Search.html?";
    // Endpoint that serves a BibTeX citation for a numerical publication id (appended to this prefix).
    private static final String SEARCH_FOR_BIB_ENTRY = "https://www.researchgate.net/lite.publication.PublicationDownloadCitationModal.downloadCitation.html?fileType=BibTeX&citation=citationAndAbstract&publicationUid=";

    // Used by the BibtexParser when converting downloaded citations.
    private final ImportFormatPreferences formatPreferences;

    public ResearchGate(ImportFormatPreferences importFormatPreferences) {
        this.formatPreferences = importFormatPreferences;
    }

    /**
     * Tries to find a fulltext URL for a given BibTex entry.
     * <p>
     * Search by title first, as DOI is not searchable directly. When the title is not present, the search is made with DOI via google.com with site:researchgate.net
     *
     * @param entry The Bibtex entry
     * @return The fulltext PDF URL Optional, if found, or an empty Optional if not found.
     * @throws IOException if an IO operation has failed
     * @throws FetcherException if the ResearchGate refuses to serve the page
     */
    @Override
    public Optional<URL> findFullText(BibEntry entry) throws IOException, FetcherException {
        Objects.requireNonNull(entry);
        Document html;
        try {
            html = getHTML(entry);
        } catch (FetcherException | NullPointerException e) {
            LOGGER.debug("ResearchGate server is not available");
            return Optional.empty();
        }
        // Look for an absolute https link to a .pdf inside a <section> element.
        Elements eLink = html.getElementsByTag("section");
        String link = eLink.select("a[href^=https]").select("a[href$=.pdf]").attr("href");
        LOGGER.debug("PDF link: {}", link);
        if (link.contains("researchgate.net")) {
            return Optional.of(new URL(link));
        }
        return Optional.empty();
    }

    /**
     * Fetches the ResearchGate publication page for the entry, trying a title search first
     * and falling back to a DOI-based Google search.
     *
     * @throws FetcherException when neither title nor DOI leads to a page
     */
    private Document getHTML(BibEntry entry) throws FetcherException, IOException {
        // DOI search
        Optional<String> title = entry.getField(StandardField.TITLE);
        Optional<DOI> doi = entry.getField(StandardField.DOI).flatMap(DOI::parse);
        // Retrieve PDF link
        Optional<String> linkForSearch;
        if (title.isPresent()) {
            LOGGER.trace("Search by Title");
            linkForSearch = getURLByString(title.get());
            if (linkForSearch.isPresent()) {
                Connection connection = Jsoup.connect(linkForSearch.get());
                return connection
                        .cookieStore(connection.cookieStore())
                        .userAgent(URLDownload.USER_AGENT)
                        .referrer("www.google.com")
                        .ignoreHttpErrors(true)
                        .get();
            }
        }
        if (doi.isPresent()) {
            LOGGER.trace("Search by DOI");
            // Retrieve PDF link
            linkForSearch = getURLByDoi(doi.get());
            if (linkForSearch.isPresent()) {
                Connection connection = Jsoup.connect(linkForSearch.get());
                return connection
                        .cookieStore(connection.cookieStore())
                        .userAgent(URLDownload.USER_AGENT)
                        .ignoreHttpErrors(true)
                        .get();
            }
        }
        throw new FetcherException("Could not find a pdf");
    }

    /**
     * Resolves a free-text query (usually a title) to the first matching ResearchGate publication URL.
     *
     * @return the publication page URL, or empty when the search URI could not be built
     */
    Optional<String> getURLByString(String query) throws IOException, NullPointerException {
        URIBuilder source;
        String link;
        try {
            source = new URIBuilder(SEARCH);
            source.addParameter("type", "publication");
            source.addParameter("query", query);
            // Prime cookies for the subsequent Jsoup request.
            URLDownload urlDownload = new URLDownload(source.toString());
            urlDownload.getCookieFromUrl();
            Document html = Jsoup.connect(source.toString())
                                 .userAgent(URLDownload.USER_AGENT)
                                 .referrer("www.google.com")
                                 .ignoreHttpErrors(true)
                                 .get();
            // First relative "publication/..." link in the results, made absolute against HOST.
            link = HOST + Objects.requireNonNull(html.getElementById("content"))
                                 .select("a[href^=publication/]")
                                 .attr("href");
            if (link.contains("?")) {
                // Strip tracking query parameters.
                link = link.substring(0, link.indexOf("?"));
            }
        } catch (URISyntaxException e) {
            return Optional.empty();
        }
        LOGGER.trace("URL for page: {}", link);
        return Optional.of(link);
    }

    /**
     * Resolves a DOI to a ResearchGate page URL via a Google "site:researchgate.net" search.
     *
     * @return the first search-result link, or empty when the URI could not be built
     */
    Optional<String> getURLByDoi(DOI doi) throws IOException, NullPointerException {
        String link;
        try {
            // NOTE(review): this SEARCH-based builder is discarded by the reassignment below;
            // only the Google query is actually used -- dead code candidate.
            URIBuilder source = new URIBuilder(SEARCH);
            source.addParameter("type", "publication");
            source.addParameter("query", doi.getDOI());
            source = new URIBuilder(GOOGLE_SEARCH + doi.getDOI() + GOOGLE_SITE);
            Connection connection = Jsoup.connect(source.toString());
            Document html = connection
                    .cookieStore(connection.cookieStore())
                    .userAgent(URLDownload.USER_AGENT)
                    .ignoreHttpErrors(true)
                    .get();
            link = Objects.requireNonNull(html.getElementById("search"))
                          .select("a").attr("href");
        } catch (URISyntaxException e) {
            return Optional.empty();
        }
        LOGGER.trace("URL for page: {}", link);
        return Optional.of(link);
    }

    /**
     * Constructs a URL based on the query, size and page number.
     * <p>
     * Extract the numerical internal ID and add it to the URL to receive a link to a {@link BibEntry}
     *
     * @param luceneQuery the search query.
     * @return A URL that lets us download a .bib file
     * @throws URISyntaxException from {@link URIBuilder}'s build() method
     * @throws IOException from {@link Connection}'s get() method
     */
    private Document getPage(QueryNode luceneQuery) throws URISyntaxException, IOException {
        String query = new DefaultQueryTransformer().transformLuceneQuery(luceneQuery).orElse("");
        URIBuilder source = new URIBuilder(SEARCH);
        source.addParameter("type", "publication");
        source.addParameter("query", query);
        return Jsoup.connect(source.build().toString())
                    .userAgent(URLDownload.USER_AGENT)
                    .referrer("www.google.com")
                    .ignoreHttpErrors(true)
                    .get();
    }

    @Override
    public TrustLevel getTrustLevel() {
        return TrustLevel.META_SEARCH;
    }

    /**
     * This method is used to send complex queries using fielded search.
     *
     * @param luceneQuery the root node of the lucene query
     * @return a list of {@link BibEntry}, which are matched by the query (maybe empty)
     * @throws FetcherException if the ResearchGate refuses to serve the page
     */
    @Override
    public List<BibEntry> performSearch(QueryNode luceneQuery) throws FetcherException {
        Document html;
        try {
            html = getPage(luceneQuery);
            // ResearchGate's server blocks when too many request are made
            if (!html.getElementsByClass("nova-legacy-v-publication-item__title").hasText()) {
                throw new FetcherException("ResearchGate server unavailable");
            }
        } catch (URISyntaxException | IOException e) {
            throw new FetcherException("URL is not correct", e);
        }
        // Extract the numerical publication id from each result link ("publication/<id>_<slug>"),
        // download the corresponding BibTeX citation, and collect the raw citation strings.
        Elements sol = html.getElementsByClass("nova-legacy-v-publication-item__title");
        List<String> urls = sol.select("a").eachAttr("href").stream()
                               .filter(stream -> stream.contains("publication/"))
                               .map(resultStream -> resultStream.substring(resultStream.indexOf("publication/") + 12, resultStream.indexOf("_")))
                               .map(idStream -> SEARCH_FOR_BIB_ENTRY + idStream)
                               .map(this::getInputStream)
                               .filter(Objects::nonNull)
                               .map(stream -> stream.lines().collect(Collectors.joining(OS.NEWLINE)))
                               .toList();
        List<BibEntry> list = new ArrayList<>();
        for (String bib : urls) {
            BibtexParser parser = new BibtexParser(formatPreferences);
            Optional<BibEntry> entry;
            try {
                entry = parser.parseSingleEntry(bib);
                entry.ifPresent(list::add);
            } catch (ParseException e) {
                LOGGER.debug("Entry is not convertible to Bibtex", e);
            }
        }
        return list;
    }

    // Opens a reader on the URL, returning null (logged) on failure so the stream pipeline can filter it out.
    private BufferedReader getInputStream(String urlString) {
        try {
            URL url = new URL(urlString);
            return new BufferedReader(new InputStreamReader(url.openStream()));
        } catch (IOException e) {
            LOGGER.debug("Wrong URL:", e);
        }
        return null;
    }

    @Override
    public String getName() {
        return "ResearchGate";
    }

    /**
     * Looks for hits which are matched by the given {@link BibEntry}.
     *
     * @param entry entry to search bibliographic information for
     * @return a list of {@link BibEntry}, which are matched by the query (maybe empty)
     * @throws FetcherException if the ResearchGate refuses to serve the page
     */
    @Override
    public List<BibEntry> performSearch(BibEntry entry) throws FetcherException {
        Optional<String> title = entry.getTitle();
        if (title.isEmpty()) {
            return new ArrayList<>();
        }
        // Strip LaTeX/RTF markup from the title before using it as a free-text query.
        return performSearch(new RTFChars().format(title.get()));
    }
}
| 11,449 | 40.636364 | 219 | java |
package org.jabref.logic.importer.fetcher;
import java.net.MalformedURLException;
import java.net.URISyntaxException;
import java.net.URL;
import java.util.Locale;
import java.util.Optional;
import org.jabref.logic.help.HelpFile;
import org.jabref.logic.importer.FetcherException;
import org.jabref.logic.importer.IdBasedParserFetcher;
import org.jabref.logic.importer.ImportFormatPreferences;
import org.jabref.logic.importer.Parser;
import org.jabref.logic.importer.fileformat.BibtexParser;
import org.apache.http.client.utils.URIBuilder;
/*
* https://datatracker.ietf.org
* IETF (Internet Engineering Task Force) datatracker contains data about the documents,
* working groups, meetings, agendas, minutes, presentations, and more, of the IETF.
*/
public class RfcFetcher implements IdBasedParserFetcher {

    // Canonical Java modifier order is "static final" (was "final static").
    private static final String DRAFT_PREFIX = "draft";

    private final ImportFormatPreferences importFormatPreferences;

    /**
     * @param importFormatPreferences preferences handed to the BibTeX parser
     */
    public RfcFetcher(ImportFormatPreferences importFormatPreferences) {
        this.importFormatPreferences = importFormatPreferences;
    }

    @Override
    public String getName() {
        return "RFC";
    }

    @Override
    public Optional<HelpFile> getHelpPage() {
        return Optional.of(HelpFile.FETCHER_RFC);
    }

    /**
     * Get the URL of the RFC resource according to the given identifier.
     * A purely numerical identifier (e.g. "7231") is prefixed with "rfc";
     * identifiers already starting with "rfc" or "draft" are used as-is.
     *
     * @param identifier the ID
     * @return the URL of the RFC resource on the IETF datatracker
     */
    @Override
    public URL getUrlForIdentifier(String identifier) throws URISyntaxException, MalformedURLException, FetcherException {
        String prefixedIdentifier = identifier.toLowerCase(Locale.ENGLISH);
        // if not a "draft" version and not already rfc-prefixed
        if ((!prefixedIdentifier.startsWith(DRAFT_PREFIX)) && (!prefixedIdentifier.startsWith("rfc"))) {
            // Add "rfc" prefix if user's search entry was numerical
            prefixedIdentifier = "rfc" + prefixedIdentifier;
        }
        URIBuilder uriBuilder = new URIBuilder("https://datatracker.ietf.org/doc/" + prefixedIdentifier + "/bibtex/");
        return uriBuilder.build().toURL();
    }

    @Override
    public Parser getParser() {
        // The datatracker endpoint returns BibTeX, parsed with the standard parser.
        return new BibtexParser(importFormatPreferences);
    }
}
| 2,221 | 33.184615 | 122 | java |
null | jabref-main/src/main/java/org/jabref/logic/importer/fetcher/ScienceDirect.java | package org.jabref.logic.importer.fetcher;
import java.io.IOException;
import java.net.MalformedURLException;
import java.net.URL;
import java.util.Objects;
import java.util.Optional;
import java.util.stream.Collectors;
import org.jabref.logic.importer.FulltextFetcher;
import org.jabref.logic.importer.ImporterPreferences;
import org.jabref.logic.net.URLDownload;
import org.jabref.logic.preferences.FetcherApiKey;
import org.jabref.logic.util.BuildInfo;
import org.jabref.model.entry.BibEntry;
import org.jabref.model.entry.field.StandardField;
import org.jabref.model.entry.identifier.DOI;
import kong.unirest.HttpResponse;
import kong.unirest.JsonNode;
import kong.unirest.Unirest;
import kong.unirest.UnirestException;
import kong.unirest.json.JSONArray;
import kong.unirest.json.JSONException;
import kong.unirest.json.JSONObject;
import org.jsoup.Jsoup;
import org.jsoup.nodes.Document;
import org.jsoup.nodes.Node;
import org.jsoup.select.Elements;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* FulltextFetcher implementation that attempts to find a PDF URL at <a href="https://www.sciencedirect.com/">ScienceDirect</a>.
* See <a href="https://dev.elsevier.com/">https://dev.elsevier.com/</a>.
*/
public class ScienceDirect implements FulltextFetcher, CustomizableKeyFetcher {
    private static final Logger LOGGER = LoggerFactory.getLogger(ScienceDirect.class);
    // Elsevier "article by DOI" endpoint; the DOI is appended to this base URL.
    private static final String API_URL = "https://api.elsevier.com/content/article/doi/";
    // Default key bundled with the build; a user-configured key takes precedence (see getApiKey()).
    private static final String API_KEY = new BuildInfo().scienceDirectApiKey;
    private static final String FETCHER_NAME = "ScienceDirect";
    private final ImporterPreferences importerPreferences;
    public ScienceDirect(ImporterPreferences importerPreferences) {
        this.importerPreferences = importerPreferences;
    }
    /**
     * Tries to locate a fulltext PDF for the given entry.
     * <p>
     * Requires a DOI. The DOI is resolved to a ScienceDirect article page via the Elsevier
     * API, and the page is then scraped for a PDF link: first via the "citation_pdf_url"
     * meta tag, then via JSON embedded in the page's script elements.
     *
     * @param entry the entry to look up (must not be null)
     * @return the PDF URL, or an empty Optional if none could be determined
     * @throws IOException if fetching the article web page fails
     */
    @Override
    public Optional<URL> findFullText(BibEntry entry) throws IOException {
        Objects.requireNonNull(entry);
        Optional<DOI> doi = entry.getField(StandardField.DOI).flatMap(DOI::parse);
        if (doi.isEmpty()) {
            // Full text fetching works only if a DOI is present
            return Optional.empty();
        }
        String urlFromDoi = getUrlByDoi(doi.get().getDOI());
        if (urlFromDoi.isEmpty()) {
            return Optional.empty();
        }
        // Scrape the web page as desktop client (not as mobile client!)
        Document html = Jsoup.connect(urlFromDoi)
                             .userAgent(URLDownload.USER_AGENT)
                             .referrer("https://www.google.com")
                             .ignoreHttpErrors(true)
                             .get();
        // Retrieve PDF link from meta data (most recent)
        Elements metaLinks = html.getElementsByAttributeValue("name", "citation_pdf_url");
        if (!metaLinks.isEmpty()) {
            String link = metaLinks.first().attr("content");
            return Optional.of(new URL(link));
        }
        // We use the ScienceDirect web page which contains the article (presented using HTML).
        // This page contains the link to the PDF in some JavaScript code embedded in the web page.
        // Example page: https://www.sciencedirect.com/science/article/pii/S1674775515001079
        Optional<JSONObject> pdfDownloadOptional = html
                .getElementsByAttributeValue("type", "application/json")
                .stream()
                .flatMap(element -> element.getElementsByTag("script").stream())
                // The first DOM child of the script element is the script itself (represented as HTML text)
                .map(element -> element.childNode(0))
                .map(Node::toString)
                .map(JSONObject::new)
                .filter(json -> json.has("article"))
                .map(json -> json.getJSONObject("article"))
                .filter(json -> json.has("pdfDownload"))
                .map(json -> json.getJSONObject("pdfDownload"))
                .findAny();
        if (pdfDownloadOptional.isEmpty()) {
            LOGGER.debug("No 'pdfDownload' key found in JSON information");
            return Optional.empty();
        }
        JSONObject pdfDownload = pdfDownloadOptional.get();
        String fullLinkToPdf;
        // Two known page variants: a direct "linkToPdf" path, or "urlMetadata" parts
        // from which the PDF URL has to be reassembled.
        if (pdfDownload.has("linkToPdf")) {
            String linkToPdf = pdfDownload.getString("linkToPdf");
            // linkToPdf is site-relative; prepend scheme and host of the article page
            URL url = new URL(urlFromDoi);
            fullLinkToPdf = String.format("%s://%s%s", url.getProtocol(), url.getAuthority(), linkToPdf);
        } else if (pdfDownload.has("urlMetadata")) {
            JSONObject urlMetadata = pdfDownload.getJSONObject("urlMetadata");
            JSONObject queryParamsObject = urlMetadata.getJSONObject("queryParams");
            // Rebuild the query string "key1=val1&key2=val2..." from the JSON object
            String queryParameters = queryParamsObject.keySet().stream()
                                                      .map(key -> String.format("%s=%s", key, queryParamsObject.getString(key)))
                                                      .collect(Collectors.joining("&"));
            fullLinkToPdf = String.format("https://www.sciencedirect.com/%s/%s%s?%s",
                    urlMetadata.getString("path"),
                    urlMetadata.getString("pii"),
                    urlMetadata.getString("pdfExtension"),
                    queryParameters);
        } else {
            LOGGER.debug("No suitable data in JSON information");
            return Optional.empty();
        }
        LOGGER.info("Fulltext PDF found at ScienceDirect at {}.", fullLinkToPdf);
        try {
            return Optional.of(new URL(fullLinkToPdf));
        } catch (MalformedURLException e) {
            LOGGER.error("malformed URL", e);
            return Optional.empty();
        }
    }
    @Override
    public TrustLevel getTrustLevel() {
        // Links come directly from the publisher's own site.
        return TrustLevel.PUBLISHER;
    }
    /**
     * Resolves a DOI to the ScienceDirect article page URL via the Elsevier API.
     *
     * @param doi the DOI to resolve
     * @return the "scidir" link for the article, or an empty string if none was found
     *         or the API response could not be parsed
     */
    private String getUrlByDoi(String doi) throws UnirestException {
        String sciLink = "";
        try {
            String request = API_URL + doi;
            HttpResponse<JsonNode> jsonResponse = Unirest.get(request)
                                                         .header("X-ELS-APIKey", this.getApiKey())
                                                         .queryString("httpAccept", "application/json")
                                                         .asJson();
            JSONObject json = jsonResponse.getBody().getObject();
            JSONArray links = json.getJSONObject("full-text-retrieval-response")
                                  .getJSONObject("coredata")
                                  .getJSONArray("link");
            // Pick the link flagged "scidir" (last one wins if several are present)
            for (int i = 0; i < links.length(); i++) {
                JSONObject link = links.getJSONObject(i);
                if (link.getString("@rel").equals("scidir")) {
                    sciLink = link.getString("@href");
                }
            }
            return sciLink;
        } catch (JSONException e) {
            // Response did not have the expected structure; treat as "not found"
            LOGGER.debug("No ScienceDirect link found in API request", e);
            return sciLink;
        }
    }
    @Override
    public String getName() {
        return FETCHER_NAME;
    }
    /**
     * Returns the API key to use: the first enabled user-configured key named like this
     * fetcher, or the bundled default key.
     */
    private String getApiKey() {
        return importerPreferences.getApiKeys()
                                  .stream()
                                  .filter(key -> key.getName().equalsIgnoreCase(this.getName()))
                                  .filter(FetcherApiKey::shouldUse)
                                  .findFirst()
                                  .map(FetcherApiKey::getKey)
                                  .orElse(API_KEY);
    }
}
| 7,583 | 41.133333 | 128 | java |
null | jabref-main/src/main/java/org/jabref/logic/importer/fetcher/SemanticScholar.java | package org.jabref.logic.importer.fetcher;
import java.io.IOException;
import java.net.MalformedURLException;
import java.net.URISyntaxException;
import java.net.URL;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Objects;
import java.util.Optional;
import java.util.stream.Collectors;
import java.util.stream.IntStream;
import org.jabref.logic.importer.EntryBasedFetcher;
import org.jabref.logic.importer.FetcherException;
import org.jabref.logic.importer.FulltextFetcher;
import org.jabref.logic.importer.PagedSearchBasedParserFetcher;
import org.jabref.logic.importer.ParseException;
import org.jabref.logic.importer.Parser;
import org.jabref.logic.importer.fetcher.transformers.DefaultQueryTransformer;
import org.jabref.logic.importer.util.JsonReader;
import org.jabref.logic.net.URLDownload;
import org.jabref.model.entry.BibEntry;
import org.jabref.model.entry.field.StandardField;
import org.jabref.model.entry.identifier.ArXivIdentifier;
import org.jabref.model.entry.identifier.DOI;
import org.jabref.model.entry.types.StandardEntryType;
import kong.unirest.json.JSONArray;
import kong.unirest.json.JSONException;
import kong.unirest.json.JSONObject;
import org.apache.http.client.utils.URIBuilder;
import org.apache.lucene.queryparser.flexible.core.nodes.QueryNode;
import org.jsoup.Jsoup;
import org.jsoup.nodes.Document;
import org.jsoup.select.Elements;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
public class SemanticScholar implements FulltextFetcher, PagedSearchBasedParserFetcher, EntryBasedFetcher {
private static final Logger LOGGER = LoggerFactory.getLogger(SemanticScholar.class);
private static final String SOURCE_ID_SEARCH = "https://api.semanticscholar.org/v1/paper/";
private static final String SOURCE_WEB_SEARCH = "https://api.semanticscholar.org/graph/v1/paper/search?";
/**
* Tries to find a fulltext URL for a given BibTex entry.
* <p>
* Uses the DOI if present, otherwise the arXiv identifier.
*
* @param entry The Bibtex entry
* @return The fulltext PDF URL Optional, if found, or an empty Optional if not found.
* @throws IOException if a page could not be fetched correctly
* @throws FetcherException if the received page differs from what was expected
*/
@Override
public Optional<URL> findFullText(BibEntry entry) throws IOException, FetcherException {
Objects.requireNonNull(entry);
Optional<DOI> doi = entry.getField(StandardField.DOI).flatMap(DOI::parse);
Optional<ArXivIdentifier> arXiv = entry.getField(StandardField.EPRINT).flatMap(ArXivIdentifier::parse);
Document html = null;
if (doi.isPresent()) {
try {
// Retrieve PDF link
String source = SOURCE_ID_SEARCH + doi.get().getDOI();
html = Jsoup.connect(getURLBySource(source))
.userAgent(URLDownload.USER_AGENT)
.referrer("https://www.google.com")
.ignoreHttpErrors(true)
.get();
} catch (IOException e) {
LOGGER.info("Error for pdf lookup with DOI");
}
}
if (arXiv.isPresent() && entry.getField(StandardField.EPRINT).isPresent()) {
// Check if entry is a match
String arXivString = entry.getField(StandardField.EPRINT).get();
if (!arXivString.startsWith("arXiv:")) {
arXivString = "arXiv:" + arXivString;
}
String source = SOURCE_ID_SEARCH + arXivString;
html = Jsoup.connect(getURLBySource(source))
.userAgent(URLDownload.USER_AGENT)
.referrer("https://www.google.com")
.ignoreHttpErrors(true)
.get();
}
if (html == null) {
return Optional.empty();
}
// Retrieve PDF link from button on the webpage
// First checked is a drop-down menu, as it has the correct URL if present
// Else take the primary button
Elements metaLinks = html.getElementsByClass("flex-item alternate-sources__dropdown");
String link = metaLinks.select("a").attr("href");
if (link.length() < 10) {
metaLinks = html.getElementsByClass("flex-paper-actions__button--primary");
link = metaLinks.select("a").attr("href");
}
if (link.isBlank()) {
return Optional.empty();
}
LOGGER.info("Fulltext PDF found @ SemanticScholar. Link: {}", link);
return Optional.of(new URL(link));
}
@Override
public TrustLevel getTrustLevel() {
return TrustLevel.META_SEARCH;
}
String getURLBySource(String source) throws IOException, FetcherException {
URLDownload download = new URLDownload(source);
JSONObject json = new JSONObject(download.asString());
LOGGER.debug("URL for source: {}", json.get("url").toString());
if (!json.has("url")) {
throw new FetcherException("Page does not contain field \"url\"");
}
return json.get("url").toString();
}
@Override
public URL getURLForQuery(QueryNode luceneQuery, int pageNumber) throws URISyntaxException, MalformedURLException, FetcherException {
URIBuilder uriBuilder = new URIBuilder(SOURCE_WEB_SEARCH);
uriBuilder.addParameter("query", new DefaultQueryTransformer().transformLuceneQuery(luceneQuery).orElse(""));
uriBuilder.addParameter("offset", String.valueOf(pageNumber * getPageSize()));
uriBuilder.addParameter("limit", String.valueOf(Math.min(getPageSize(), 10000 - pageNumber * getPageSize())));
// All fields need to be specified
uriBuilder.addParameter("fields", "paperId,externalIds,url,title,abstract,venue,year,authors");
LOGGER.debug("URL for query: {}", uriBuilder.build().toURL());
return uriBuilder.build().toURL();
}
/**
* Returns the parser used to convert the response to a list of {@link BibEntry}.
*/
@Override
public Parser getParser() {
return inputStream -> {
JSONObject response = JsonReader.toJsonObject(inputStream);
LOGGER.debug("Response for Parser: {}", response);
if (response.isEmpty()) {
return Collections.emptyList();
}
int total = response.getInt("total");
if (total == 0) {
return Collections.emptyList();
} else if (response.has("next")) {
total = Math.min(total, response.getInt("next") - response.getInt("offset"));
}
// Response contains a list
JSONArray items = response.getJSONArray("data");
List<BibEntry> entries = new ArrayList<>(items.length());
for (int i = 0; i < total; i++) {
JSONObject item = items.getJSONObject(i);
BibEntry entry = jsonItemToBibEntry(item);
entries.add(entry);
}
return entries;
};
}
/**
* This is copy-paste from CrossRef, need to be checked.
*
* @param item an entry received, needs to be parsed into a BibEntry
* @return The BibEntry that corresponds to the received object
* @throws ParseException if the JSONObject could not be parsed
*/
private BibEntry jsonItemToBibEntry(JSONObject item) throws ParseException {
try {
BibEntry entry = new BibEntry(StandardEntryType.Article);
entry.setField(StandardField.URL, item.optString("url"));
entry.setField(StandardField.TITLE, item.optString("title"));
entry.setField(StandardField.ABSTRACT, item.optString("abstract"));
entry.setField(StandardField.VENUE, item.optString("venue"));
entry.setField(StandardField.YEAR, item.optString("year"));
entry.setField(StandardField.AUTHOR,
IntStream.range(0, item.optJSONArray("authors").length())
.mapToObj(item.optJSONArray("authors")::getJSONObject)
.map(author -> author.has("name") ? author.getString("name") : "")
.collect(Collectors.joining(" and ")));
JSONObject externalIds = item.optJSONObject("externalIds");
entry.setField(StandardField.DOI, externalIds.optString("DOI"));
if (externalIds.has("ArXiv")) {
entry.setField(StandardField.EPRINT, externalIds.getString("ArXiv"));
entry.setField(StandardField.EPRINTTYPE, "arXiv");
}
entry.setField(StandardField.PMID, externalIds.optString("PubMed"));
return entry;
} catch (JSONException exception) {
throw new ParseException("SemanticScholar API JSON format has changed", exception);
}
}
/**
* Returns the localized name of this fetcher. The title can be used to display the fetcher in the menu and in the side pane.
*
* @return the localized name
*/
@Override
public String getName() {
return "SemanticScholar";
}
/**
* Looks for hits which are matched by the given {@link BibEntry}.
*
* @param entry entry to search bibliographic information for
* @return a list of {@link BibEntry}, which are matched by the query (may be empty)
* @throws FetcherException if an error linked to the Fetcher applies
*/
@Override
public List<BibEntry> performSearch(BibEntry entry) throws FetcherException {
Optional<String> title = entry.getTitle();
if (title.isEmpty()) {
return new ArrayList<>();
}
return performSearch(title.get());
}
}
| 9,897 | 41.480687 | 137 | java |
null | jabref-main/src/main/java/org/jabref/logic/importer/fetcher/SpringerFetcher.java | package org.jabref.logic.importer.fetcher;
import java.io.BufferedReader;
import java.io.InputStreamReader;
import java.net.MalformedURLException;
import java.net.URISyntaxException;
import java.net.URL;
import java.util.ArrayList;
import java.util.List;
import java.util.Optional;
import java.util.stream.Collectors;
import org.jabref.logic.help.HelpFile;
import org.jabref.logic.importer.FetcherException;
import org.jabref.logic.importer.ImporterPreferences;
import org.jabref.logic.importer.PagedSearchBasedParserFetcher;
import org.jabref.logic.importer.Parser;
import org.jabref.logic.importer.fetcher.transformers.SpringerQueryTransformer;
import org.jabref.logic.preferences.FetcherApiKey;
import org.jabref.logic.util.BuildInfo;
import org.jabref.logic.util.OS;
import org.jabref.model.entry.BibEntry;
import org.jabref.model.entry.LinkedFile;
import org.jabref.model.entry.Month;
import org.jabref.model.entry.field.Field;
import org.jabref.model.entry.field.StandardField;
import org.jabref.model.entry.types.StandardEntryType;
import kong.unirest.json.JSONArray;
import kong.unirest.json.JSONObject;
import org.apache.http.client.utils.URIBuilder;
import org.apache.lucene.queryparser.flexible.core.nodes.QueryNode;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* Fetches data from the Springer
*
* @see <a href="https://dev.springernature.com/">API documentation</a> for more details
*/
public class SpringerFetcher implements PagedSearchBasedParserFetcher, CustomizableKeyFetcher {
    public static final String FETCHER_NAME = "Springer";
    private static final Logger LOGGER = LoggerFactory.getLogger(SpringerFetcher.class);
    private static final String API_URL = "https://api.springernature.com/meta/v1/json";
    private static final String API_KEY = new BuildInfo().springerNatureAPIKey;
    // Springer query using the parameter 'q=doi:10.1007/s11276-008-0131-4s=1' will respond faster
    private static final String TEST_URL_WITHOUT_API_KEY = "https://api.springernature.com/meta/v1/json?q=doi:10.1007/s11276-008-0131-4s=1&p=1&api_key=";
    private final ImporterPreferences importerPreferences;

    public SpringerFetcher(ImporterPreferences importerPreferences) {
        this.importerPreferences = importerPreferences;
    }

    /**
     * Convert a JSONObject obtained from <a href="http://api.springer.com/metadata/json">http://api.springer.com/metadata/json</a> to a BibEntry
     *
     * @param springerJsonEntry the JSONObject from search results
     * @return the converted BibEntry
     */
    public static BibEntry parseSpringerJSONtoBibtex(JSONObject springerJsonEntry) {
        // Fields that are directly accessible at the top level Json object
        Field[] singleFieldStrings = {StandardField.ISSN, StandardField.VOLUME, StandardField.ABSTRACT, StandardField.DOI, StandardField.TITLE, StandardField.NUMBER,
                StandardField.PUBLISHER};
        BibEntry entry = new BibEntry();
        Field nametype;
        // Guess publication type from the presence of an ISBN
        String isbn = springerJsonEntry.optString("isbn");
        if (com.google.common.base.Strings.isNullOrEmpty(isbn)) {
            // Probably article
            entry.setType(StandardEntryType.Article);
            nametype = StandardField.JOURNAL;
        } else {
            // Probably book chapter or from proceeding, go for book chapter
            entry.setType(StandardEntryType.InCollection);
            nametype = StandardField.BOOKTITLE;
            entry.setField(StandardField.ISBN, isbn);
        }
        // Authors
        if (springerJsonEntry.has("creators")) {
            JSONArray authors = springerJsonEntry.getJSONArray("creators");
            List<String> authorList = new ArrayList<>();
            for (int i = 0; i < authors.length(); i++) {
                if (authors.getJSONObject(i).has("creator")) {
                    authorList.add(authors.getJSONObject(i).getString("creator"));
                } else {
                    LOGGER.info("Empty author name.");
                }
            }
            entry.setField(StandardField.AUTHOR, String.join(" and ", authorList));
        } else {
            LOGGER.info("No author found.");
        }
        // Direct accessible fields
        for (Field field : singleFieldStrings) {
            if (springerJsonEntry.has(field.getName())) {
                String text = springerJsonEntry.getString(field.getName());
                if (!text.isEmpty()) {
                    entry.setField(field, text);
                }
            }
        }
        // Page numbers
        if (springerJsonEntry.has("startingPage") && !(springerJsonEntry.getString("startingPage").isEmpty())) {
            if (springerJsonEntry.has("endingPage") && !(springerJsonEntry.getString("endingPage").isEmpty())) {
                entry.setField(StandardField.PAGES,
                        springerJsonEntry.getString("startingPage") + "--" + springerJsonEntry.getString("endingPage"));
            } else {
                entry.setField(StandardField.PAGES, springerJsonEntry.getString("startingPage"));
            }
        }
        // Journal
        if (springerJsonEntry.has("publicationName")) {
            entry.setField(nametype, springerJsonEntry.getString("publicationName"));
        }
        // Online file
        if (springerJsonEntry.has("url")) {
            JSONArray urls = springerJsonEntry.optJSONArray("url");
            if (urls == null) {
                entry.setField(StandardField.URL, springerJsonEntry.optString("url"));
            } else {
                urls.forEach(data -> {
                    JSONObject url = (JSONObject) data;
                    if (url.optString("format").equalsIgnoreCase("pdf")) {
                        try {
                            entry.addFile(new LinkedFile(new URL(url.optString("value")), "PDF"));
                        } catch (MalformedURLException e) {
                            LOGGER.info("Malformed URL: {}", url.optString("value"));
                        }
                    }
                });
            }
        }
        // Date
        if (springerJsonEntry.has("publicationDate")) {
            String date = springerJsonEntry.getString("publicationDate");
            entry.setField(StandardField.DATE, date); // For biblatex
            String[] dateparts = date.split("-");
            entry.setField(StandardField.YEAR, dateparts[0]);
            // Guard against year-only or malformed dates: the previous code assumed a
            // "yyyy-MM-..." format and threw ArrayIndexOutOfBounds/NumberFormatException
            // for anything else.
            if (dateparts.length > 1) {
                try {
                    Month.getMonthByNumber(Integer.parseInt(dateparts[1]))
                         .ifPresent(entry::setMonth);
                } catch (NumberFormatException e) {
                    LOGGER.info("Non-numeric month in publication date: {}", date);
                }
            }
        }
        // Clean up abstract (often starting with Abstract)
        entry.getField(StandardField.ABSTRACT).ifPresent(abstractContents -> {
            if (abstractContents.startsWith("Abstract")) {
                entry.setField(StandardField.ABSTRACT, abstractContents.substring(8));
            }
        });
        return entry;
    }

    @Override
    public String getName() {
        return FETCHER_NAME;
    }

    @Override
    public Optional<HelpFile> getHelpPage() {
        return Optional.of(HelpFile.FETCHER_SPRINGER);
    }

    /**
     * Returns the API key to use: the first enabled user-configured key named like this
     * fetcher, or the bundled default key.
     */
    private String getApiKey() {
        return importerPreferences.getApiKeys()
                                  .stream()
                                  .filter(key -> key.getName().equalsIgnoreCase(FETCHER_NAME))
                                  .filter(FetcherApiKey::shouldUse)
                                  .findFirst()
                                  .map(FetcherApiKey::getKey)
                                  .orElse(API_KEY);
    }

    @Override
    public String getTestUrl() {
        return TEST_URL_WITHOUT_API_KEY;
    }

    /**
     * Gets the query URL
     *
     * @param luceneQuery the search query
     * @param pageNumber  the number of the page indexed from 0
     * @return URL
     */
    @Override
    public URL getURLForQuery(QueryNode luceneQuery, int pageNumber) throws URISyntaxException, MalformedURLException, FetcherException {
        URIBuilder uriBuilder = new URIBuilder(API_URL);
        uriBuilder.addParameter("q", new SpringerQueryTransformer().transformLuceneQuery(luceneQuery).orElse("")); // Search query
        uriBuilder.addParameter("api_key", getApiKey()); // API key
        uriBuilder.addParameter("s", String.valueOf(getPageSize() * pageNumber + 1)); // Start entry, starts indexing at 1
        uriBuilder.addParameter("p", String.valueOf(getPageSize())); // Page size
        return uriBuilder.build().toURL();
    }

    // NOTE: the previously present private method constructComplexQueryString was never
    // called from this class and has been removed as dead code.

    @Override
    public Parser getParser() {
        return inputStream -> {
            String response = new BufferedReader(new InputStreamReader(inputStream)).lines().collect(Collectors.joining(OS.NEWLINE));
            JSONObject jsonObject = new JSONObject(response);
            List<BibEntry> entries = new ArrayList<>();
            if (jsonObject.has("records")) {
                JSONArray results = jsonObject.getJSONArray("records");
                for (int i = 0; i < results.length(); i++) {
                    JSONObject jsonEntry = results.getJSONObject(i);
                    BibEntry entry = parseSpringerJSONtoBibtex(jsonEntry);
                    entries.add(entry);
                }
            }
            return entries;
        };
    }
}
| 10,098 | 41.25523 | 165 | java |
null | jabref-main/src/main/java/org/jabref/logic/importer/fetcher/SpringerLink.java | package org.jabref.logic.importer.fetcher;
import java.io.IOException;
import java.net.URL;
import java.util.Objects;
import java.util.Optional;
import org.jabref.logic.importer.FulltextFetcher;
import org.jabref.logic.importer.ImporterPreferences;
import org.jabref.logic.preferences.FetcherApiKey;
import org.jabref.logic.util.BuildInfo;
import org.jabref.model.entry.BibEntry;
import org.jabref.model.entry.field.StandardField;
import org.jabref.model.entry.identifier.DOI;
import kong.unirest.HttpResponse;
import kong.unirest.JsonNode;
import kong.unirest.Unirest;
import kong.unirest.UnirestException;
import kong.unirest.json.JSONObject;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* FulltextFetcher implementation that attempts to find a PDF URL at SpringerLink.
* <p>
* Uses Springer API, see <a href="https://dev.springer.com">https://dev.springer.com</a>
*/
public class SpringerLink implements FulltextFetcher {
    public static final String FETCHER_NAME = "Springer";
    private static final Logger LOGGER = LoggerFactory.getLogger(SpringerLink.class);
    private static final String API_URL = "https://api.springer.com/meta/v1/json";
    private static final String API_KEY = new BuildInfo().springerNatureAPIKey;
    private static final String CONTENT_HOST = "link.springer.com";
    private final ImporterPreferences importerPreferences;

    public SpringerLink(ImporterPreferences importerPreferences) {
        this.importerPreferences = importerPreferences;
    }

    /**
     * Returns the API key to use: the first enabled user-configured key named like this
     * fetcher, or the bundled default key.
     */
    private String getApiKey() {
        return importerPreferences.getApiKeys()
                                  .stream()
                                  .filter(key -> key.getName().equalsIgnoreCase(FETCHER_NAME))
                                  .filter(FetcherApiKey::shouldUse)
                                  .findFirst()
                                  .map(FetcherApiKey::getKey)
                                  .orElse(API_KEY);
    }

    /**
     * Tries to find a fulltext PDF on SpringerLink for the given entry.
     * Requires a DOI; queries the Springer metadata API to check whether the
     * article is in the catalog before constructing the PDF URL.
     *
     * @param entry the entry to look up (must not be null)
     * @return the PDF URL, or an empty Optional if the article is not in the catalog
     */
    @Override
    public Optional<URL> findFullText(BibEntry entry) throws IOException {
        Objects.requireNonNull(entry);
        // Try unique DOI first
        Optional<DOI> doi = entry.getField(StandardField.DOI).flatMap(DOI::parse);
        if (doi.isEmpty()) {
            return Optional.empty();
        }
        // Available in catalog?
        try {
            HttpResponse<JsonNode> jsonResponse = Unirest.get(API_URL)
                                                         .queryString("api_key", getApiKey())
                                                         .queryString("q", String.format("doi:%s", doi.get().getDOI()))
                                                         .asJson();
            if (jsonResponse.getBody() != null) {
                JSONObject json = jsonResponse.getBody().getObject();
                int results = json.getJSONArray("result").getJSONObject(0).getInt("total");
                if (results > 0) {
                    LOGGER.info("Fulltext PDF found @ Springer.");
                    // Use https directly: link.springer.com serves content over TLS, so a
                    // plain-http URL would only add an insecure redirect.
                    return Optional.of(new URL("https", CONTENT_HOST, String.format("/content/pdf/%s.pdf", doi.get().getDOI())));
                }
            }
        } catch (UnirestException e) {
            LOGGER.warn("SpringerLink API request failed", e);
        }
        return Optional.empty();
    }

    @Override
    public TrustLevel getTrustLevel() {
        // Links point at the publisher's own content host.
        return TrustLevel.PUBLISHER;
    }
}
| 3,421 | 37.022222 | 128 | java |
null | jabref-main/src/main/java/org/jabref/logic/importer/fetcher/TitleFetcher.java | package org.jabref.logic.importer.fetcher;
import java.util.Optional;
import org.jabref.logic.help.HelpFile;
import org.jabref.logic.importer.FetcherException;
import org.jabref.logic.importer.IdBasedFetcher;
import org.jabref.logic.importer.ImportFormatPreferences;
import org.jabref.logic.importer.WebFetchers;
import org.jabref.model.entry.BibEntry;
import org.jabref.model.entry.field.StandardField;
import org.jabref.model.entry.identifier.DOI;
import org.jabref.model.strings.StringUtil;
public class TitleFetcher implements IdBasedFetcher {

    private final ImportFormatPreferences preferences;

    public TitleFetcher(ImportFormatPreferences preferences) {
        this.preferences = preferences;
    }

    @Override
    public String getName() {
        return "Title";
    }

    @Override
    public Optional<HelpFile> getHelpPage() {
        return Optional.of(HelpFile.FETCHER_TITLE);
    }

    /**
     * Resolves the given title to a DOI and, if one is found, fetches the full
     * bibliographic record for that DOI.
     *
     * @param identifier the title to search for
     * @return the entry for the resolved DOI, or an empty Optional if the title is
     *         blank or no DOI could be determined
     */
    @Override
    public Optional<BibEntry> performSearchById(String identifier) throws FetcherException {
        if (StringUtil.isBlank(identifier)) {
            return Optional.empty();
        }
        // Build a minimal entry carrying only the title, so the DOI finder can work on it.
        BibEntry titleOnlyEntry = new BibEntry();
        titleOnlyEntry.setField(StandardField.TITLE, identifier);
        Optional<DOI> maybeDoi = WebFetchers.getIdFetcherForIdentifier(DOI.class).findIdentifier(titleOnlyEntry);
        if (maybeDoi.isPresent()) {
            return new DoiFetcher(this.preferences).performSearchById(maybeDoi.get().getDOI());
        }
        return Optional.empty();
    }
}
| 1,528 | 28.980392 | 99 | java |
null | jabref-main/src/main/java/org/jabref/logic/importer/fetcher/TrustLevel.java | package org.jabref.logic.importer.fetcher;
/**
* Discussion on the trust levels is available at our <a href="https://devdocs.jabref.org/advanced-reading/fetchers">documentation on fetchers</a>.
*/
public enum TrustLevel {
    SOURCE(3),
    PUBLISHER(2),
    PREPRINT(1),
    META_SEARCH(1),
    UNKNOWN(0);

    // Made final: the score is set once in the constructor and never changes.
    private final int score;

    TrustLevel(int score) {
        this.score = score;
    }

    /**
     * @return the numeric trust score of this level (higher = more trustworthy source)
     */
    public int getTrustScore() {
        return this.score;
    }
}
| 467 | 19.347826 | 147 | java |
null | jabref-main/src/main/java/org/jabref/logic/importer/fetcher/ZbMATH.java | package org.jabref.logic.importer.fetcher;
import java.net.MalformedURLException;
import java.net.URISyntaxException;
import java.net.URL;
import java.util.Objects;
import java.util.Optional;
import java.util.stream.Collectors;
import org.jabref.logic.cleanup.FieldFormatterCleanup;
import org.jabref.logic.cleanup.MoveFieldCleanup;
import org.jabref.logic.formatter.bibtexfields.RemoveBracesFormatter;
import org.jabref.logic.importer.EntryBasedParserFetcher;
import org.jabref.logic.importer.FetcherException;
import org.jabref.logic.importer.IdBasedParserFetcher;
import org.jabref.logic.importer.ImportFormatPreferences;
import org.jabref.logic.importer.Parser;
import org.jabref.logic.importer.SearchBasedParserFetcher;
import org.jabref.logic.importer.fetcher.transformers.ZbMathQueryTransformer;
import org.jabref.logic.importer.fileformat.BibtexParser;
import org.jabref.model.entry.AuthorList;
import org.jabref.model.entry.BibEntry;
import org.jabref.model.entry.field.AMSField;
import org.jabref.model.entry.field.StandardField;
import org.jabref.model.entry.field.UnknownField;
import kong.unirest.HttpResponse;
import kong.unirest.JsonNode;
import kong.unirest.Unirest;
import kong.unirest.json.JSONArray;
import org.apache.http.client.utils.URIBuilder;
import org.apache.lucene.queryparser.flexible.core.nodes.QueryNode;
/**
* Fetches data from the Zentralblatt Math (https://www.zbmath.org/)
*/
public class ZbMATH implements SearchBasedParserFetcher, IdBasedParserFetcher, EntryBasedParserFetcher {
    // Preferences handed to the BibTeX parser when reading zbMATH responses.
    private final ImportFormatPreferences preferences;

    /**
     * @param preferences import format preferences; must not be null
     */
    public ZbMATH(ImportFormatPreferences preferences) {
        this.preferences = Objects.requireNonNull(preferences);
    }
    /**
     * @return the display name of this fetcher ("zbMATH")
     */
    @Override
    public String getName() {
        return "zbMATH";
    }
    /**
     * Builds the BibTeX-output URL for the given entry.
     * <p>
     * If the entry already carries a zbMATH number, that identifier is used directly.
     * Otherwise the zbMATH citation-matching API is queried with the entry's
     * bibliographic fields to determine the zbl id first.
     *
     * @param entry the entry to look up
     * @return the URL for fetching BibTeX data, or {@code null} if the citation
     *         matching API found no match (NOTE(review): callers must handle null;
     *         consider Optional in a future interface change)
     */
    @Override
    public URL getURLForEntry(BibEntry entry) throws URISyntaxException, MalformedURLException, FetcherException {
        Optional<String> zblidInEntry = entry.getField(StandardField.ZBL_NUMBER);
        if (zblidInEntry.isPresent()) {
            // zbmath id is already present
            return getUrlForIdentifier(zblidInEntry.get());
        }
        URIBuilder uriBuilder = new URIBuilder("https://zbmath.org/citationmatching/match");
        uriBuilder.addParameter("n", "1"); // return only the best matching entry
        uriBuilder.addParameter("m", "5"); // return only entries with a score of at least 5
        entry.getFieldOrAlias(StandardField.TITLE).ifPresent(title -> uriBuilder.addParameter("t", title));
        entry.getFieldOrAlias(StandardField.JOURNAL).ifPresent(journal -> uriBuilder.addParameter("j", journal));
        entry.getFieldOrAlias(StandardField.YEAR).ifPresent(year -> uriBuilder.addParameter("y", year));
        entry.getFieldOrAlias(StandardField.PAGINATION)
             .ifPresent(pagination -> uriBuilder.addParameter("p", pagination));
        entry.getFieldOrAlias(StandardField.VOLUME).ifPresent(volume -> uriBuilder.addParameter("v", volume));
        entry.getFieldOrAlias(StandardField.ISSUE).ifPresent(issue -> uriBuilder.addParameter("i", issue));
        if (entry.getFieldOrAlias(StandardField.AUTHOR).isPresent()) {
            // replace "and" by ";" as citation matching API uses ";" for separation
            AuthorList authors = AuthorList.parse(entry.getFieldOrAlias(StandardField.AUTHOR).get());
            String authorsWithSemicolon = authors.getAuthors().stream()
                                                 .map(author -> author.getLastFirst(false))
                                                 .collect(Collectors.joining(";"));
            uriBuilder.addParameter("a", authorsWithSemicolon);
        }
        /*
        zbmath citation matching API does only return json, thus we use the
        citation matching API to extract the zbl_id and then use getUrlForIdentifier
        to get the bibtex data.
         */
        String urlString = uriBuilder.build().toString();
        HttpResponse<JsonNode> response = Unirest.get(urlString)
                                                 .asJson();
        String zblid = null;
        if (response.getStatus() == 200) {
            // HTTP 200: extract the zbl id of the best match from the "results" array
            JSONArray result = response.getBody()
                                       .getObject()
                                       .getJSONArray("results");
            if (result.length() > 0) {
                zblid = result.getJSONObject(0)
                              .get("zbl_id")
                              .toString();
            }
        }
        if (zblid == null) {
            // citation matching API found no matching entry
            return null;
        } else {
            return getUrlForIdentifier(zblid);
        }
    }
@Override
public URL getURLForQuery(QueryNode luceneQuery) throws URISyntaxException, MalformedURLException, FetcherException {
URIBuilder uriBuilder = new URIBuilder("https://zbmath.org/bibtexoutput/");
uriBuilder.addParameter("q", new ZbMathQueryTransformer().transformLuceneQuery(luceneQuery).orElse("")); // search all fields
uriBuilder.addParameter("start", "0"); // start index
uriBuilder.addParameter("count", "200"); // should return up to 200 items (instead of default 100)
return uriBuilder.build().toURL();
}
@Override
public URL getUrlForIdentifier(String identifier) throws URISyntaxException, MalformedURLException, FetcherException {
URIBuilder uriBuilder = new URIBuilder("https://zbmath.org/bibtexoutput/");
String query = "an:".concat(identifier); // use an: to search for a zbMATH identifier
uriBuilder.addParameter("q", query);
uriBuilder.addParameter("start", "0"); // start index
uriBuilder.addParameter("count", "1"); // return exactly one item
return uriBuilder.build().toURL();
}
@Override
public Parser getParser() {
return new BibtexParser(preferences);
}
@Override
public void doPostCleanup(BibEntry entry) {
new MoveFieldCleanup(new UnknownField("msc2010"), StandardField.KEYWORDS).cleanup(entry);
new MoveFieldCleanup(AMSField.FJOURNAL, StandardField.JOURNAL).cleanup(entry);
new FieldFormatterCleanup(StandardField.JOURNAL, new RemoveBracesFormatter()).cleanup(entry);
new FieldFormatterCleanup(StandardField.TITLE, new RemoveBracesFormatter()).cleanup(entry);
}
}
| 6,390 | 45.311594 | 133 | java |
null | jabref-main/src/main/java/org/jabref/logic/importer/fetcher/isbntobibtex/DoiToBibtexConverterComIsbnFetcher.java | package org.jabref.logic.importer.fetcher.isbntobibtex;
import java.net.MalformedURLException;
import java.net.URISyntaxException;
import java.net.URL;
import java.util.Collections;
import java.util.List;
import java.util.stream.IntStream;
import java.util.stream.Stream;
import org.jabref.logic.importer.FetcherException;
import org.jabref.logic.importer.ImportFormatPreferences;
import org.jabref.logic.importer.ParseException;
import org.jabref.logic.importer.Parser;
import org.jabref.logic.importer.fetcher.AbstractIsbnFetcher;
import org.jabref.logic.importer.util.JsonReader;
import org.jabref.model.entry.BibEntry;
import org.jabref.model.entry.field.StandardField;
import org.jabref.model.entry.types.StandardEntryType;
import org.jabref.model.strings.StringUtil;
import kong.unirest.json.JSONArray;
import kong.unirest.json.JSONException;
import kong.unirest.json.JSONObject;
import org.apache.http.client.utils.URIBuilder;
/**
 * Fetcher for ISBN using <a href="https://doi-to-bibtex-converter.herokuapp.com">doi-to-bibtex-converter.herokuapp</a>.
 */
public class DoiToBibtexConverterComIsbnFetcher extends AbstractIsbnFetcher {
    private static final String BASE_URL = "https://doi-to-bibtex-converter.herokuapp.com";

    public DoiToBibtexConverterComIsbnFetcher(ImportFormatPreferences importFormatPreferences) {
        super(importFormatPreferences);
    }

    @Override
    public String getName() {
        return "ISBN (doi-to-bibtex-converter.herokuapp.com)";
    }

    /**
     * Builds the JSON query URL for the given ISBN.
     *
     * @param identifier the ISBN to look up; validated before the URL is built
     * @throws FetcherException if the identifier is not a valid ISBN
     */
    @Override
    public URL getUrlForIdentifier(String identifier) throws URISyntaxException, MalformedURLException, FetcherException {
        this.ensureThatIsbnIsValid(identifier);
        return new URIBuilder(BASE_URL)
                .setPathSegments("getInfo.php")
                .setParameter("query", identifier)
                .setParameter("format", "json")
                .build()
                .toURL();
    }

    /**
     * Parses the JSON response into a single {@link BibEntry}.
     * The service signals failures in an "error" field of the response body.
     */
    @Override
    public Parser getParser() {
        return inputStream -> {
            JSONObject response = JsonReader.toJsonObject(inputStream);
            if (response.isEmpty()) {
                return Collections.emptyList();
            }
            String error = response.optString("error");
            if (StringUtil.isNotBlank(error)) {
                throw new ParseException(error);
            }
            BibEntry entry = jsonItemToBibEntry(response);
            return List.of(entry);
        };
    }

    @Override
    public void doPostCleanup(BibEntry entry) {
        // no post-processing needed for this fetcher
    }

    /**
     * Maps the "data" array of the response onto BibTeX fields.
     *
     * @throws ParseException if the JSON does not have the expected structure
     */
    private BibEntry jsonItemToBibEntry(JSONObject item) throws ParseException {
        try {
            JSONArray data = item.optJSONArray("data");
            var type = getElementFromJSONArrayByKey(data, "type");
            BibEntry entry = new BibEntry(evaluateBibEntryTypeFromString(type));
            entry.setField(StandardField.AUTHOR, getElementFromJSONArrayByKey(data, "author"));
            entry.setField(StandardField.PAGES, getElementFromJSONArrayByKey(data, "pagecount"));
            entry.setField(StandardField.ISBN, getElementFromJSONArrayByKey(data, "isbn"));
            entry.setField(StandardField.TITLE, getElementFromJSONArrayByKey(data, "title"));
            entry.setField(StandardField.YEAR, getElementFromJSONArrayByKey(data, "year"));
            entry.setField(StandardField.MONTH, getElementFromJSONArrayByKey(data, "month"));
            entry.setField(StandardField.DAY, getElementFromJSONArrayByKey(data, "day"));
            return entry;
        } catch (JSONException exception) {
            // Was "CrossRef API ..." -- a copy-paste slip; this fetcher talks to doi-to-bibtex-converter.
            throw new ParseException("doi-to-bibtex-converter API JSON format has changed", exception);
        }
    }

    /**
     * Returns the first non-blank value stored under {@code key} in any element
     * of the array, or the empty string if the array is absent or has no such value.
     * Null-safe: {@code optJSONArray} above may return null.
     */
    private String getElementFromJSONArrayByKey(JSONArray jsonArray, String key) {
        if (jsonArray == null) {
            return "";
        }
        return IntStream.range(0, jsonArray.length())
                        .mapToObj(jsonArray::getJSONObject)
                        .map(obj -> obj.optString(key))
                        .filter(StringUtil::isNotBlank)
                        .findFirst()
                        .orElse("");
    }

    /**
     * Maps the service's "type" string to a {@link StandardEntryType},
     * defaulting to Book for unknown or missing types.
     */
    private StandardEntryType evaluateBibEntryTypeFromString(String type) {
        return Stream.of(StandardEntryType.values())
                     .filter(entryType -> entryType.name().equalsIgnoreCase(type))
                     .findAny()
                     .orElse(StandardEntryType.Book);
    }
}
| 4,298 | 38.440367 | 122 | java |
null | jabref-main/src/main/java/org/jabref/logic/importer/fetcher/isbntobibtex/EbookDeIsbnFetcher.java | package org.jabref.logic.importer.fetcher.isbntobibtex;
import java.net.MalformedURLException;
import java.net.URISyntaxException;
import java.net.URL;
import org.jabref.logic.cleanup.FieldFormatterCleanup;
import org.jabref.logic.formatter.bibtexfields.NormalizeNamesFormatter;
import org.jabref.logic.formatter.bibtexfields.NormalizePagesFormatter;
import org.jabref.logic.importer.FetcherException;
import org.jabref.logic.importer.ImportFormatPreferences;
import org.jabref.logic.importer.fetcher.AbstractIsbnFetcher;
import org.jabref.model.entry.BibEntry;
import org.jabref.model.entry.field.StandardField;
import org.apache.http.client.utils.URIBuilder;
/**
 * ISBN fetcher backed by the BibTeX export endpoint of <a href="https://www.ebook.de">ebook.de</a>.
 */
public class EbookDeIsbnFetcher extends AbstractIsbnFetcher {

    private static final String BASE_URL = "https://www.ebook.de/de/tools/isbn2bibtex";

    public EbookDeIsbnFetcher(ImportFormatPreferences importFormatPreferences) {
        super(importFormatPreferences);
    }

    @Override
    public String getName() {
        return "ISBN (ebook.de)";
    }

    /**
     * Builds the lookup URL for the given ISBN after validating it.
     */
    @Override
    public URL getUrlForIdentifier(String identifier) throws URISyntaxException, MalformedURLException, FetcherException {
        this.ensureThatIsbnIsValid(identifier);
        URIBuilder lookupUrl = new URIBuilder(BASE_URL);
        lookupUrl.addParameter("isbn", identifier);
        return lookupUrl.build().toURL();
    }

    @Override
    public void doPostCleanup(BibEntry entry) {
        // We MUST NOT clean the URL. this is the deal with ebook.de
        // DO NOT add following code:
        // new FieldFormatterCleanup(StandardField.URL, new ClearFormatter()).cleanup(entry);

        // The service reports page counts as e.g. "30 Seiten"; keep only the digits.
        entry.getField(StandardField.PAGETOTAL)
             .map(pages -> pages.replaceAll("[\\D]", ""))
             .ifPresent(digitsOnly -> entry.setField(StandardField.PAGETOTAL, digitsOnly));
        new FieldFormatterCleanup(StandardField.PAGETOTAL, new NormalizePagesFormatter()).cleanup(entry);
        new FieldFormatterCleanup(StandardField.AUTHOR, new NormalizeNamesFormatter()).cleanup(entry);
    }
}
| 2,204 | 39.090909 | 122 | java |
null | jabref-main/src/main/java/org/jabref/logic/importer/fetcher/isbntobibtex/IsbnFetcher.java | package org.jabref.logic.importer.fetcher.isbntobibtex;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Iterator;
import java.util.List;
import java.util.Objects;
import java.util.Optional;
import java.util.regex.Pattern;
import org.jabref.logic.help.HelpFile;
import org.jabref.logic.importer.EntryBasedFetcher;
import org.jabref.logic.importer.FetcherException;
import org.jabref.logic.importer.IdBasedFetcher;
import org.jabref.logic.importer.ImportFormatPreferences;
import org.jabref.logic.importer.fetcher.AbstractIsbnFetcher;
import org.jabref.model.entry.BibEntry;
import org.jabref.model.entry.field.StandardField;
import org.jabref.model.strings.StringUtil;
import org.jabref.model.util.OptionalUtil;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * Fetcher to generate the Bibtex entry from an ISBN.
 * The default fetcher is the {@link OpenLibraryIsbnFetcher}.
 * If the entry is not found in the {@link OpenLibraryIsbnFetcher},
 * alternative fetchers registered via {@link IsbnFetcher#addRetryFetcher(AbstractIsbnFetcher)} are tried in order.
 */
public class IsbnFetcher implements EntryBasedFetcher, IdBasedFetcher {

    private static final Logger LOGGER = LoggerFactory.getLogger(IsbnFetcher.class);
    // Users often paste ISBNs with stray whitespace or line breaks; these are stripped.
    private static final Pattern NEWLINE_SPACE_PATTERN = Pattern.compile("\\n|\\r\\n|\\s");

    protected final ImportFormatPreferences importFormatPreferences;
    private final OpenLibraryIsbnFetcher openLibraryIsbnFetcher;
    private final List<AbstractIsbnFetcher> retryIsbnFetcher;

    public IsbnFetcher(ImportFormatPreferences importFormatPreferences) {
        this.importFormatPreferences = importFormatPreferences;
        this.openLibraryIsbnFetcher = new OpenLibraryIsbnFetcher(importFormatPreferences);
        this.retryIsbnFetcher = new ArrayList<>();
    }

    @Override
    public String getName() {
        return "ISBN";
    }

    @Override
    public Optional<HelpFile> getHelpPage() {
        return Optional.of(HelpFile.FETCHER_ISBN);
    }

    /**
     * Looks up an ISBN, first via OpenLibrary and then via all registered retry fetchers.
     *
     * @param identifier the ISBN (whitespace/newlines are tolerated and removed)
     * @return the matching entry, or empty if no fetcher found one
     * @throws FetcherException only if OpenLibrary fails and no retry fetcher is registered
     */
    @Override
    public Optional<BibEntry> performSearchById(String identifier) throws FetcherException {
        if (StringUtil.isBlank(identifier)) {
            return Optional.empty();
        }
        Optional<BibEntry> bibEntry = Optional.empty();
        try {
            identifier = removeNewlinesAndSpacesFromIdentifier(identifier);
            bibEntry = openLibraryIsbnFetcher.performSearchById(identifier);
        } catch (FetcherException ex) {
            // (typo fixed: was "IBSN")
            LOGGER.debug("Got a fetcher exception for ISBN search", ex);
            if (retryIsbnFetcher.isEmpty()) {
                // No fallback available, so propagate the failure to the caller.
                throw ex;
            }
        } finally {
            // Runs both after a miss (empty Optional) and after a swallowed exception.
            // Only announce the fallback when it is actually going to happen
            // (previously this was logged unconditionally, even on success).
            if (bibEntry.isEmpty() && !retryIsbnFetcher.isEmpty()) {
                LOGGER.debug("Trying to use the alternate ISBN fetchers to find an entry.");
            }
            // do not move the iterator in the loop as this would always return a new one and thus create an endless loop
            Iterator<AbstractIsbnFetcher> iterator = retryIsbnFetcher.iterator();
            while (bibEntry.isEmpty() && iterator.hasNext()) {
                AbstractIsbnFetcher fetcher = iterator.next();
                LOGGER.debug("No entry found for ISBN {}; trying {} next.", identifier, fetcher.getName());
                // NOTE(review): a FetcherException thrown here inside finally would mask
                // an exception rethrown above -- confirm this trade-off is intended.
                bibEntry = fetcher.performSearchById(identifier);
            }
        }
        if (bibEntry.isEmpty()) {
            // (grammar fixed: was "Could not found a entry")
            LOGGER.debug("Could not find an entry for ISBN {}", identifier);
        }
        return bibEntry;
    }

    /**
     * Entry-based search: delegates to {@link #performSearchById(String)} using
     * the entry's ISBN field, or returns an empty list if none is set.
     */
    @Override
    public List<BibEntry> performSearch(BibEntry entry) throws FetcherException {
        Optional<String> isbn = entry.getField(StandardField.ISBN);
        if (isbn.isPresent()) {
            return OptionalUtil.toList(performSearchById(isbn.get()));
        } else {
            return Collections.emptyList();
        }
    }

    /**
     * Registers an additional fetcher to try when the default one finds nothing.
     *
     * @return this fetcher, for chaining
     */
    public IsbnFetcher addRetryFetcher(AbstractIsbnFetcher retryFetcher) {
        Objects.requireNonNull(retryFetcher, "Please provide a valid isbn fetcher.");
        retryIsbnFetcher.add(retryFetcher);
        return this;
    }

    // Strips all whitespace and newline characters from the pasted identifier.
    private String removeNewlinesAndSpacesFromIdentifier(String identifier) {
        return NEWLINE_SPACE_PATTERN.matcher(identifier).replaceAll("");
    }
}
| 4,184 | 37.394495 | 122 | java |
null | jabref-main/src/main/java/org/jabref/logic/importer/fetcher/isbntobibtex/OpenLibraryIsbnFetcher.java | package org.jabref.logic.importer.fetcher.isbntobibtex;
import java.net.MalformedURLException;
import java.net.URISyntaxException;
import java.net.URL;
import java.util.Collections;
import java.util.List;
import java.util.Optional;
import java.util.stream.Collectors;
import java.util.stream.IntStream;
import java.util.stream.Stream;
import org.jabref.logic.importer.AuthorListParser;
import org.jabref.logic.importer.FetcherException;
import org.jabref.logic.importer.ImportFormatPreferences;
import org.jabref.logic.importer.ParseException;
import org.jabref.logic.importer.Parser;
import org.jabref.logic.importer.fetcher.AbstractIsbnFetcher;
import org.jabref.logic.importer.util.JsonReader;
import org.jabref.model.entry.Author;
import org.jabref.model.entry.AuthorList;
import org.jabref.model.entry.BibEntry;
import org.jabref.model.entry.Date;
import org.jabref.model.entry.field.StandardField;
import org.jabref.model.entry.types.StandardEntryType;
import org.jabref.model.strings.StringUtil;
import kong.unirest.JsonNode;
import kong.unirest.Unirest;
import kong.unirest.json.JSONArray;
import kong.unirest.json.JSONException;
import kong.unirest.json.JSONObject;
import org.apache.http.client.utils.URIBuilder;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * Fetcher for OpenLibrary.
 * <a href="https://openlibrary.org/dev/docs/api/books">API documentation</a>.
 */
public class OpenLibraryIsbnFetcher extends AbstractIsbnFetcher {
    private static final Logger LOGGER = LoggerFactory.getLogger(OpenLibraryIsbnFetcher.class);
    private static final String BASE_URL = "https://openlibrary.org";

    public OpenLibraryIsbnFetcher(ImportFormatPreferences importFormatPreferences) {
        super(importFormatPreferences);
    }

    @Override
    public String getName() {
        return "OpenLibrary";
    }

    /**
     * Builds the JSON book URL ({@code /isbn/<isbn>.json}) for a validated ISBN.
     */
    @Override
    public URL getUrlForIdentifier(String identifier) throws URISyntaxException, MalformedURLException, FetcherException {
        this.ensureThatIsbnIsValid(identifier);
        return new URIBuilder(BASE_URL)
                .setPathSegments("isbn", identifier + ".json")
                .build()
                .toURL();
    }

    /**
     * Parses the JSON response into a single {@link BibEntry}.
     * An "error" field in the response is surfaced as a {@link ParseException}.
     */
    @Override
    public Parser getParser() {
        return inputStream -> {
            JSONObject response = JsonReader.toJsonObject(inputStream);
            if (response.isEmpty()) {
                return Collections.emptyList();
            }
            String error = response.optString("error");
            if (StringUtil.isNotBlank(error)) {
                throw new ParseException(error);
            }
            BibEntry entry = jsonItemToBibEntry(response);
            return List.of(entry);
        };
    }

    @Override
    public void doPostCleanup(BibEntry entry) {
        // no post-processing needed for this fetcher
    }

    /**
     * Maps an OpenLibrary book object onto a Book entry.
     * Authors are resolved from the "authors" array, or, if that is empty,
     * via the linked "works" records (both involve follow-up HTTP requests).
     *
     * @throws ParseException if the JSON does not have the expected structure
     */
    private BibEntry jsonItemToBibEntry(JSONObject item) throws ParseException {
        try {
            BibEntry entry = new BibEntry(StandardEntryType.Book);
            String authors = toAuthors(item.optJSONArray("authors"));
            if (authors.isEmpty()) {
                JSONArray works = item.optJSONArray("works");
                authors = fromWorksToAuthors(works);
            }
            entry.setField(StandardField.AUTHOR, authors);
            entry.setField(StandardField.PAGES, item.optString("number_of_pages"));
            // Prefer ISBN-13 over ISBN-10 when both are present.
            entry.setField(StandardField.ISBN,
                    Optional.ofNullable(item.optJSONArray("isbn_13")).map(array -> array.getString(0))
                            .or(() -> Optional.ofNullable(item.optJSONArray("isbn_10")).map(array -> array.getString(0)))
                            .orElse(""));
            entry.setField(StandardField.TITLE,
                    Optional.ofNullable(item.optString("full_title", null))
                            .or(() -> Optional.ofNullable(item.optString("title", null)))
                            .orElse(""));
            entry.setField(StandardField.SUBTITLE, item.optString("subtitle"));
            Optional<String> yearOpt = Date.parse(item.optString("publish_date")).flatMap(Date::getYear).map(
                    Object::toString);
            yearOpt.ifPresent(year -> entry.setField(StandardField.YEAR, year));
            entry.setField(StandardField.PUBLISHER,
                    Optional.ofNullable(item.optJSONArray("publishers")).map(array -> array.getString(0))
                            .orElse(""));
            return entry;
        } catch (JSONException exception) {
            // Was "CrossRef API ..." -- a copy-paste slip; this fetcher talks to OpenLibrary.
            throw new ParseException("OpenLibrary API JSON format has changed", exception);
        }
    }

    /**
     * Resolves each author reference in the array to a full author name.
     * Returns the empty string if the array is null.
     */
    private String toAuthors(JSONArray authors) {
        if (authors == null) {
            return "";
        }
        return IntStream.range(0, authors.length())
                        .mapToObj(authors::getJSONObject)
                        .map(authorObject -> toAuthor(authorObject.getString("key")))
                        .collect(AuthorList.collect())
                        .getAsLastFirstNamesWithAnd(false);
    }

    /**
     * Fetches a single author record ({@code <BASE_URL><key>.json}) and parses
     * its "personal_name" (preferred) or "name" field.
     * Returns an all-null Author if the record cannot be fetched or parsed.
     */
    private Author toAuthor(String key) {
        JsonNode authorResponse = Unirest.get(BASE_URL + key + ".json").asJson().getBody();
        if (authorResponse == null) {
            LOGGER.warn("Could not parse author");
            return new Author(null, null, null, null, null);
        }
        JSONObject result = authorResponse.getObject();
        Optional<String> nameOptional = Optional.ofNullable(result.optString("personal_name", null)).or(() -> Optional.ofNullable(result.optString("name", null)));
        if (nameOptional.isEmpty()) {
            LOGGER.warn("Could not parse author name");
            return new Author(null, null, null, null, null);
        }
        AuthorListParser authorListParser = new AuthorListParser();
        AuthorList authorList = authorListParser.parse(nameOptional.get());
        return authorList.getAuthor(0);
    }

    /**
     * Fallback author resolution: follows each linked "work" record and
     * collects the authors listed there.
     */
    private String fromWorksToAuthors(JSONArray works) {
        if (works == null) {
            return "";
        }
        List<Author> authors = IntStream.range(0, works.length())
                                        .mapToObj(works::getJSONObject)
                                        .map(obj -> obj.getString("key"))
                                        .map(worksLink -> BASE_URL + worksLink + ".json")
                                        .flatMap(this::fromWorkToAuthors)
                                        .collect(Collectors.toList());
        return AuthorList.of(authors).getAsLastFirstNamesWithAnd(false);
    }

    // Fetches one work record and resolves its "authors" array (empty stream if absent).
    private Stream<Author> fromWorkToAuthors(String link) {
        JsonNode body = Unirest.get(link).asJson().getBody();
        JSONArray authors = body.getObject().optJSONArray("authors");
        if (authors == null) {
            return Stream.empty();
        }
        return IntStream.range(0, authors.length())
                        .mapToObj(authors::getJSONObject)
                        .map(authorObject -> toAuthor(authorObject.getJSONObject("author").getString("key")));
    }
}
| 7,046 | 40.210526 | 163 | java |
null | jabref-main/src/main/java/org/jabref/logic/importer/fetcher/transformers/AbstractQueryTransformer.java | package org.jabref.logic.importer.fetcher.transformers;
import java.util.Optional;
import java.util.StringJoiner;
import java.util.stream.Collectors;
import org.jabref.model.strings.StringUtil;
import org.apache.lucene.queryparser.flexible.core.nodes.BooleanQueryNode;
import org.apache.lucene.queryparser.flexible.core.nodes.FieldQueryNode;
import org.apache.lucene.queryparser.flexible.core.nodes.GroupQueryNode;
import org.apache.lucene.queryparser.flexible.core.nodes.ModifierQueryNode;
import org.apache.lucene.queryparser.flexible.core.nodes.OrQueryNode;
import org.apache.lucene.queryparser.flexible.core.nodes.QueryNode;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * Transforms a parsed Lucene query tree into the query language of a concrete fetcher.
 *
 * In case the transformer contains state for a query transformation (such as the {@link IEEEQueryTransformer}), it has to be noted at the JavaDoc.
 * Otherwise, a single instance QueryTransformer can be used.
 */
public abstract class AbstractQueryTransformer {
    public static final String NO_EXPLICIT_FIELD = "default";
    private static final Logger LOGGER = LoggerFactory.getLogger(AbstractQueryTransformer.class);
    // These can be used for filtering in post processing.
    // Initialized to an "empty" range (start > end) until a year term is seen.
    protected int startYear = Integer.MAX_VALUE;
    protected int endYear = Integer.MIN_VALUE;

    /**
     * Transforms a and b and c to (a AND b AND c), where
     * a, b, and c can be complex expressions.
     */
    private Optional<String> transform(BooleanQueryNode query) {
        String delimiter;
        if (query instanceof OrQueryNode) {
            delimiter = getLogicalOrOperator();
        } else {
            // We define the logical AND operator as the default implementation
            delimiter = getLogicalAndOperator();
        }
        String result = query.getChildren().stream()
                             .map(this::transform)
                             .flatMap(Optional::stream)
                             .collect(Collectors.joining(delimiter, "(", ")"));
        if ("()".equals(result)) {
            // All children were dropped (unsupported fields) -> no query at all
            return Optional.empty();
        }
        return Optional.of(result);
    }

    /**
     * Returns the logical AND operator used by the library
     * Note: whitespaces have to be included around the operator
     *
     * Example: <code>" AND "</code>
     */
    protected abstract String getLogicalAndOperator();

    /**
     * Returns the logical OR operator used by the library
     * Note: whitespaces have to be included around the operator
     *
     * Example: <code>" OR "</code>
     */
    protected abstract String getLogicalOrOperator();

    /**
     * Returns the logical NOT operator used by the library
     *
     * Example: <code>"!"</code>
     */
    protected abstract String getLogicalNotOperator();

    /**
     * Dispatches a single fielded term to the matching handle* method.
     * Unknown fields are passed through via {@link #handleOtherField(String, String)}.
     */
    private Optional<String> transform(FieldQueryNode query) {
        String term = query.getTextAsString();
        switch (query.getFieldAsString()) {
            case "author" -> {
                return Optional.of(handleAuthor(term));
            }
            case "title" -> {
                return Optional.of(handleTitle(term));
            }
            case "journal" -> {
                return Optional.of(handleJournal(term));
            }
            case "year" -> {
                String s = handleYear(term);
                return s.isEmpty() ? Optional.empty() : Optional.of(s);
            }
            case "year-range" -> {
                String s = handleYearRange(term);
                return s.isEmpty() ? Optional.empty() : Optional.of(s);
            }
            case "doi" -> {
                String s = handleDoi(term);
                return s.isEmpty() ? Optional.empty() : Optional.of(s);
            }
            case NO_EXPLICIT_FIELD -> {
                return handleUnFieldedTerm(term);
            }
            default -> {
                // Just add unknown fields as default
                return handleOtherField(query.getFieldAsString(), term);
            }
        }
    }

    /**
     * Return a string representation of the DOI fielded term
     */
    protected String handleDoi(String term) {
        return "doi:" + term;
    }

    /**
     * Handles the not modifier, all other cases are silently ignored
     */
    private Optional<String> transform(ModifierQueryNode query) {
        ModifierQueryNode.Modifier modifier = query.getModifier();
        if (modifier == ModifierQueryNode.Modifier.MOD_NOT) {
            return transform(query.getChild()).map(s -> getLogicalNotOperator() + s);
        } else {
            return transform(query.getChild());
        }
    }

    /**
     * Return a string representation of the author fielded term
     */
    protected abstract String handleAuthor(String author);

    /**
     * Return a string representation of the title fielded term
     */
    protected abstract String handleTitle(String title);

    /**
     * Return a string representation of the journal fielded term
     */
    protected abstract String handleJournal(String journalTitle);

    /**
     * Return a string representation of the year fielded term
     */
    protected abstract String handleYear(String year);

    /**
     * Parses the year range and fills startYear and endYear.
     * Ensures that startYear <= endYear
     */
    protected void parseYearRange(String yearRange) {
        String[] split = yearRange.split("-");
        int parsedStartYear = Integer.parseInt(split[0]);
        startYear = parsedStartYear;
        // Only read the end year when one is actually given ("2015-2021").
        // A plain "2015" splits into one element; the previous check (>= 1)
        // was always true and caused an ArrayIndexOutOfBoundsException on split[1].
        if (split.length > 1) {
            int parsedEndYear = Integer.parseInt(split[1]);
            if (parsedEndYear >= parsedStartYear) {
                endYear = parsedEndYear;
            } else {
                // Years given in reverse order: swap so that startYear <= endYear
                startYear = parsedEndYear;
                endYear = parsedStartYear;
            }
        }
    }

    /**
     * Return a string representation of the year-range fielded term
     * Should follow the structure yyyy-yyyy
     *
     * Example: <code>2015-2021</code>
     */
    protected String handleYearRange(String yearRange) {
        parseYearRange(yearRange);
        // NOTE(review): endYear is initialized to Integer.MIN_VALUE, so this
        // MAX_VALUE sentinel only triggers if a subclass set it -- confirm intent.
        if (endYear == Integer.MAX_VALUE) {
            // invalid year range
            return yearRange;
        }
        // Expand the range into an OR-chain of single-year terms.
        StringJoiner resultBuilder = new StringJoiner(getLogicalOrOperator());
        for (int i = startYear; i <= endYear; i++) {
            resultBuilder.add(handleYear(String.valueOf(i)));
        }
        return resultBuilder.toString();
    }

    /**
     * Return a string representation of the un-fielded (default fielded) term
     *
     * Default implementation: just return the term (in quotes if a space is contained)
     */
    protected Optional<String> handleUnFieldedTerm(String term) {
        return Optional.of(StringUtil.quoteStringIfSpaceIsContained(term));
    }

    protected String createKeyValuePair(String fieldAsString, String term) {
        return createKeyValuePair(fieldAsString, term, ":");
    }

    protected String createKeyValuePair(String fieldAsString, String term, String separator) {
        return String.format("%s%s%s", fieldAsString, separator, StringUtil.quoteStringIfSpaceIsContained(term));
    }

    /**
     * Return a string representation of the provided field
     * If it is not supported return an empty optional.
     */
    protected Optional<String> handleOtherField(String fieldAsString, String term) {
        return Optional.of(createKeyValuePair(fieldAsString, term));
    }

    // Recursive dispatch over the Lucene query tree node types.
    private Optional<String> transform(QueryNode query) {
        if (query instanceof BooleanQueryNode) {
            return transform((BooleanQueryNode) query);
        } else if (query instanceof FieldQueryNode) {
            return transform((FieldQueryNode) query);
        } else if (query instanceof GroupQueryNode) {
            return transform(((GroupQueryNode) query).getChild());
        } else if (query instanceof ModifierQueryNode) {
            return transform((ModifierQueryNode) query);
        } else {
            LOGGER.error("Unsupported case when transforming the query:\n {}", query);
            return Optional.empty();
        }
    }

    /**
     * Parses the given query string into a complex query using lucene.
     * Note: For unique fields, the alphabetically and numerically first instance in the query string is used in the complex query.
     *
     * @param luceneQuery The lucene query tp transform
     * @return A query string containing all fields that are contained in the original lucene query and
     * that are expressible in the library specific query language, other information either is discarded or
     * stored as part of the state of the transformer if it can be used e.g. as a URL parameter for the query.
     */
    public Optional<String> transformLuceneQuery(QueryNode luceneQuery) {
        Optional<String> transformedQuery = transform(luceneQuery);
        transformedQuery = transformedQuery.map(this::removeOuterBraces);
        return transformedQuery;
    }

    /**
     * Removes the outer braces as they are unnecessary
     */
    private String removeOuterBraces(String query) {
        if (query.startsWith("(") && query.endsWith(")")) {
            return query.substring(1, query.length() - 1);
        }
        return query;
    }
}
| 9,159 | 35.787149 | 147 | java |
null | jabref-main/src/main/java/org/jabref/logic/importer/fetcher/transformers/ArXivQueryTransformer.java | package org.jabref.logic.importer.fetcher.transformers;
import java.util.Optional;
/**
 * Transforms a Lucene query into the fielded query syntax of the arXiv API.
 * Year constraints cannot be expressed in the query itself, so they are
 * recorded in the start/end-year bounds for filtering after fetching.
 */
public class ArXivQueryTransformer extends YearRangeByFilteringQueryTransformer {

    @Override
    protected String getLogicalAndOperator() {
        return " AND ";
    }

    @Override
    protected String getLogicalOrOperator() {
        return " OR ";
    }

    @Override
    protected String getLogicalNotOperator() {
        return " ANDNOT ";
    }

    @Override
    protected String handleAuthor(String author) {
        return createKeyValuePair("au", author);
    }

    @Override
    protected String handleTitle(String title) {
        return createKeyValuePair("ti", title);
    }

    @Override
    protected String handleJournal(String journalTitle) {
        return createKeyValuePair("jr", journalTitle);
    }

    /**
     * Manual testing shows that this works if added as an unfielded term,
     * might lead to false positives. The year also widens the filter bounds.
     */
    @Override
    protected String handleYear(String year) {
        int requestedYear = Integer.parseInt(year);
        startYear = Math.min(startYear, requestedYear);
        endYear = Math.max(endYear, requestedYear);
        return year;
    }

    @Override
    protected Optional<String> handleUnFieldedTerm(String term) {
        return Optional.of(createKeyValuePair("all", term));
    }
}
| 1,302 | 24.54902 | 104 | java |
null | jabref-main/src/main/java/org/jabref/logic/importer/fetcher/transformers/BiodiversityLibraryTransformer.java | package org.jabref.logic.importer.fetcher.transformers;
/**
 * Query transformer for the Biodiversity Heritage Library.
 * The target search syntax has no dedicated boolean operators here:
 * AND, OR, and NOT all map to the same '+' separator, and fielded
 * terms are passed through unchanged.
 */
public class BiodiversityLibraryTransformer extends AbstractQueryTransformer {

    // Single separator shared by all three "logical" operators.
    private static final String TERM_SEPARATOR = "+";

    @Override
    protected String getLogicalAndOperator() {
        return TERM_SEPARATOR;
    }

    @Override
    protected String getLogicalOrOperator() {
        return TERM_SEPARATOR;
    }

    @Override
    protected String getLogicalNotOperator() {
        return TERM_SEPARATOR;
    }

    @Override
    protected String handleAuthor(String author) {
        return author;
    }

    @Override
    protected String handleTitle(String title) {
        return title;
    }

    @Override
    protected String handleJournal(String journalTitle) {
        return journalTitle;
    }

    @Override
    protected String handleYear(String year) {
        return year;
    }
}
| 785 | 18.65 | 78 | java |
null | jabref-main/src/main/java/org/jabref/logic/importer/fetcher/transformers/CiteSeerQueryTransformer.java | package org.jabref.logic.importer.fetcher.transformers;
import java.util.Calendar;
import java.util.Optional;
import org.jabref.model.strings.StringUtil;
import kong.unirest.json.JSONArray;
import kong.unirest.json.JSONObject;
/**
 * Query transformer for CiteSeerX. Unlike the other transformers, the query is
 * not encoded in a URL string but accumulated in a JSON payload for a POST
 * request; this class is therefore stateful and must not be reused across queries.
 */
public class CiteSeerQueryTransformer extends AbstractQueryTransformer {

    // Accumulated POST body; filled as the query tree is walked.
    private JSONObject payload = new JSONObject();

    /**
     * Default values for necessary parameters set in constructor
     */
    public CiteSeerQueryTransformer() {
        handlePage("1");
        handlePageSize("20");
        this.getJSONPayload().put("must_have_pdf", "false");
        handleSortBy("relevance");
    }

    @Override
    protected String getLogicalAndOperator() {
        return " ";
    }

    @Override
    protected String getLogicalOrOperator() {
        return " ";
    }

    @Override
    protected String getLogicalNotOperator() {
        return "";
    }

    @Override
    protected String handleAuthor(String author) {
        if (!getJSONPayload().has("author")) {
            this.getJSONPayload().put("author", new JSONArray());
        }
        // Fixed: the previous code appended a no-op .toString() whose result was discarded.
        this.getJSONPayload().getJSONArray("author").put(author);
        return StringUtil.quoteStringIfSpaceIsContained(author);
    }

    @Override
    protected String handleTitle(String title) {
        this.getJSONPayload().put("queryString", title);
        return StringUtil.quoteStringIfSpaceIsContained(title);
    }

    @Override
    protected String handleJournal(String journalTitle) {
        this.getJSONPayload().put("journal", journalTitle);
        return StringUtil.quoteStringIfSpaceIsContained(journalTitle);
    }

    @Override
    protected String handleYear(String year) {
        // A single year is encoded as a degenerate range [year, year].
        this.getJSONPayload().put("yearStart", Integer.parseInt(year));
        this.getJSONPayload().put("yearEnd", Integer.parseInt(year));
        return StringUtil.quoteStringIfSpaceIsContained(year);
    }

    @Override
    protected String handleYearRange(String yearRange) {
        parseYearRange(yearRange);
        if (endYear == Integer.MAX_VALUE) { // invalid year range
            // Fall back to "everything up to the current year".
            Calendar calendar = Calendar.getInstance();
            this.getJSONPayload().put("yearEnd", calendar.get(Calendar.YEAR));
            return "";
        }
        this.getJSONPayload().put("yearStart", startYear);
        this.getJSONPayload().put("yearEnd", endYear);
        return yearRange;
    }

    /**
     * covers the five fields that are required to make a POST request
     * except "must_have_pdf" as FullTextFetcher is not yet implemented for CiteSeer
     */
    @Override
    protected Optional<String> handleOtherField(String fieldAsString, String term) {
        return switch (fieldAsString) {
            case "page" -> handlePage(term);
            case "pageSize" -> handlePageSize(term);
            case "must_have_pdf" -> handleMustHavePdf(term);
            case "sortBy" -> handleSortBy(term);
            default -> super.handleOtherField(fieldAsString, term);
        };
    }

    // as mentioned before, there may be a Jabref constant for page/page-size
    private Optional<String> handlePage(String page) {
        this.getJSONPayload().put("page", StringUtil.intValueOf(page));
        return Optional.of(page);
    }

    private Optional<String> handlePageSize(String pageSize) {
        this.getJSONPayload().put("pageSize", StringUtil.intValueOf(pageSize));
        return Optional.of(pageSize);
    }

    private Optional<String> handleMustHavePdf(String mustHavePdf) {
        this.getJSONPayload().put("must_have_pdf", mustHavePdf);
        return Optional.of(mustHavePdf);
    }

    private Optional<String> handleSortBy(String sortBy) {
        this.getJSONPayload().put("sortBy", sortBy);
        return Optional.of(sortBy);
    }

    /**
     * Returns the JSON body accumulated so far; used by the fetcher as the POST payload.
     */
    public JSONObject getJSONPayload() {
        return this.payload;
    }
}
| 3,860 | 30.909091 | 84 | java |
null | jabref-main/src/main/java/org/jabref/logic/importer/fetcher/transformers/CollectionOfComputerScienceBibliographiesQueryTransformer.java | package org.jabref.logic.importer.fetcher.transformers;
import org.jabref.model.strings.StringUtil;
/**
 * Query transformer for the Collection of Computer Science Bibliographies.
 * Terms are space-separated (implicit AND); OR and a '-' prefix for NOT are
 * supported, and author/title/year map to the fielded syntax au:/ti:/year:.
 */
public class CollectionOfComputerScienceBibliographiesQueryTransformer extends AbstractQueryTransformer {

    @Override
    protected String getLogicalAndOperator() {
        return " ";
    }

    @Override
    protected String getLogicalOrOperator() {
        return " OR ";
    }

    @Override
    protected String getLogicalNotOperator() {
        return "-";
    }

    @Override
    protected String handleAuthor(String author) {
        return createKeyValuePair("au", author);
    }

    @Override
    protected String handleTitle(String title) {
        return createKeyValuePair("ti", title);
    }

    @Override
    protected String handleJournal(String journalTitle) {
        // No dedicated journal field; quote if needed and search as free text.
        return StringUtil.quoteStringIfSpaceIsContained(journalTitle);
    }

    @Override
    protected String handleYear(String year) {
        return "year:" + year;
    }
}
| 980 | 22.357143 | 105 | java |
null | jabref-main/src/main/java/org/jabref/logic/importer/fetcher/transformers/DBLPQueryTransformer.java | package org.jabref.logic.importer.fetcher.transformers;
import org.jabref.model.strings.StringUtil;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * Query transformer for DBLP. DBLP offers no explicit year field, so year and
 * year-range terms are handled client-side via
 * {@link YearAndYearRangeByFilteringQueryTransformer}.
 */
public class DBLPQueryTransformer extends YearAndYearRangeByFilteringQueryTransformer {

    private static final Logger LOGGER = LoggerFactory.getLogger(DBLPQueryTransformer.class);

    @Override
    protected String getLogicalAndOperator() {
        // A blank separates terms that must all match.
        return " ";
    }

    @Override
    protected String getLogicalOrOperator() {
        return "|";
    }

    @Override
    protected String getLogicalNotOperator() {
        LOGGER.warn("DBLP does not support Boolean NOT operator.");
        return "";
    }

    @Override
    protected String handleAuthor(String authorName) {
        // No author-specific search key exists; fall back to a (quoted) phrase.
        return StringUtil.quoteStringIfSpaceIsContained(authorName);
    }

    @Override
    protected String handleTitle(String titleTerm) {
        // No title-specific search key exists; fall back to a (quoted) phrase.
        return StringUtil.quoteStringIfSpaceIsContained(titleTerm);
    }

    @Override
    protected String handleJournal(String journalTitle) {
        // No journal-specific search key exists; fall back to a (quoted) phrase.
        return StringUtil.quoteStringIfSpaceIsContained(journalTitle);
    }
}
| 1,401 | 28.208333 | 111 | java |
null | jabref-main/src/main/java/org/jabref/logic/importer/fetcher/transformers/DefaultLuceneQueryTransformer.java | package org.jabref.logic.importer.fetcher.transformers;
/**
 * Emits the query in plain Lucene syntax: {@code field:value} pairs combined
 * with explicit AND/OR/NOT operators.
 */
public class DefaultLuceneQueryTransformer extends AbstractQueryTransformer {

    @Override
    protected String getLogicalAndOperator() {
        return " AND ";
    }

    @Override
    protected String getLogicalOrOperator() {
        return " OR ";
    }

    @Override
    protected String getLogicalNotOperator() {
        return "NOT ";
    }

    @Override
    protected String handleAuthor(String authorName) {
        return createKeyValuePair("author", authorName);
    }

    @Override
    protected String handleTitle(String titleTerm) {
        return createKeyValuePair("title", titleTerm);
    }

    @Override
    protected String handleJournal(String journalTitle) {
        return createKeyValuePair("journal", journalTitle);
    }

    @Override
    protected String handleYear(String year) {
        return createKeyValuePair("year", year);
    }
}
| 969 | 21.55814 | 77 | java |
null | jabref-main/src/main/java/org/jabref/logic/importer/fetcher/transformers/DefaultQueryTransformer.java | package org.jabref.logic.importer.fetcher.transformers;
import org.jabref.model.strings.StringUtil;
/**
 * Fallback transformer for engines without Boolean operators: all terms are
 * simply separated by blanks. Year and year-range handling is inherited
 * (client-side filtering).
 */
public class DefaultQueryTransformer extends YearAndYearRangeByFilteringQueryTransformer {

    @Override
    protected String getLogicalAndOperator() {
        return " ";
    }

    @Override
    protected String getLogicalOrOperator() {
        return " ";
    }

    @Override
    protected String getLogicalNotOperator() {
        return "";
    }

    @Override
    protected String handleAuthor(String authorName) {
        return StringUtil.quoteStringIfSpaceIsContained(authorName);
    }

    @Override
    protected String handleTitle(String titleTerm) {
        return StringUtil.quoteStringIfSpaceIsContained(titleTerm);
    }

    @Override
    protected String handleJournal(String journalTitle) {
        return StringUtil.quoteStringIfSpaceIsContained(journalTitle);
    }
}
| 945 | 22.65 | 90 | java |
null | jabref-main/src/main/java/org/jabref/logic/importer/fetcher/transformers/GVKQueryTransformer.java | package org.jabref.logic.importer.fetcher.transformers;
import java.util.Optional;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
public class GVKQueryTransformer extends YearRangeByFilteringQueryTransformer {

    private static final Logger LOGGER = LoggerFactory.getLogger(GVKQueryTransformer.class);

    @Override
    protected String getLogicalAndOperator() {
        return " and ";
    }

    @Override
    protected String getLogicalOrOperator() {
        LOGGER.warn("GVK does not support Boolean OR operator");
        return " ";
    }

    @Override
    protected String getLogicalNotOperator() {
        LOGGER.warn("GVK does not support Boolean NOT operator");
        return " ";
    }

    @Override
    protected String handleAuthor(String authorName) {
        // "per" = person
        return createKeyValuePair("pica.per", authorName, "=");
    }

    @Override
    protected String handleTitle(String titleTerm) {
        return createKeyValuePair("pica.tit", titleTerm, "=");
    }

    @Override
    protected String handleJournal(String journalTitle) {
        // "zti" = "Zeitschrift" (journal); does not match conferences ("kon")
        return createKeyValuePair("pica.zti", journalTitle, "=");
    }

    @Override
    protected String handleYear(String year) {
        // "erj" = "Erscheinungsjahr" (publication year)
        return "pica.erj=" + year;
    }

    @Override
    protected Optional<String> handleUnFieldedTerm(String term) {
        // "all" covers metadata but not the full text; "txt" would be the
        // reverse (full text only, no metadata).
        return Optional.of(createKeyValuePair("pica.all", term, "="));
    }

    @Override
    protected Optional<String> handleOtherField(String fieldAsString, String term) {
        // Pass any other field through verbatim under the "pica." prefix.
        return Optional.of(createKeyValuePair("pica." + fieldAsString, term, "="));
    }
}
| 1,804 | 28.112903 | 92 | java |
null | jabref-main/src/main/java/org/jabref/logic/importer/fetcher/transformers/IEEEQueryTransformer.java | package org.jabref.logic.importer.fetcher.transformers;
import java.util.List;
import java.util.Objects;
import java.util.Optional;
import org.jabref.model.strings.StringUtil;
/**
 * Query transformer for IEEE Xplore. Stateful — instantiate a fresh instance
 * per query, because journal, article number, and the year bounds are
 * accumulated while the query is transformed.
 */
public class IEEEQueryTransformer extends YearRangeByFilteringQueryTransformer {
    /**
     * Words ignored by the engine; they have to be removed when querying for them.
     * See ADR-0022.
     */
    private static final List<String> STOP_WORDS = List.of("a", "and", "for", "or", "with");

    // IEEE supports these only as dedicated URL query parameters, so they are
    // captured here instead of being embedded in the query string.
    // The journal title is quoted by the transformer when it contains spaces.
    private String journal;
    private String articleNumber;

    @Override
    protected String getLogicalAndOperator() {
        return " AND ";
    }

    @Override
    protected String getLogicalOrOperator() {
        return " OR ";
    }

    @Override
    protected String getLogicalNotOperator() {
        return "NOT ";
    }

    @Override
    protected String handleAuthor(String authorName) {
        return createKeyValuePair("author", authorName);
    }

    @Override
    protected String handleTitle(String titleTerm) {
        return createKeyValuePair("article_title", titleTerm);
    }

    @Override
    protected String handleJournal(String journalTitle) {
        this.journal = journalTitle;
        return StringUtil.quoteStringIfSpaceIsContained(journalTitle);
    }

    @Override
    protected String handleYear(String year) {
        // Years are filtered client-side: widen the tracked range, emit nothing.
        int parsedYear = Integer.parseInt(year);
        startYear = Math.min(startYear, parsedYear);
        endYear = Math.max(endYear, parsedYear);
        return "";
    }

    @Override
    protected Optional<String> handleOtherField(String fieldAsString, String term) {
        if ("article_number".equals(fieldAsString)) {
            return handleArticleNumber(term);
        }
        return super.handleOtherField(fieldAsString, term);
    }

    @Override
    protected Optional<String> handleUnFieldedTerm(String term) {
        return STOP_WORDS.contains(term) ? Optional.empty() : super.handleUnFieldedTerm(term);
    }

    private Optional<String> handleArticleNumber(String term) {
        // Captured for the URL; nothing is contributed to the query string.
        this.articleNumber = term;
        return Optional.empty();
    }

    public Optional<String> getJournal() {
        return Optional.ofNullable(journal);
    }

    public Optional<String> getArticleNumber() {
        return Optional.ofNullable(articleNumber);
    }
}
| 2,598 | 27.56044 | 106 | java |
null | jabref-main/src/main/java/org/jabref/logic/importer/fetcher/transformers/JstorQueryTransformer.java | package org.jabref.logic.importer.fetcher.transformers;
public class JstorQueryTransformer extends AbstractQueryTransformer {

    @Override
    protected String getLogicalAndOperator() {
        return " AND ";
    }

    @Override
    protected String getLogicalOrOperator() {
        return " OR ";
    }

    @Override
    protected String getLogicalNotOperator() {
        return "NOT ";
    }

    @Override
    protected String handleAuthor(String author) {
        return createKeyValuePair("au", author);
    }

    @Override
    protected String handleTitle(String title) {
        return createKeyValuePair("ti", title);
    }

    @Override
    protected String handleJournal(String journalTitle) {
        // "pt" = publication title
        return createKeyValuePair("pt", journalTitle);
    }

    /**
     * Restricts both start date ("sd") and end date ("ed") to the given year.
     * Fix: "ed:" previously carried a stray space ("ed: 2000"), inconsistent
     * with {@link #handleYearRange(String)} and producing a malformed term.
     *
     * @param year the single year to restrict to
     */
    @Override
    protected String handleYear(String year) {
        return "sd:" + year + getLogicalAndOperator() + "ed:" + year;
    }

    /**
     * Converts a year range into {@code sd:<start> AND ed:<end>}. If the range
     * cannot be parsed (endYear remains at Integer.MAX_VALUE), the raw term is
     * returned unchanged.
     *
     * @param yearRange the raw year-range term from the query
     */
    @Override
    protected String handleYearRange(String yearRange) {
        parseYearRange(yearRange);
        if (endYear == Integer.MAX_VALUE) {
            return yearRange;
        }
        return "sd:" + startYear + getLogicalAndOperator() + "ed:" + endYear;
    }
}
| 1,224 | 24.520833 | 113 | java |
null | jabref-main/src/main/java/org/jabref/logic/importer/fetcher/transformers/MedlineQueryTransformer.java | package org.jabref.logic.importer.fetcher.transformers;
/**
 * Medline/PubMed-specific transformer which appends search-tag suffixes
 * (e.g. {@code [au]}, {@code [ti]}) to each term.
 * See <a href="https://pubmed.ncbi.nlm.nih.gov/help/#search-tags">the PubMed help</a> for the tag list.
 */
public class MedlineQueryTransformer extends AbstractQueryTransformer {

    @Override
    protected String getLogicalAndOperator() {
        return " AND ";
    }

    @Override
    protected String getLogicalOrOperator() {
        return " OR ";
    }

    @Override
    protected String getLogicalNotOperator() {
        return "NOT ";
    }

    @Override
    protected String handleAuthor(String authorName) {
        return authorName + "[au]";
    }

    @Override
    protected String handleTitle(String titleTerm) {
        return titleTerm + "[ti]";
    }

    @Override
    protected String handleJournal(String journalTitle) {
        return journalTitle + "[ta]";
    }

    @Override
    protected String handleYear(String year) {
        // [dp] = date of publication
        return year + "[dp]";
    }

    @Override
    protected String handleYearRange(String yearRange) {
        parseYearRange(yearRange);
        if (endYear == Integer.MAX_VALUE) {
            // Range could not be parsed; leave the term untouched.
            return yearRange;
        }
        return startYear + ":" + endYear + "[dp]";
    }
}
| 1,289 | 22.454545 | 94 | java |
null | jabref-main/src/main/java/org/jabref/logic/importer/fetcher/transformers/ResearchGateQueryTransformer.java | package org.jabref.logic.importer.fetcher.transformers;
import org.jabref.model.strings.StringUtil;
public class ResearchGateQueryTransformer extends AbstractQueryTransformer {

    @Override
    protected String getLogicalAndOperator() {
        return " AND ";
    }

    @Override
    protected String getLogicalOrOperator() {
        return " OR ";
    }

    @Override
    protected String getLogicalNotOperator() {
        return "NOT ";
    }

    // ResearchGate has no fielded search keys: every term is sent as a plain
    // phrase, quoted when it contains spaces.

    @Override
    protected String handleAuthor(String authorName) {
        return StringUtil.quoteStringIfSpaceIsContained(authorName);
    }

    @Override
    protected String handleTitle(String titleTerm) {
        return StringUtil.quoteStringIfSpaceIsContained(titleTerm);
    }

    @Override
    protected String handleJournal(String journalTitle) {
        return StringUtil.quoteStringIfSpaceIsContained(journalTitle);
    }

    @Override
    protected String handleYear(String year) {
        return StringUtil.quoteStringIfSpaceIsContained(year);
    }
}
| 1,006 | 22.97619 | 76 | java |
null | jabref-main/src/main/java/org/jabref/logic/importer/fetcher/transformers/ScholarQueryTransformer.java | package org.jabref.logic.importer.fetcher.transformers;
public class ScholarQueryTransformer extends YearAndYearRangeByFilteringQueryTransformer {

    // Start year and end year can only be passed as dedicated URL query
    // parameters; the tracking state (startYear/endYear) lives in the
    // superclass. Fix: this class previously re-declared private shadowing
    // copies of those fields that were never read — they have been removed.

    @Override
    protected String getLogicalAndOperator() {
        return " AND ";
    }

    @Override
    protected String getLogicalOrOperator() {
        return " OR ";
    }

    @Override
    protected String getLogicalNotOperator() {
        return "-";
    }

    @Override
    protected String handleAuthor(String author) {
        return createKeyValuePair("author", author);
    }

    @Override
    protected String handleTitle(String title) {
        return createKeyValuePair("allintitle", title);
    }

    @Override
    protected String handleJournal(String journalTitle) {
        return createKeyValuePair("source", journalTitle);
    }
}
| 1,028 | 26.078947 | 126 | java |
null | jabref-main/src/main/java/org/jabref/logic/importer/fetcher/transformers/SpringerQueryTransformer.java | package org.jabref.logic.importer.fetcher.transformers;
/**
 * Converts a query string written in Lucene syntax into the Springer API's
 * query syntax. For simplicity this is currently limited to fielded data
 * and Boolean operators.
 */
public class SpringerQueryTransformer extends AbstractQueryTransformer {

    @Override
    public String getLogicalAndOperator() {
        return " AND ";
    }

    @Override
    public String getLogicalOrOperator() {
        return " OR ";
    }

    @Override
    protected String getLogicalNotOperator() {
        return "-";
    }

    @Override
    protected String handleAuthor(String authorName) {
        return createKeyValuePair("name", authorName);
    }

    @Override
    protected String handleTitle(String titleTerm) {
        return createKeyValuePair("title", titleTerm);
    }

    @Override
    protected String handleJournal(String journalTitle) {
        return createKeyValuePair("journal", journalTitle);
    }

    @Override
    protected String handleYear(String year) {
        // Trailing wildcard so any date within the year matches.
        return "date:" + year + "*";
    }
}
| 1,082 | 23.066667 | 89 | java |
null | jabref-main/src/main/java/org/jabref/logic/importer/fetcher/transformers/YearAndYearRangeByFilteringQueryTransformer.java | package org.jabref.logic.importer.fetcher.transformers;
/**
 * Base class for fetchers whose APIs support neither a year field nor a
 * year-range filter server-side. Both are recorded here and JabRef filters
 * the fetched results on the client.
 */
public abstract class YearAndYearRangeByFilteringQueryTransformer extends YearRangeByFilteringQueryTransformer {

    @Override
    protected String handleYear(String year) {
        // Widen the tracked [startYear, endYear] interval; emit nothing,
        // since the server cannot filter by year.
        int parsedYear = Integer.parseInt(year);
        if (parsedYear < startYear) {
            startYear = parsedYear;
        }
        if (parsedYear > endYear) {
            endYear = parsedYear;
        }
        return "";
    }
}
| 586 | 35.6875 | 114 | java |
null | jabref-main/src/main/java/org/jabref/logic/importer/fetcher/transformers/YearRangeByFilteringQueryTransformer.java | package org.jabref.logic.importer.fetcher.transformers;
import java.util.Optional;
/**
 * Base class for fetchers whose API cannot filter by a year range
 * (e.g., only publications between 1999 and 2002) server-side.
 * The range is recorded here and JabRef filters the results client-side.
 */
public abstract class YearRangeByFilteringQueryTransformer extends AbstractQueryTransformer {

    /** @return the lower bound of the requested range, if any year term was seen */
    public Optional<Integer> getStartYear() {
        if (startYear == Integer.MAX_VALUE) {
            return Optional.empty();
        }
        return Optional.of(startYear);
    }

    /** @return the upper bound of the requested range, if any year term was seen */
    public Optional<Integer> getEndYear() {
        if (endYear == Integer.MIN_VALUE) {
            return Optional.empty();
        }
        return Optional.of(endYear);
    }

    /**
     * The API does not support querying for a year range; the bounds are only
     * recorded for filtering after the fetch.
     *
     * @return always "", because filtering happens on the client
     */
    @Override
    protected String handleYearRange(String yearRange) {
        parseYearRange(yearRange);
        return "";
    }
}
| 1,124 | 34.15625 | 153 | java |
null | jabref-main/src/main/java/org/jabref/logic/importer/fetcher/transformers/ZbMathQueryTransformer.java | package org.jabref.logic.importer.fetcher.transformers;
import java.util.Optional;
public class ZbMathQueryTransformer extends AbstractQueryTransformer {

    @Override
    protected String getLogicalAndOperator() {
        return " & ";
    }

    @Override
    protected String getLogicalOrOperator() {
        return " | ";
    }

    @Override
    protected String getLogicalNotOperator() {
        return "!";
    }

    @Override
    protected String handleAuthor(String authorName) {
        return createKeyValuePair("au", authorName);
    }

    @Override
    protected String handleTitle(String titleTerm) {
        return createKeyValuePair("ti", titleTerm);
    }

    @Override
    protected String handleJournal(String journalTitle) {
        // "so" presumably abbreviates "source" — verify against the zbMATH docs.
        return createKeyValuePair("so", journalTitle);
    }

    @Override
    protected String handleYear(String year) {
        // "py" = publication year
        return "py:" + year;
    }

    @Override
    protected String handleYearRange(String yearRange) {
        // zbMATH accepts the range syntax directly.
        return "py:" + yearRange;
    }

    @Override
    protected Optional<String> handleUnFieldedTerm(String term) {
        // "any" searches across all fields.
        return Optional.of(createKeyValuePair("any", term));
    }
}
| 1,155 | 21.230769 | 70 | java |
null | jabref-main/src/main/java/org/jabref/logic/importer/fileformat/ACMPortalParser.java | package org.jabref.logic.importer.fileformat;
import java.io.IOException;
import java.io.InputStream;
import java.net.CookieHandler;
import java.net.CookieManager;
import java.net.MalformedURLException;
import java.net.URISyntaxException;
import java.net.URL;
import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.Map;
import java.util.StringJoiner;
import java.util.stream.Collectors;
import org.jabref.logic.importer.FetcherException;
import org.jabref.logic.importer.ParseException;
import org.jabref.logic.importer.Parser;
import org.jabref.logic.net.URLDownload;
import org.jabref.model.entry.BibEntry;
import org.jabref.model.entry.field.StandardField;
import org.jabref.model.entry.types.StandardEntryType;
import com.google.common.base.CaseFormat;
import com.google.common.base.Enums;
import com.google.gson.JsonArray;
import com.google.gson.JsonElement;
import com.google.gson.JsonObject;
import com.google.gson.JsonParser;
import org.apache.http.client.utils.URIBuilder;
import org.jsoup.Jsoup;
import org.jsoup.nodes.Document;
import org.jsoup.nodes.Element;
import org.jsoup.select.Elements;
public class ACMPortalParser implements Parser {
private static final String HOST = "https://dl.acm.org";
private static final String DOI_URL = "https://dl.acm.org/action/exportCiteProcCitation";
/**
* Parse the DOI of the ACM Portal search result page and obtain the corresponding BibEntry
*
* @param stream html stream
* @return BibEntry List
*/
@Override
public List<BibEntry> parseEntries(InputStream stream) throws ParseException {
List<BibEntry> bibEntries;
try {
bibEntries = getBibEntriesFromDoiList(this.parseDoiSearchPage(stream));
} catch (FetcherException e) {
throw new ParseException(e);
}
return bibEntries;
}
/**
* Parse all DOIs from the ACM Portal search results page
*
* @param stream html stream
* @return DOI list
*/
public List<String> parseDoiSearchPage(InputStream stream) throws ParseException {
List<String> doiList = new ArrayList<>();
try {
Document doc = Jsoup.parse(stream, null, HOST);
Elements doiHrefs = doc.select("div.issue-item__content-right > h5 > span > a");
for (Element elem : doiHrefs) {
String fullSegment = elem.attr("href");
String doi = fullSegment.substring(fullSegment.indexOf("10"));
doiList.add(doi);
}
} catch (IOException ex) {
throw new ParseException(ex);
}
return doiList;
}
/**
* Obtain BibEntry according to DOI
*
* @param doiList DOI List
* @return BibEntry List
*/
public List<BibEntry> getBibEntriesFromDoiList(List<String> doiList) throws FetcherException {
List<BibEntry> bibEntries = new ArrayList<>();
CookieHandler.setDefault(new CookieManager());
try (InputStream stream = new URLDownload(getUrlFromDoiList(doiList)).asInputStream()) {
String jsonString = new String((stream.readAllBytes()), StandardCharsets.UTF_8);
JsonElement jsonElement = JsonParser.parseString(jsonString);
if (jsonElement.isJsonObject()) {
JsonArray items = jsonElement.getAsJsonObject().getAsJsonArray("items");
for (JsonElement item : items) {
for (Map.Entry<String, JsonElement> entry : item.getAsJsonObject().entrySet()) {
bibEntries.add(parseBibEntry(entry.getValue().toString()));
}
}
}
} catch (IOException | URISyntaxException e) {
throw new FetcherException("A network error occurred while fetching from ", e);
}
return bibEntries;
}
/**
* Constructing the query url for the doi
*
* @param doiList DOI List
* @return query URL
*/
public URL getUrlFromDoiList(List<String> doiList) throws URISyntaxException, MalformedURLException {
URIBuilder uriBuilder = new URIBuilder(DOI_URL);
uriBuilder.addParameter("targetFile", "custom-bibtex");
uriBuilder.addParameter("format", "bibTex");
uriBuilder.addParameter("dois", String.join(",", doiList));
return uriBuilder.build().toURL();
}
private StandardEntryType typeStrToEnum(String typeStr) {
StandardEntryType type;
if ("PAPER_CONFERENCE".equals(typeStr)) {
type = StandardEntryType.Conference;
} else {
String upperUnderscoreTyeStr = CaseFormat.UPPER_UNDERSCORE.to(CaseFormat.UPPER_CAMEL, typeStr);
type = Enums.getIfPresent(StandardEntryType.class, upperUnderscoreTyeStr).or(StandardEntryType.Article);
}
return type;
}
/**
* Parse BibEntry from query result xml
*
* @param jsonStr query result in JSON format
* @return BibEntry parsed from query result
*/
public BibEntry parseBibEntry(String jsonStr) {
JsonObject jsonObject = JsonParser.parseString(jsonStr).getAsJsonObject();
BibEntry bibEntry = new BibEntry();
if (jsonObject.has("type")) {
bibEntry.setType(typeStrToEnum(jsonObject.get("type").getAsString()));
}
if (jsonObject.has("author")) {
JsonArray authors = jsonObject.getAsJsonArray("author");
StringJoiner authorsJoiner = new StringJoiner(" and ");
for (JsonElement author : authors) {
JsonObject authorJsonObject = author.getAsJsonObject();
authorsJoiner.add(
authorJsonObject.get("given").getAsString() + " " + authorJsonObject.get("family").getAsString()
);
}
bibEntry.setField(StandardField.AUTHOR, authorsJoiner.toString());
}
if (jsonObject.has("issued")) {
JsonObject issued = jsonObject.get("issued").getAsJsonObject();
if (issued.has("date-parts")) {
JsonArray dateArray = issued.get("date-parts").getAsJsonArray().get(0).getAsJsonArray();
StandardField[] dateField = {StandardField.YEAR, StandardField.MONTH, StandardField.DAY};
for (int i = 0; i < dateArray.size(); i++) {
bibEntry.setField(dateField[i], dateArray.get(i).getAsString());
}
}
}
if (jsonObject.has("abstract")) {
bibEntry.setField(StandardField.ABSTRACT, jsonObject.get("abstract").getAsString());
}
if (jsonObject.has("collection-title")) {
bibEntry.setField(StandardField.SERIES, jsonObject.get("collection-title").getAsString());
}
if (jsonObject.has("container-title")) {
bibEntry.setField(StandardField.BOOKTITLE, jsonObject.get("container-title").getAsString());
}
if (jsonObject.has("DOI")) {
bibEntry.setField(StandardField.DOI, jsonObject.get("DOI").getAsString());
}
if (jsonObject.has("event-place")) {
bibEntry.setField(StandardField.LOCATION, jsonObject.get("event-place").getAsString());
}
if (jsonObject.has("ISBN")) {
bibEntry.setField(StandardField.ISBN, jsonObject.get("ISBN").getAsString());
}
if (jsonObject.has("keyword")) {
String[] keywords = jsonObject.get("keyword").getAsString().split(", ");
String sortedKeywords = Arrays.stream(keywords).sorted().collect(Collectors.joining(", "));
bibEntry.setField(StandardField.KEYWORDS, sortedKeywords);
}
if (jsonObject.has("number-of-pages")) {
bibEntry.setField(StandardField.PAGETOTAL, jsonObject.get("number-of-pages").getAsString());
}
if (jsonObject.has("page")) {
bibEntry.setField(StandardField.PAGES, jsonObject.get("page").getAsString());
}
if (jsonObject.has("publisher")) {
bibEntry.setField(StandardField.PUBLISHER, jsonObject.get("publisher").getAsString());
}
if (jsonObject.has("publisher-place")) {
bibEntry.setField(StandardField.ADDRESS, jsonObject.get("publisher-place").getAsString());
}
if (jsonObject.has("title")) {
bibEntry.setField(StandardField.TITLE, jsonObject.get("title").getAsString());
}
if (jsonObject.has("URL")) {
bibEntry.setField(StandardField.URL, jsonObject.get("URL").getAsString());
}
return bibEntry;
}
}
| 8,695 | 36.645022 | 120 | java |
null | jabref-main/src/main/java/org/jabref/logic/importer/fileformat/BiblioscapeImporter.java | package org.jabref.logic.importer.fileformat;
import java.io.BufferedReader;
import java.io.IOException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Locale;
import java.util.Map;
import java.util.Objects;
import org.jabref.logic.importer.Importer;
import org.jabref.logic.importer.ParserResult;
import org.jabref.logic.util.StandardFileType;
import org.jabref.model.entry.BibEntry;
import org.jabref.model.entry.field.Field;
import org.jabref.model.entry.field.StandardField;
import org.jabref.model.entry.types.EntryType;
import org.jabref.model.entry.types.StandardEntryType;
/**
* Imports a Biblioscape Tag File. The format is described on
* http://www.biblioscape.com/download/Biblioscape8.pdf Several
* Biblioscape field types are ignored. Others are only included in the BibTeX
* field "comment".
*/
public class BiblioscapeImporter extends Importer {
@Override
public String getName() {
return "Biblioscape";
}
@Override
public StandardFileType getFileType() {
return StandardFileType.TXT;
}
@Override
public String getDescription() {
return "Imports a Biblioscape Tag File.\n" +
"Several Biblioscape field types are ignored. Others are only included in the BibTeX field \"comment\".";
}
@Override
public boolean isRecognizedFormat(BufferedReader reader) {
Objects.requireNonNull(reader);
return true;
}
@Override
public ParserResult importDatabase(BufferedReader reader) throws IOException {
List<BibEntry> bibItems = new ArrayList<>();
String line;
Map<Field, String> hm = new HashMap<>();
Map<String, StringBuilder> lines = new HashMap<>();
StringBuilder previousLine = null;
while ((line = reader.readLine()) != null) {
if (line.isEmpty()) {
continue; // ignore empty lines, e.g. at file
}
// end
// entry delimiter -> item complete
if ("------".equals(line)) {
String[] type = new String[2];
String[] pages = new String[2];
String country = null;
String address = null;
String titleST = null;
String titleTI = null;
List<String> comments = new ArrayList<>();
// add item
for (Map.Entry<String, StringBuilder> entry : lines.entrySet()) {
if ("AU".equals(entry.getKey())) {
hm.put(StandardField.AUTHOR, entry.getValue()
.toString());
} else if ("TI".equals(entry.getKey())) {
titleTI = entry.getValue()
.toString();
} else if ("ST".equals(entry.getKey())) {
titleST = entry.getValue()
.toString();
} else if ("YP".equals(entry.getKey())) {
hm.put(StandardField.YEAR, entry
.getValue().toString());
} else if ("VL".equals(entry.getKey())) {
hm.put(StandardField.VOLUME, entry
.getValue().toString());
} else if ("NB".equals(entry.getKey())) {
hm.put(StandardField.NUMBER, entry
.getValue().toString());
} else if ("PS".equals(entry.getKey())) {
pages[0] = entry.getValue()
.toString();
} else if ("PE".equals(entry.getKey())) {
pages[1] = entry.getValue()
.toString();
} else if ("KW".equals(entry.getKey())) {
hm.put(StandardField.KEYWORDS, entry
.getValue().toString());
} else if ("RT".equals(entry.getKey())) {
type[0] = entry.getValue()
.toString();
} else if ("SB".equals(entry.getKey())) {
comments.add("Subject: "
+ entry.getValue());
} else if ("SA".equals(entry.getKey())) {
comments
.add("Secondary Authors: " + entry.getValue());
} else if ("NT".equals(entry.getKey())) {
hm.put(StandardField.NOTE, entry
.getValue().toString());
} else if ("PB".equals(entry.getKey())) {
hm.put(StandardField.PUBLISHER, entry
.getValue().toString());
} else if ("TA".equals(entry.getKey())) {
comments
.add("Tertiary Authors: " + entry.getValue());
} else if ("TT".equals(entry.getKey())) {
comments
.add("Tertiary Title: " + entry.getValue());
} else if ("ED".equals(entry.getKey())) {
hm.put(StandardField.EDITION, entry
.getValue().toString());
} else if ("TW".equals(entry.getKey())) {
type[1] = entry.getValue()
.toString();
} else if ("QA".equals(entry.getKey())) {
comments
.add("Quaternary Authors: " + entry.getValue());
} else if ("QT".equals(entry.getKey())) {
comments
.add("Quaternary Title: " + entry.getValue());
} else if ("IS".equals(entry.getKey())) {
hm.put(StandardField.ISBN, entry
.getValue().toString());
} else if ("AB".equals(entry.getKey())) {
hm.put(StandardField.ABSTRACT, entry
.getValue().toString());
} else if ("AD".equals(entry.getKey())) {
address = entry.getValue()
.toString();
} else if ("LG".equals(entry.getKey())) {
hm.put(StandardField.LANGUAGE, entry
.getValue().toString());
} else if ("CO".equals(entry.getKey())) {
country = entry.getValue()
.toString();
} else if ("UR".equals(entry.getKey()) || "AT".equals(entry.getKey())) {
String s = entry.getValue().toString().trim();
hm.put(s.startsWith("http://") || s.startsWith("ftp://") ? StandardField.URL
: StandardField.PDF, entry.getValue().toString());
} else if ("C1".equals(entry.getKey())) {
comments.add("Custom1: "
+ entry.getValue());
} else if ("C2".equals(entry.getKey())) {
comments.add("Custom2: "
+ entry.getValue());
} else if ("C3".equals(entry.getKey())) {
comments.add("Custom3: "
+ entry.getValue());
} else if ("C4".equals(entry.getKey())) {
comments.add("Custom4: "
+ entry.getValue());
} else if ("C5".equals(entry.getKey())) {
comments.add("Custom5: "
+ entry.getValue());
} else if ("C6".equals(entry.getKey())) {
comments.add("Custom6: "
+ entry.getValue());
} else if ("DE".equals(entry.getKey())) {
hm.put(StandardField.ANNOTE, entry
.getValue().toString());
} else if ("CA".equals(entry.getKey())) {
comments.add("Categories: "
+ entry.getValue());
} else if ("TH".equals(entry.getKey())) {
comments.add("Short Title: "
+ entry.getValue());
} else if ("SE".equals(entry.getKey())) {
hm.put(StandardField.CHAPTER, entry
.getValue().toString());
// else if (entry.getKey().equals("AC"))
// hm.put("",entry.getValue().toString());
// else if (entry.getKey().equals("LP"))
// hm.put("",entry.getValue().toString());
}
}
EntryType bibtexType = BibEntry.DEFAULT_TYPE;
// to find type, first check TW, then RT
for (int i = 1; (i >= 0) && BibEntry.DEFAULT_TYPE.equals(bibtexType); --i) {
if (type[i] == null) {
continue;
}
type[i] = type[i].toLowerCase(Locale.ROOT);
if (type[i].contains("article")) {
bibtexType = StandardEntryType.Article;
} else if (type[i].contains("journal")) {
bibtexType = StandardEntryType.Article;
} else if (type[i].contains("book section")) {
bibtexType = StandardEntryType.InBook;
} else if (type[i].contains("book")) {
bibtexType = StandardEntryType.Book;
} else if (type[i].contains("conference")) {
bibtexType = StandardEntryType.InProceedings;
} else if (type[i].contains("proceedings")) {
bibtexType = StandardEntryType.InProceedings;
} else if (type[i].contains("report")) {
bibtexType = StandardEntryType.TechReport;
} else if (type[i].contains("thesis")
&& type[i].contains("master")) {
bibtexType = StandardEntryType.MastersThesis;
} else if (type[i].contains("thesis")) {
bibtexType = StandardEntryType.PhdThesis;
}
}
// depending on bibtexType, decide where to place the titleRT and
// titleTI
if (bibtexType.equals(StandardEntryType.Article)) {
if (titleST != null) {
hm.put(StandardField.JOURNAL, titleST);
}
if (titleTI != null) {
hm.put(StandardField.TITLE, titleTI);
}
} else if (bibtexType.equals(StandardEntryType.InBook)) {
if (titleST != null) {
hm.put(StandardField.BOOKTITLE, titleST);
}
if (titleTI != null) {
hm.put(StandardField.TITLE, titleTI);
}
} else {
if (titleST != null) {
hm.put(StandardField.BOOKTITLE, titleST);
}
if (titleTI != null) {
hm.put(StandardField.TITLE, titleTI);
}
}
// concatenate pages
if ((pages[0] != null) || (pages[1] != null)) {
hm.put(StandardField.PAGES, (pages[0] == null ? "" : pages[0]) + (pages[1] == null ? "" : "--" + pages[1]));
}
// concatenate address and country
if (address != null) {
hm.put(StandardField.ADDRESS, address + (country == null ? "" : ", " + country));
}
if (!comments.isEmpty()) { // set comment if present
hm.put(StandardField.COMMENT, String.join(";", comments));
}
BibEntry b = new BibEntry(bibtexType);
b.setField(hm);
bibItems.add(b);
hm.clear();
lines.clear();
previousLine = null;
continue;
}
// new key
if (line.startsWith("--") && (line.length() >= 7)
&& "-- ".equals(line.substring(4, 7))) {
previousLine = new StringBuilder(line.substring(7));
lines.put(line.substring(2, 4), previousLine);
continue;
}
// continuation (folding) of previous line
if (previousLine == null) {
return new ParserResult();
}
previousLine.append(line.trim());
}
return new ParserResult(bibItems);
}
}
| 13,320 | 45.90493 | 128 | java |
null | jabref-main/src/main/java/org/jabref/logic/importer/fileformat/BibtexImporter.java | package org.jabref.logic.importer.fileformat;
import java.io.BufferedInputStream;
import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.nio.charset.Charset;
import java.nio.charset.CharsetDecoder;
import java.nio.charset.CodingErrorAction;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.Objects;
import java.util.Optional;
import org.jabref.logic.exporter.SaveConfiguration;
import org.jabref.logic.importer.ImportFormatPreferences;
import org.jabref.logic.importer.Importer;
import org.jabref.logic.importer.ParserResult;
import org.jabref.logic.util.StandardFileType;
import org.jabref.model.database.BibDatabaseModeDetection;
import org.jabref.model.util.FileUpdateMonitor;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* This is a full class to read .bib files. It is used for <code>--import</code> and <code>--importToOpen </code>, too.
*/
public class BibtexImporter extends Importer {

    private static final Logger LOGGER = LoggerFactory.getLogger(BibtexImporter.class);

    // Signature written at the top of the .bib file in earlier versions.
    private static final String SIGNATURE = "This file was created with JabRef";

    private final ImportFormatPreferences importFormatPreferences;
    private final FileUpdateMonitor fileMonitor;

    public BibtexImporter(ImportFormatPreferences importFormatPreferences, FileUpdateMonitor fileMonitor) {
        this.importFormatPreferences = importFormatPreferences;
        this.fileMonitor = fileMonitor;
    }

    /**
     * @return true as we have no effective way to decide whether a file is in bibtex format or not. See
     * https://github.com/JabRef/jabref/pull/379#issuecomment-158685726 for more details.
     */
    @Override
    public boolean isRecognizedFormat(BufferedReader reader) {
        Objects.requireNonNull(reader);
        return true;
    }

    /**
     * Imports the given .bib file. The encoding is determined first (JabRef "% Encoding:" marker or
     * charset detection); malformed byte sequences are silently replaced.
     *
     * @param filePath path of the .bib file to read
     * @return the parse result with encoding information, path, and (if missing) inferred database mode set
     */
    @Override
    public ParserResult importDatabase(Path filePath) throws IOException {
        EncodingResult result = getEncodingResult(filePath);
        // We replace unreadable characters
        // Unfortunately, no warning will be issued to the user
        // As this is a very seldom case, we accept that
        CharsetDecoder decoder = result.encoding().newDecoder();
        decoder.onMalformedInput(CodingErrorAction.REPLACE);
        try (InputStreamReader inputStreamReader = new InputStreamReader(Files.newInputStream(filePath), decoder);
             BufferedReader reader = new BufferedReader(inputStreamReader)) {
            ParserResult parserResult = this.importDatabase(reader);
            parserResult.getMetaData().setEncoding(result.encoding());
            parserResult.getMetaData().setEncodingExplicitlySupplied(result.encodingExplicitlySupplied());
            parserResult.setPath(filePath);
            if (parserResult.getMetaData().getMode().isEmpty()) {
                parserResult.getMetaData().setMode(BibDatabaseModeDetection.inferMode(parserResult.getDatabase()));
            }
            return parserResult;
        }
    }

    /**
     * Convenience accessor that only returns the determined {@link Charset} of the given file.
     */
    public static Charset getEncoding(Path filePath) throws IOException {
        return getEncodingResult(filePath).encoding();
    }

    /**
     * Determines the encoding of the supplied BibTeX file. If a JabRef encoding information is present, this information is used.
     * If there is none present, {@link com.ibm.icu.text.CharsetDetector#CharsetDetector()} is used.
     */
    private static EncodingResult getEncodingResult(Path filePath) throws IOException {
        // We want to check if there is a JabRef encoding heading in the file, because that would tell us
        // which character encoding is used.
        // In general, we have to use InputStream and not a Reader, because a Reader requires an encoding specification.
        // We do not want to do a byte-by-byte reading or doing wild try/catch magic.
        // We therefore use a charset detection library and then read JabRefs "% Encoding" mark
        Charset detectedCharset;
        try (InputStream inputStream = Files.newInputStream(filePath)) {
            BufferedInputStream bufferedInputStream = new BufferedInputStream(inputStream);
            // mark/reset so that the detector's look-ahead does not consume the stream
            bufferedInputStream.mark(8192);
            detectedCharset = getCharset(bufferedInputStream);
            bufferedInputStream.reset();
            LOGGER.debug("Detected charset: {}", detectedCharset.name());
        }
        Charset encoding;
        boolean encodingExplicitlySupplied;
        try (BufferedReader reader = Files.newBufferedReader(filePath, detectedCharset)) {
            Optional<Charset> suppliedEncoding = getSuppliedEncoding(reader);
            LOGGER.debug("Supplied encoding: {}", suppliedEncoding);
            encodingExplicitlySupplied = suppliedEncoding.isPresent();
            // in case no encoding information is present, use the detected one
            encoding = suppliedEncoding.orElse(detectedCharset);
            LOGGER.debug("Encoding used to read the file: {}", encoding);
        }
        // return directly; the intermediate local variable added nothing
        return new EncodingResult(encoding, encodingExplicitlySupplied);
    }

    private record EncodingResult(Charset encoding, boolean encodingExplicitlySupplied) {
    }

    /**
     * This method does not set the metadata encoding information. The caller needs to set the encoding of the supplied
     * reader manually to the metadata
     */
    @Override
    public ParserResult importDatabase(BufferedReader reader) throws IOException {
        return new BibtexParser(importFormatPreferences, fileMonitor).parse(reader);
    }

    @Override
    public String getName() {
        return "BibTeX";
    }

    @Override
    public StandardFileType getFileType() {
        return StandardFileType.BIBTEX_DB;
    }

    @Override
    public String getDescription() {
        return "This importer enables `--importToOpen someEntry.bib`";
    }

    /**
     * Searches the file for "Encoding: myEncoding" and returns the found supplied encoding.
     */
    private static Optional<Charset> getSuppliedEncoding(BufferedReader reader) {
        try {
            String line;
            while ((line = reader.readLine()) != null) {
                line = line.trim();
                // % = char 37, we might have some bom chars in front that we need to skip, so we use index of
                var percentPos = line.indexOf('%', 0);
                // Line does not start with %, so there are no comment lines for us and we can stop parsing
                if (percentPos == -1) {
                    return Optional.empty();
                }
                // Only keep the part after %
                line = line.substring(percentPos + 1).trim();
                if (line.startsWith(BibtexImporter.SIGNATURE)) {
                    // Signature line, so keep reading and skip to next line
                } else if (line.startsWith(SaveConfiguration.ENCODING_PREFIX)) {
                    // Line starts with "Encoding: ", so the rest of the line should contain the name of the encoding
                    // Except if there is already a @ symbol signaling the starting of a BibEntry
                    // fix: plain int instead of boxed Integer (avoids pointless autoboxing)
                    int atSymbolIndex = line.indexOf('@');
                    String encoding;
                    if (atSymbolIndex > 0) {
                        encoding = line.substring(SaveConfiguration.ENCODING_PREFIX.length(), atSymbolIndex);
                    } else {
                        encoding = line.substring(SaveConfiguration.ENCODING_PREFIX.length());
                    }
                    return Optional.of(Charset.forName(encoding));
                } else {
                    // Line not recognized so stop parsing
                    return Optional.empty();
                }
            }
        } catch (IOException e) {
            LOGGER.error("Supplied encoding could not be determined", e);
        }
        return Optional.empty();
    }
}
| 8,036 | 41.75 | 130 | java |
null | jabref-main/src/main/java/org/jabref/logic/importer/fileformat/BibtexParser.java | package org.jabref.logic.importer.fileformat;
import java.io.IOException;
import java.io.InputStream;
import java.io.PushbackReader;
import java.io.Reader;
import java.io.StringWriter;
import java.util.Collection;
import java.util.Deque;
import java.util.HashMap;
import java.util.HashSet;
import java.util.LinkedList;
import java.util.List;
import java.util.Locale;
import java.util.Map;
import java.util.Objects;
import java.util.Optional;
import java.util.Set;
import java.util.regex.Pattern;
import org.jabref.logic.bibtex.FieldContentFormatter;
import org.jabref.logic.bibtex.FieldWriter;
import org.jabref.logic.exporter.BibtexDatabaseWriter;
import org.jabref.logic.exporter.SaveConfiguration;
import org.jabref.logic.importer.ImportFormatPreferences;
import org.jabref.logic.importer.Importer;
import org.jabref.logic.importer.ParseException;
import org.jabref.logic.importer.Parser;
import org.jabref.logic.importer.ParserResult;
import org.jabref.logic.importer.util.MetaDataParser;
import org.jabref.logic.l10n.Localization;
import org.jabref.logic.util.OS;
import org.jabref.model.database.BibDatabase;
import org.jabref.model.database.KeyCollisionException;
import org.jabref.model.entry.BibEntry;
import org.jabref.model.entry.BibEntryType;
import org.jabref.model.entry.BibtexString;
import org.jabref.model.entry.field.Field;
import org.jabref.model.entry.field.FieldFactory;
import org.jabref.model.entry.field.FieldProperty;
import org.jabref.model.entry.field.StandardField;
import org.jabref.model.entry.types.EntryTypeFactory;
import org.jabref.model.metadata.MetaData;
import org.jabref.model.util.DummyFileUpdateMonitor;
import org.jabref.model.util.FileUpdateMonitor;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* Class for importing BibTeX-files.
* <p>
* Use:
* <p>
* <code>BibtexParser parser = new BibtexParser(reader);</code>
* <p>
* <code>ParserResult result = parser.parse();</code>
* <p>
* or
* <p>
* <code>ParserResult result = BibtexParser.parse(reader);</code>
* <p>
* Can be used stand-alone.
* <p>
* Main using method: {@link org.jabref.logic.importer.OpenDatabase#loadDatabase(java.nio.file.Path, org.jabref.logic.importer.ImportFormatPreferences, org.jabref.model.util.FileUpdateMonitor)}
* <p>
* Opposite class: {@link org.jabref.logic.exporter.BibDatabaseWriter}
*/
public class BibtexParser implements Parser {
private static final Logger LOGGER = LoggerFactory.getLogger(BibtexParser.class);
private static final Integer LOOKAHEAD = 1024;
private final FieldContentFormatter fieldContentFormatter;
private final Deque<Character> pureTextFromFile = new LinkedList<>();
private final ImportFormatPreferences importFormatPreferences;
private PushbackReader pushbackReader;
private BibDatabase database;
private Set<BibEntryType> entryTypes;
private boolean eof;
private int line = 1;
private ParserResult parserResult;
private final MetaDataParser metaDataParser;
public BibtexParser(ImportFormatPreferences importFormatPreferences, FileUpdateMonitor fileMonitor) {
this.importFormatPreferences = Objects.requireNonNull(importFormatPreferences);
this.fieldContentFormatter = new FieldContentFormatter(importFormatPreferences.fieldPreferences());
this.metaDataParser = new MetaDataParser(fileMonitor);
}
public BibtexParser(ImportFormatPreferences importFormatPreferences) {
this(importFormatPreferences, new DummyFileUpdateMonitor());
}
/**
* Parses BibtexEntries from the given string and returns one entry found (or null if none found)
* <p>
* It is undetermined which entry is returned, so use this in case you know there is only one entry in the string.
*
* @return An {@code Optional<BibEntry>. Optional.empty()} if non was found or an error occurred.
*/
public static Optional<BibEntry> singleFromString(String bibtexString, ImportFormatPreferences importFormatPreferences) throws ParseException {
Collection<BibEntry> entries = new BibtexParser(importFormatPreferences).parseEntries(bibtexString);
if ((entries == null) || entries.isEmpty()) {
return Optional.empty();
}
return Optional.of(entries.iterator().next());
}
@Override
public List<BibEntry> parseEntries(InputStream inputStream) throws ParseException {
Reader reader;
try {
reader = Importer.getReader(inputStream);
return parse(reader).getDatabase().getEntries();
} catch (IOException e) {
throw new ParseException(e);
}
}
public Optional<BibEntry> parseSingleEntry(String bibtexString) throws ParseException {
return parseEntries(bibtexString).stream().findFirst();
}
/**
* Parses BibTeX data found when reading from reader.
* <p>
* The reader will be consumed.
* <p>
* Multiple calls to parse() return the same results
* <p>
* Handling of encoding is done at {@link BibtexImporter}
*/
public ParserResult parse(Reader in) throws IOException {
Objects.requireNonNull(in);
pushbackReader = new PushbackReader(in, BibtexParser.LOOKAHEAD);
String newLineSeparator = determineNewLineSeparator();
// BibTeX related contents
initializeParserResult(newLineSeparator);
parseDatabaseID();
skipWhitespace();
return parseFileContent();
}
    // Sniffs the first line break within the first LOOKAHEAD characters to decide whether
    // the file uses "\r\n" or "\n"; falls back to the platform default (OS.NEWLINE) when
    // no line break is found. All read characters are pushed back afterwards so parsing
    // restarts at the beginning of the stream.
    // NOTE(review): if the stream ends before any newline, (char) -1 == '\uFFFF' is
    // appended and unread; later reads treat 65535 as EOF via isEOFCharacter — confirm intentional.
    private String determineNewLineSeparator() throws IOException {
        String newLineSeparator = OS.NEWLINE;
        StringWriter stringWriter = new StringWriter(BibtexParser.LOOKAHEAD);
        int i = 0;
        int currentChar;
        do {
            currentChar = pushbackReader.read();
            stringWriter.append((char) currentChar);
            i++;
        } while ((i < BibtexParser.LOOKAHEAD) && (currentChar != '\r') && (currentChar != '\n'));
        if (currentChar == '\r') {
            // a lone '\r' is treated as Windows line endings as well
            newLineSeparator = "\r\n";
        } else if (currentChar == '\n') {
            newLineSeparator = "\n";
        }
        // unread all sneaked characters
        pushbackReader.unread(stringWriter.toString().toCharArray());
        return newLineSeparator;
    }
private void initializeParserResult(String newLineSeparator) {
database = new BibDatabase();
database.setNewLineSeparator(newLineSeparator);
entryTypes = new HashSet<>(); // To store custom entry types parsed.
parserResult = new ParserResult(database, new MetaData(), entryTypes);
}
private void parseDatabaseID() throws IOException {
while (!eof) {
skipWhitespace();
char c = (char) read();
if (c == '%') {
skipWhitespace();
String label = parseTextToken().trim();
if (label.equals(BibtexDatabaseWriter.DATABASE_ID_PREFIX)) {
skipWhitespace();
database.setSharedDatabaseID(parseTextToken().trim());
}
} else if (c == '@') {
unread(c);
break;
}
}
}
private ParserResult parseFileContent() throws IOException {
Map<String, String> meta = new HashMap<>();
while (!eof) {
boolean found = consumeUncritically('@');
if (!found) {
break;
}
skipWhitespace();
// Try to read the entry type
String entryType = parseTextToken().toLowerCase(Locale.ROOT).trim();
if ("preamble".equals(entryType)) {
database.setPreamble(parsePreamble());
// Consume a new line which separates the preamble from the next part (if the file was written with JabRef)
skipOneNewline();
// the preamble is saved verbatim anyway, so the text read so far can be dropped
dumpTextReadSoFarToString();
} else if ("string".equals(entryType)) {
parseBibtexString();
} else if ("comment".equals(entryType)) {
parseJabRefComment(meta);
} else {
// Not a comment, preamble, or string. Thus, it is an entry
parseAndAddEntry(entryType);
}
skipWhitespace();
}
try {
parserResult.setMetaData(metaDataParser.parse(
meta,
importFormatPreferences.bibEntryPreferences().getKeywordSeparator()));
} catch (ParseException exception) {
parserResult.addException(exception);
}
parseRemainingContent();
checkEpilog();
return parserResult;
}
private void checkEpilog() {
// This is an incomplete and inaccurate try to verify if something went wrong with previous parsing activity even though there were no warnings so far
// regex looks for something like 'identifier = blabla ,'
if (!parserResult.hasWarnings() && Pattern.compile("\\w+\\s*=.*,").matcher(database.getEpilog()).find()) {
parserResult.addWarning("following BibTex fragment has not been parsed:\n" + database.getEpilog());
}
}
private void parseRemainingContent() {
database.setEpilog(dumpTextReadSoFarToString().trim());
}
    // Parses one BibTeX entry of the given type and inserts it into the database,
    // attaching the comments that preceded it and its complete serialization.
    // On IOException the entry is skipped and a warning is added instead of aborting the parse.
    private void parseAndAddEntry(String type) {
        try {
            // collect all comments and the entry type definition in front of the actual entry
            // this is at least `@Type`
            String commentsAndEntryTypeDefinition = dumpTextReadSoFarToString();
            // remove first newline
            // this is appended by JabRef during writing automatically
            if (commentsAndEntryTypeDefinition.startsWith("\r\n")) {
                commentsAndEntryTypeDefinition = commentsAndEntryTypeDefinition.substring(2);
            } else if (commentsAndEntryTypeDefinition.startsWith("\n")) {
                commentsAndEntryTypeDefinition = commentsAndEntryTypeDefinition.substring(1);
            }
            BibEntry entry = parseEntry(type);
            // store comments collected without type definition
            // (everything before the last '@', which starts the entry itself)
            entry.setCommentsBeforeEntry(
                    commentsAndEntryTypeDefinition.substring(0, commentsAndEntryTypeDefinition.lastIndexOf('@')));
            // store complete parsed serialization (comments, type definition + type contents)
            String parsedSerialization = commentsAndEntryTypeDefinition + dumpTextReadSoFarToString();
            entry.setParsedSerialization(parsedSerialization);
            database.insertEntry(entry);
        } catch (IOException ex) {
            // This makes the parser more robust:
            // If an exception is thrown when parsing an entry, drop the entry and try to resume parsing.
            LOGGER.warn("Could not parse entry", ex);
            parserResult.addWarning(Localization.lang("Error occurred when parsing entry") + ": '" + ex.getMessage()
                    + "'. " + "\n\n" + Localization.lang("JabRef skipped the entry."));
        }
    }
private void parseJabRefComment(Map<String, String> meta) {
StringBuilder buffer;
try {
buffer = parseBracketedFieldContent();
} catch (IOException e) {
// if we get an IO Exception here, then we have an unbracketed comment,
// which means that we should just return and the comment will be picked up as arbitrary text
// by the parser
LOGGER.info("Found unbracketed comment");
return;
}
String comment = buffer.toString().replaceAll("[\\x0d\\x0a]", "");
if (comment.substring(0, Math.min(comment.length(), MetaData.META_FLAG.length())).equals(MetaData.META_FLAG)) {
if (comment.startsWith(MetaData.META_FLAG)) {
String rest = comment.substring(MetaData.META_FLAG.length());
int pos = rest.indexOf(':');
if (pos > 0) {
// We remove all line breaks in the metadata - these
// will have been inserted
// to prevent too long lines when the file was
// saved, and are not part of the data.
meta.put(rest.substring(0, pos), rest.substring(pos + 1));
// meta comments are always re-written by JabRef and not stored in the file
dumpTextReadSoFarToString();
}
}
} else if (comment.substring(0, Math.min(comment.length(), MetaData.ENTRYTYPE_FLAG.length()))
.equals(MetaData.ENTRYTYPE_FLAG)) {
// A custom entry type can also be stored in a
// "@comment"
Optional<BibEntryType> typ = MetaDataParser.parseCustomEntryType(comment);
if (typ.isPresent()) {
entryTypes.add(typ.get());
} else {
parserResult.addWarning(Localization.lang("Ill-formed entrytype comment in BIB file") + ": " + comment);
}
// custom entry types are always re-written by JabRef and not stored in the file
dumpTextReadSoFarToString();
}
}
private void parseBibtexString() throws IOException {
BibtexString bibtexString = parseString();
bibtexString.setParsedSerialization(dumpTextReadSoFarToString());
try {
database.addString(bibtexString);
} catch (KeyCollisionException ex) {
parserResult.addWarning(Localization.lang("Duplicate string name") + ": " + bibtexString.getName());
}
}
/**
* Puts all text that has been read from the reader, including newlines, etc., since the last call of this method into a string. Removes the JabRef file header, if it is found
*
* @return the text read so far
*/
private String dumpTextReadSoFarToString() {
String result = getPureTextFromFile();
int indexOfAt = result.indexOf("@");
// if there is no entry found, simply return the content (necessary to parse text remaining after the last entry)
if (indexOfAt == -1) {
return purgeEOFCharacters(result);
} else if (result.contains(BibtexDatabaseWriter.DATABASE_ID_PREFIX)) {
return purge(result, BibtexDatabaseWriter.DATABASE_ID_PREFIX);
} else if (result.contains(SaveConfiguration.ENCODING_PREFIX)) {
return purge(result, SaveConfiguration.ENCODING_PREFIX);
} else {
return result;
}
}
    /**
     * Purges the given stringToPurge (if it exists) from the given context
     *
     * @param context       the text to strip the marker line from
     * @param stringToPurge the marker whose whole line is removed
     * @return a stripped version of the context
     */
    private String purge(String context, String stringToPurge) {
        // purge the given string line if it exists
        // advance past the rest of the marker's line (up to but not including the first '@')
        int runningIndex = context.indexOf(stringToPurge);
        int indexOfAt = context.indexOf("@");
        while (runningIndex < indexOfAt) {
            if (context.charAt(runningIndex) == '\n') {
                break;
            } else if (context.charAt(runningIndex) == '\r') {
                // swallow a following '\n' so "\r\n" counts as one line ending
                if (context.charAt(runningIndex + 1) == '\n') {
                    runningIndex++;
                }
                break;
            }
            runningIndex++;
        }
        // strip empty lines
        while ((runningIndex < indexOfAt) &&
                ((context.charAt(runningIndex) == '\r') ||
                        (context.charAt(runningIndex) == '\n'))) {
            runningIndex++;
        }
        return context.substring(runningIndex);
    }
private String getPureTextFromFile() {
StringBuilder entry = new StringBuilder();
while (!pureTextFromFile.isEmpty()) {
entry.append(pureTextFromFile.pollFirst());
}
return entry.toString();
}
/**
* Removes all eof characters from a StringBuilder and returns a new String with the resulting content
*
* @return a String without eof characters
*/
private String purgeEOFCharacters(String input) {
StringBuilder remainingText = new StringBuilder();
for (Character character : input.toCharArray()) {
if (!(isEOFCharacter(character))) {
remainingText.append(character);
}
}
return remainingText.toString();
}
private void skipWhitespace() throws IOException {
int character;
while (true) {
character = read();
if (isEOFCharacter(character)) {
eof = true;
return;
}
if (!Character.isWhitespace((char) character)) {
// found non-whitespace char
unread(character);
break;
}
}
}
private void skipSpace() throws IOException {
int character;
while (true) {
character = read();
if (isEOFCharacter(character)) {
eof = true;
return;
}
if ((char) character != ' ') {
// found non-space char
unread(character);
break;
}
}
}
private void skipOneNewline() throws IOException {
skipSpace();
if (peek() == '\r') {
read();
}
if (peek() == '\n') {
read();
}
}
private boolean isEOFCharacter(int character) {
return (character == -1) || (character == 65535);
}
    // Consumes a run of whitespace starting with the already-read character and returns
    // the run with all plain spaces removed (tabs and newlines are kept). The first
    // non-whitespace character is pushed back; EOF sets the eof flag.
    private String skipAndRecordWhitespace(int character) throws IOException {
        StringBuilder stringBuilder = new StringBuilder();
        if (character != ' ') {
            stringBuilder.append((char) character);
        }
        while (true) {
            int nextCharacter = read();
            if (isEOFCharacter(nextCharacter)) {
                eof = true;
                return stringBuilder.toString();
            }
            if (Character.isWhitespace((char) nextCharacter)) {
                if (nextCharacter != ' ') {
                    stringBuilder.append((char) nextCharacter);
                }
            } else {
                // found non-whitespace char
                unread(nextCharacter);
                break;
            }
        }
        return stringBuilder.toString();
    }
private int peek() throws IOException {
int character = read();
unread(character);
return character;
}
private char[] peekTwoCharacters() throws IOException {
char character1 = (char) read();
char character2 = (char) read();
unread(character2);
unread(character1);
return new char[] {
character1, character2
};
}
private int read() throws IOException {
int character = pushbackReader.read();
if (!isEOFCharacter(character)) {
pureTextFromFile.offerLast((char) character);
}
if (character == '\n') {
line++;
}
return character;
}
    // Pushes one character back onto the reader, keeping the line counter and the
    // raw-text queue consistent with read().
    // NOTE(review): getLast() throws NoSuchElementException if the queue is empty;
    // callers appear to only unread characters previously recorded by read() — confirm.
    private void unread(int character) throws IOException {
        if (character == '\n') {
            line--;
        }
        pushbackReader.unread(character);
        if (pureTextFromFile.getLast() == character) {
            pureTextFromFile.pollLast();
        }
    }
    // Parses one @string definition of the form `{name = content}` (or with parentheses)
    // and returns it as a BibtexString. Consumes the trailing newline that ends the block.
    private BibtexString parseString() throws IOException {
        skipWhitespace();
        consume('{', '(');
        skipWhitespace();
        LOGGER.debug("Parsing string name");
        String name = parseTextToken();
        LOGGER.debug("Parsed string name");
        skipWhitespace();
        LOGGER.debug("Now the contents");
        consume('=');
        String content = parseFieldContent(FieldFactory.parseField(name));
        LOGGER.debug("Now I'm going to consume a }");
        consume('}', ')');
        // Consume new line which signals end of entry
        skipOneNewline();
        LOGGER.debug("Finished string parsing.");
        return new BibtexString(name, content);
    }
private String parsePreamble() throws IOException {
skipWhitespace();
String result = parseBracketedText();
// also "include" the newline in the preamble
skipOneNewline();
return result;
}
    // Parses the body of one entry: `{key, field = value, ...}` (or with parentheses).
    // Returns a BibEntry with citation key and fields set; consumes the trailing newline.
    private BibEntry parseEntry(String entryType) throws IOException {
        BibEntry result = new BibEntry(EntryTypeFactory.parse(entryType));
        skipWhitespace();
        consume('{', '(');
        int character = peek();
        // only skip whitespace if the key is on the same line; a newline means no key
        if ((character != '\n') && (character != '\r')) {
            skipWhitespace();
        }
        String key = parseKey();
        result.setCitationKey(key);
        skipWhitespace();
        // read fields until the closing bracket of the entry
        while (true) {
            character = peek();
            if ((character == '}') || (character == ')')) {
                break;
            }
            if (character == ',') {
                consume(',');
            }
            skipWhitespace();
            character = peek();
            // allow a trailing comma before the closing bracket
            if ((character == '}') || (character == ')')) {
                break;
            }
            parseField(result);
        }
        consume('}', ')');
        // Consume new line which signals end of entry
        skipOneNewline();
        return result;
    }
    // Parses one `name = content` pair and stores it on the entry. Duplicate person-name
    // fields are concatenated with " and "; duplicate keyword fields are merged;
    // other duplicate fields are silently dropped (first value wins). Empty content is ignored.
    private void parseField(BibEntry entry) throws IOException {
        Field field = FieldFactory.parseField(parseTextToken().toLowerCase(Locale.ROOT));
        skipWhitespace();
        consume('=');
        String content = parseFieldContent(field);
        if (!content.isEmpty()) {
            if (entry.hasField(field)) {
                // The following hack enables the parser to deal with multiple
                // author or
                // editor lines, stringing them together instead of getting just
                // one of them.
                // Multiple author or editor lines are not allowed by the bibtex
                // format, but
                // at least one online database exports bibtex likes to do that, making
                // it inconvenient
                // for users if JabRef did not accept it.
                if (field.getProperties().contains(FieldProperty.PERSON_NAMES)) {
                    entry.setField(field, entry.getField(field).get() + " and " + content);
                } else if (StandardField.KEYWORDS == field) {
                    // multiple keywords fields should be combined to one
                    entry.addKeyword(content, importFormatPreferences.bibEntryPreferences().getKeywordSeparator());
                }
            } else {
                entry.setField(field, content);
            }
        }
    }
    // Parses the value of a field up to the next ',', '}' or ')': quoted strings,
    // bracketed strings, bare numbers, BibTeX string references (wrapped in the
    // string start/end symbol), and '#' concatenation are all supported.
    private String parseFieldContent(Field field) throws IOException {
        skipWhitespace();
        StringBuilder value = new StringBuilder();
        int character;
        while (((character = peek()) != ',') && (character != '}') && (character != ')')) {
            if (eof) {
                throw new IOException("Error in line " + line + ": EOF in mid-string");
            }
            if (character == '"') {
                StringBuilder text = parseQuotedFieldExactly();
                value.append(fieldContentFormatter.format(text, field));
            } else if (character == '{') {
                // Value is a string enclosed in brackets. There can be pairs
                // of brackets inside a field, so we need to count the
                // brackets to know when the string is finished.
                StringBuilder text = parseBracketedFieldContent();
                value.append(fieldContentFormatter.format(text, field));
            } else if (Character.isDigit((char) character)) { // value is a number
                String number = parseTextToken();
                value.append(number);
            } else if (character == '#') {
                // Here, we hit the case of BibTeX string concatenation. E.g., "author = Kopp # Kolb".
                // We did NOT hit org.jabref.logic.bibtex.FieldWriter#BIBTEX_STRING_START_END_SYMBOL
                // See also ADR-0024
                consume('#');
            } else {
                // a bare token: treated as a reference to a @string definition
                String textToken = parseTextToken();
                if (textToken.isEmpty()) {
                    throw new IOException("Error in line " + line + " or above: "
                            + "Empty text token.\nThis could be caused " + "by a missing comma between two fields.");
                }
                value.append(FieldWriter.BIBTEX_STRING_START_END_SYMBOL).append(textToken).append(FieldWriter.BIBTEX_STRING_START_END_SYMBOL);
            }
            skipWhitespace();
        }
        return value.toString();
    }
/**
* This method is used to parse string labels, field names, entry type and numbers outside brackets.
*/
private String parseTextToken() throws IOException {
StringBuilder token = new StringBuilder(20);
while (true) {
int character = read();
if (character == -1) {
eof = true;
return token.toString();
}
if (Character.isLetterOrDigit((char) character) || (":-_*+./'".indexOf(character) >= 0)) {
token.append((char) character);
} else {
unread(character);
return token.toString();
}
}
}
    /**
     * Tries to restore the key
     *
     * @return rest of key on success, otherwise empty string
     * @throws IOException on Reader-Error
     */
    private String fixKey() throws IOException {
        StringBuilder key = new StringBuilder();
        int lookaheadUsed = 0;
        char currentChar;
        // Find a char which ends key (','&&'\n') or entryfield ('='):
        do {
            currentChar = (char) read();
            key.append(currentChar);
            lookaheadUsed++;
        } while ((currentChar != ',') && (currentChar != '\n') && (currentChar != '=')
                && (lookaheadUsed < BibtexParser.LOOKAHEAD));
        // Consumed a char too much, back into reader and remove from key:
        unread(currentChar);
        key.deleteCharAt(key.length() - 1);
        // Restore if possible:
        switch (currentChar) {
            case '=':
                // Get entryfieldname, push it back and take rest as key
                key = key.reverse();
                boolean matchedAlpha = false;
                for (int i = 0; i < key.length(); i++) {
                    currentChar = key.charAt(i);
                    /// Skip spaces:
                    if (!matchedAlpha && (currentChar == ' ')) {
                        continue;
                    }
                    matchedAlpha = true;
                    // Begin of entryfieldname (e.g. author) -> push back:
                    // NOTE(review): this unread() runs on every iteration after the first
                    // non-space, not only at the field-name boundary — confirm intentional.
                    unread(currentChar);
                    if ((currentChar == ' ') || (currentChar == '\n')) {
                        /*
                         * found whitespaces, entryfieldname completed -> key in
                         * keybuffer, skip whitespaces
                         */
                        StringBuilder newKey = new StringBuilder();
                        for (int j = i; j < key.length(); j++) {
                            currentChar = key.charAt(j);
                            if (!Character.isWhitespace(currentChar)) {
                                newKey.append(currentChar);
                            }
                        }
                        // Finished, now reverse newKey and remove whitespaces:
                        key = newKey.reverse();
                        parserResult.addWarning(
                                Localization.lang("Line %0: Found corrupted citation key %1.", String.valueOf(line), key.toString()));
                    }
                }
                break;
            case ',':
                parserResult.addWarning(
                        Localization.lang("Line %0: Found corrupted citation key %1 (contains whitespaces).", String.valueOf(line), key.toString()));
                break;
            case '\n':
                parserResult.addWarning(
                        Localization.lang("Line %0: Found corrupted citation key %1 (comma missing).", String.valueOf(line), key.toString()));
                break;
            default:
                // No more lookahead, give up:
                unreadBuffer(key);
                return "";
        }
        return removeWhitespaces(key).toString();
    }
/**
* returns a new <code>StringBuilder</code> which corresponds to <code>toRemove</code> without whitespaces
*
*/
private StringBuilder removeWhitespaces(StringBuilder toRemove) {
StringBuilder result = new StringBuilder();
char current;
for (int i = 0; i < toRemove.length(); ++i) {
current = toRemove.charAt(i);
if (!Character.isWhitespace(current)) {
result.append(current);
}
}
return result;
}
/**
* pushes buffer back into input
*
* @throws IOException can be thrown if buffer is bigger than LOOKAHEAD
*/
private void unreadBuffer(StringBuilder stringBuilder) throws IOException {
for (int i = stringBuilder.length() - 1; i >= 0; --i) {
unread(stringBuilder.charAt(i));
}
}
    /**
     * This method is used to parse the citation key of an entry.
     * Reads until a terminator (',', '}', '='), whitespace (handed to fixKey() for
     * recovery), or EOF; throws on characters not allowed in citation keys.
     */
    private String parseKey() throws IOException {
        StringBuilder token = new StringBuilder(20);
        while (true) {
            int character = read();
            if (character == -1) {
                eof = true;
                return token.toString();
            }
            // accepted: any non-whitespace char that is a letter/digit, ':',
            // or not one of the reserved characters "#{}~,=\uFFFD"
            if (!Character.isWhitespace((char) character) && (Character.isLetterOrDigit((char) character)
                    || (character == ':') || ("#{}~,=\uFFFD".indexOf(character) == -1))) {
                token.append((char) character);
            } else {
                if (Character.isWhitespace((char) character)) {
                    // We have encountered white space instead of the comma at
                    // the end of
                    // the key. Possibly the comma is missing, so we try to
                    // return what we
                    // have found, as the key and try to restore the rest in fixKey().
                    return token + fixKey();
                } else if ((character == ',') || (character == '}')) {
                    unread(character);
                    return token.toString();
                } else if (character == '=') {
                    // If we find a '=' sign, it is either an error, or
                    // the entry lacked a comma signifying the end of the key.
                    return token.toString();
                } else {
                    throw new IOException("Error in line " + line + ":" + "Character '" + (char) character + "' is not "
                            + "allowed in citation keys.");
                }
            }
        }
    }
    // Parses balanced bracketed text `{...}` or `(...)`, tracking nesting depth.
    // Runs of whitespace are collapsed: a run containing only spaces (or exactly "\n\t")
    // becomes a single space; other runs keep their non-tab whitespace characters.
    private String parseBracketedText() throws IOException {
        StringBuilder value = new StringBuilder();
        consume('{', '(');
        int brackets = 0;
        while (!((isClosingBracketNext()) && (brackets == 0))) {
            int character = read();
            if (isEOFCharacter(character)) {
                throw new IOException("Error in line " + line + ": EOF in mid-string");
            } else if ((character == '{') || (character == '(')) {
                brackets++;
            } else if ((character == '}') || (character == ')')) {
                brackets--;
            }
            // If we encounter whitespace of any kind, read it as a
            // simple space, and ignore any others that follow immediately.
            /*
             * if (j == '\n') { if (peek() == '\n') value.append('\n'); } else
             */
            if (Character.isWhitespace((char) character)) {
                String whitespacesReduced = skipAndRecordWhitespace(character);
                if (!(whitespacesReduced.isEmpty()) && !"\n\t".equals(whitespacesReduced)) { // &&
                    whitespacesReduced = whitespacesReduced.replace("\t", ""); // Remove tabulators.
                    value.append(whitespacesReduced);
                } else {
                    value.append(' ');
                }
            } else {
                value.append((char) character);
            }
        }
        consume('}', ')');
        return value.toString();
    }
private boolean isClosingBracketNext() {
try {
int peek = peek();
boolean isCurlyBracket = peek == '}';
boolean isRoundBracket = peek == ')';
return isCurlyBracket || isRoundBracket;
} catch (IOException e) {
return false;
}
}
    /**
     * This is called if a field in the form of <code>field = {content}</code> is parsed.
     * The global variable <code>character</code> contains <code>{</code>.
     * Tracks nesting depth and tolerates a stray backslash before the final '}'
     * (e.g. a Windows file path ending in '\'), see the heuristic below.
     */
    private StringBuilder parseBracketedFieldContent() throws IOException {
        StringBuilder value = new StringBuilder();
        consume('{');
        int brackets = 0;
        char character;
        char lastCharacter = '\0';
        while (true) {
            character = (char) read();
            boolean isClosingBracket = false;
            if (character == '}') {
                if (lastCharacter == '\\') {
                    // We hit `\}`
                    // It could be that a user has a backslash at the end of the entry, but intended to put a file path
                    // We want to be relaxed at that case
                    // First described at https://github.com/JabRef/jabref/issues/9668
                    char[] nextTwoCharacters = peekTwoCharacters();
                    // Check for "\},\n" - Example context: ` path = {c:\temp\},\n`
                    // On Windows, it could be "\},\r\n", thus we rely in OS.NEWLINE.charAt(0) (which returns '\r' or '\n').
                    // In all cases, we should check for '\n' as the file could be encoded with Linux line endings on Windows.
                    if ((nextTwoCharacters[0] == ',') && ((nextTwoCharacters[1] == OS.NEWLINE.charAt(0)) || (nextTwoCharacters[1] == '\n'))) {
                        // We hit '\}\r` or `\}\n`
                        // Heuristics: Unwanted escaping of }
                        //
                        // Two consequences:
                        //
                        // 1. Keep `\` as read
                        // This is already done
                        //
                        // 2. Treat `}` as closing bracket
                        isClosingBracket = true;
                    } else {
                        isClosingBracket = false;
                    }
                } else {
                    isClosingBracket = true;
                }
            }
            if (isClosingBracket && (brackets == 0)) {
                return value;
            } else if (isEOFCharacter(character)) {
                throw new IOException("Error in line " + line + ": EOF in mid-string");
            } else if ((character == '{') && (!isEscapeSymbol(lastCharacter))) {
                brackets++;
            } else if (isClosingBracket) {
                brackets--;
            }
            value.append(character);
            lastCharacter = character;
        }
    }
private boolean isEscapeSymbol(char character) {
return '\\' == character;
}
private StringBuilder parseQuotedFieldExactly() throws IOException {
StringBuilder value = new StringBuilder();
consume('"');
int brackets = 0;
while (!((peek() == '"') && (brackets == 0))) {
int j = read();
if (isEOFCharacter(j)) {
throw new IOException("Error in line " + line + ": EOF in mid-string");
} else if (j == '{') {
brackets++;
} else if (j == '}') {
brackets--;
}
value.append((char) j);
}
consume('"');
return value;
}
private void consume(char expected) throws IOException {
int character = read();
if (character != expected) {
throw new IOException(
"Error in line " + line + ": Expected " + expected + " but received " + (char) character);
}
}
private boolean consumeUncritically(char expected) throws IOException {
int character;
do {
character = read();
} while ((character != expected) && (character != -1) && (character != 65535));
if (isEOFCharacter(character)) {
eof = true;
}
// Return true if we actually found the character we were looking for:
return character == expected;
}
private void consume(char firstOption, char secondOption) throws IOException {
// Consumes one of the two, doesn't care which appears.
int character = read();
if ((character != firstOption) && (character != secondOption)) {
throw new IOException("Error in line " + line + ": Expected " + firstOption + " or " + secondOption
+ " but received " + (char) character);
}
}
}
| 37,539 | 35.767875 | 193 | java |
null | jabref-main/src/main/java/org/jabref/logic/importer/fileformat/CffImporter.java | package org.jabref.logic.importer.fileformat;
import java.io.BufferedReader;
import java.io.IOException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;
import org.jabref.logic.importer.Importer;
import org.jabref.logic.importer.ParserResult;
import org.jabref.logic.util.StandardFileType;
import org.jabref.model.entry.Author;
import org.jabref.model.entry.AuthorList;
import org.jabref.model.entry.BibEntry;
import org.jabref.model.entry.field.BiblatexSoftwareField;
import org.jabref.model.entry.field.Field;
import org.jabref.model.entry.field.StandardField;
import org.jabref.model.entry.field.UnknownField;
import org.jabref.model.entry.types.StandardEntryType;
import com.fasterxml.jackson.annotation.JsonAnySetter;
import com.fasterxml.jackson.annotation.JsonProperty;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.dataformat.yaml.YAMLFactory;
public class CffImporter extends Importer {

    @Override
    public String getName() {
        return "CFF";
    }

    @Override
    public StandardFileType getFileType() {
        return StandardFileType.CFF;
    }

    @Override
    public String getId() {
        return "cff";
    }

    @Override
    public String getDescription() {
        return "Importer for the CFF format. Is only used to cite software, one entry per file.";
    }

    // POJO classes mirroring the YAML structure of a CITATION.cff file
    private static class CffFormat {
        // All scalar top-level keys (title, doi, license, ...) end up here via the any-setter.
        private final HashMap<String, String> values = new HashMap<>();

        @JsonProperty("authors")
        private List<CffAuthor> authors;

        @JsonProperty("identifiers")
        private List<CffIdentifier> ids;

        public CffFormat() {
        }

        @JsonAnySetter
        private void setValues(String key, String value) {
            values.put(key, value);
        }
    }

    private static class CffAuthor {
        // Author sub-keys such as "given-names", "family-names", "name", ...
        private final HashMap<String, String> values = new HashMap<>();

        public CffAuthor() {
        }

        @JsonAnySetter
        private void setValues(String key, String value) {
            values.put(key, value);
        }
    }

    private static class CffIdentifier {
        @JsonProperty("type")
        private String type;
        @JsonProperty("value")
        private String value;

        public CffIdentifier() {
        }
    }

    /**
     * Reads one CITATION.cff document and converts it into a single {@link BibEntry}
     * of type {@code software} (or {@code dataset} if the file declares that type).
     */
    @Override
    public ParserResult importDatabase(BufferedReader reader) throws IOException {
        ObjectMapper mapper = new ObjectMapper(new YAMLFactory());
        CffFormat citation = mapper.readValue(reader, CffFormat.class);
        HashMap<Field, String> entryMap = new HashMap<>();
        StandardEntryType entryType = StandardEntryType.Software;

        // Map CFF fields to JabRef Fields; everything else is kept as an unknown
        // field when whitelisted, or silently dropped.
        HashMap<String, Field> fieldMap = getFieldMappings();
        for (Map.Entry<String, String> property : citation.values.entrySet()) {
            if (fieldMap.containsKey(property.getKey())) {
                entryMap.put(fieldMap.get(property.getKey()), property.getValue());
            } else if ("type".equals(property.getKey())) {
                if ("dataset".equals(property.getValue())) {
                    entryType = StandardEntryType.Dataset;
                }
            } else if (getUnmappedFields().contains(property.getKey())) {
                entryMap.put(new UnknownField(property.getKey()), property.getValue());
            }
        }

        // Translate CFF author format to JabRef author format. "authors" is mandatory
        // per the CFF specification, but a malformed file without it previously caused
        // a NullPointerException here - such files now import without an author field.
        if (citation.authors != null) {
            String authorStr = citation.authors.stream()
                    .map(author -> author.values)
                    .map(vals -> vals.get("name") != null ?
                            new Author(vals.get("name"), "", "", "", "") :
                            new Author(vals.get("given-names"), null, vals.get("name-particle"),
                                    vals.get("family-names"), vals.get("name-suffix")))
                    .collect(AuthorList.collect())
                    .getAsFirstLastNamesWithAnd();
            entryMap.put(StandardField.AUTHOR, authorStr);
        }

        // Select DOI to keep: the top-level "doi" key wins; otherwise use an
        // identifier of type "doi", but only if it is unambiguous.
        if ((entryMap.get(StandardField.DOI) == null) && (citation.ids != null)) {
            List<CffIdentifier> doiIds = citation.ids.stream()
                                                     .filter(id -> "doi".equals(id.type))
                                                     .toList();
            if (doiIds.size() == 1) {
                entryMap.put(StandardField.DOI, doiIds.get(0).value);
            }
        }

        // Select SWHID to keep: a single id wins; with several, prefer the unique
        // release-level ("rel") identifier if there is exactly one.
        if (citation.ids != null) {
            List<String> swhIds = citation.ids.stream()
                                              .filter(id -> "swh".equals(id.type))
                                              .map(id -> id.value)
                                              .toList();
            if (swhIds.size() == 1) {
                entryMap.put(BiblatexSoftwareField.SWHID, swhIds.get(0));
            } else if (swhIds.size() > 1) {
                List<String> relSwhIds = swhIds.stream()
                                               .filter(id -> id.split(":").length > 3) // quick filter for invalid swhids
                                               .filter(id -> "rel".equals(id.split(":")[2]))
                                               .toList();
                if (relSwhIds.size() == 1) {
                    entryMap.put(BiblatexSoftwareField.SWHID, relSwhIds.get(0));
                }
            }
        }

        BibEntry entry = new BibEntry(entryType);
        entry.setField(entryMap);

        List<BibEntry> entriesList = new ArrayList<>();
        entriesList.add(entry);
        return new ParserResult(entriesList);
    }

    /**
     * A file is recognized as CFF if it parses as YAML and carries a "title" key.
     */
    @Override
    public boolean isRecognizedFormat(BufferedReader reader) throws IOException {
        ObjectMapper mapper = new ObjectMapper(new YAMLFactory());
        CffFormat citation;
        try {
            citation = mapper.readValue(reader, CffFormat.class);
            return (citation != null) && (citation.values.get("title") != null);
        } catch (IOException e) {
            return false;
        }
    }

    /** CFF keys that map directly onto JabRef fields. */
    private HashMap<String, Field> getFieldMappings() {
        HashMap<String, Field> fieldMappings = new HashMap<>();
        fieldMappings.put("title", StandardField.TITLE);
        fieldMappings.put("version", StandardField.VERSION);
        fieldMappings.put("doi", StandardField.DOI);
        fieldMappings.put("license", BiblatexSoftwareField.LICENSE);
        fieldMappings.put("repository", BiblatexSoftwareField.REPOSITORY);
        fieldMappings.put("url", StandardField.URL);
        fieldMappings.put("abstract", StandardField.ABSTRACT);
        fieldMappings.put("message", StandardField.COMMENT);
        fieldMappings.put("date-released", StandardField.DATE);
        fieldMappings.put("keywords", StandardField.KEYWORDS);
        return fieldMappings;
    }

    /** CFF keys without a JabRef equivalent that are still imported as unknown fields. */
    private List<String> getUnmappedFields() {
        return List.of(
                "commit",
                "license-url",
                "repository-code",
                "repository-artifact");
    }
}
| 7,302 | 35.153465 | 121 | java |
null | jabref-main/src/main/java/org/jabref/logic/importer/fileformat/CitaviXmlImporter.java | package org.jabref.logic.importer.fileformat;
import java.io.BufferedInputStream;
import java.io.BufferedReader;
import java.io.FileInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.io.PushbackInputStream;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.StandardCopyOption;
import java.nio.file.StandardOpenOption;
import java.util.ArrayList;
import java.util.Collections;
import java.util.EnumSet;
import java.util.HashMap;
import java.util.List;
import java.util.Locale;
import java.util.Map;
import java.util.Objects;
import java.util.Optional;
import java.util.StringJoiner;
import java.util.function.Predicate;
import java.util.stream.Collectors;
import java.util.zip.ZipEntry;
import java.util.zip.ZipInputStream;
import javax.xml.stream.XMLInputFactory;
import javax.xml.stream.XMLStreamException;
import javax.xml.stream.XMLStreamReader;
import org.jabref.logic.formatter.bibtexfields.HtmlToLatexFormatter;
import org.jabref.logic.formatter.bibtexfields.NormalizePagesFormatter;
import org.jabref.logic.importer.Importer;
import org.jabref.logic.importer.Parser;
import org.jabref.logic.importer.ParserResult;
import org.jabref.logic.importer.fileformat.citavi.CitaviExchangeData;
import org.jabref.logic.importer.fileformat.citavi.CitaviExchangeData.KnowledgeItems;
import org.jabref.logic.importer.fileformat.citavi.CitaviExchangeData.KnowledgeItems.KnowledgeItem;
import org.jabref.logic.importer.fileformat.citavi.CitaviExchangeData.Persons.Person;
import org.jabref.logic.util.StandardFileType;
import org.jabref.model.entry.Author;
import org.jabref.model.entry.AuthorList;
import org.jabref.model.entry.BibEntry;
import org.jabref.model.entry.Keyword;
import org.jabref.model.entry.KeywordList;
import org.jabref.model.entry.field.StandardField;
import org.jabref.model.entry.types.EntryType;
import org.jabref.model.entry.types.IEEETranEntryType;
import org.jabref.model.entry.types.StandardEntryType;
import org.jabref.model.strings.StringUtil;
import jakarta.xml.bind.JAXBContext;
import jakarta.xml.bind.JAXBException;
import jakarta.xml.bind.Unmarshaller;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
public class CitaviXmlImporter extends Importer implements Parser {

    private static final Logger LOGGER = LoggerFactory.getLogger(CitaviXmlImporter.class);
    // A Citavi UUID is 36 characters; the reference id and the person/keyword ids
    // are separated by a semicolon directly after it.
    private static final byte UUID_LENGTH = 36;
    private static final byte UUID_SEMICOLON_OFFSET_INDEX = 37;
    private static final EnumSet<QuotationTypeMapping> QUOTATION_TYPES = EnumSet.allOf(QuotationTypeMapping.class);
    private final HtmlToLatexFormatter htmlToLatexFormatter = new HtmlToLatexFormatter();
    private final NormalizePagesFormatter pagesFormatter = new NormalizePagesFormatter();

    // Caches so that a person/keyword/publisher referenced by many entries is resolved only once.
    private final Map<String, Author> knownPersons = new HashMap<>();
    private final Map<String, Keyword> knownKeywords = new HashMap<>();
    private final Map<String, String> knownPublishers = new HashMap<>();

    // reference id -> stringified authors/editors/keywords/publishers
    private Map<String, String> refIdWithAuthors = new HashMap<>();
    private Map<String, String> refIdWithEditors = new HashMap<>();
    private Map<String, String> refIdWithKeywords = new HashMap<>();
    private Map<String, String> refIdWithPublishers = new HashMap<>();

    private CitaviExchangeData.Persons persons;
    private CitaviExchangeData.Keywords keywords;
    private CitaviExchangeData.Publishers publishers;
    private KnowledgeItems knowledgeItems;

    private CitaviExchangeData.ReferenceAuthors refAuthors;
    private CitaviExchangeData.ReferenceEditors refEditors;
    private CitaviExchangeData.ReferenceKeywords refKeywords;
    private CitaviExchangeData.ReferencePublishers refPublishers;

    private Unmarshaller unmarshaller;

    @Override
    public String getName() {
        return "Citavi XML";
    }

    @Override
    public StandardFileType getFileType() {
        return StandardFileType.CITAVI;
    }

    @Override
    public String getId() {
        return "citavi";
    }

    @Override
    public String getDescription() {
        return "Importer for the Citavi XML format.";
    }

    /**
     * Citavi files are zip archives, so recognition by reader content alone is not supported;
     * use {@link #isRecognizedFormat(Path)} instead.
     */
    @Override
    public boolean isRecognizedFormat(BufferedReader reader) throws IOException {
        Objects.requireNonNull(reader);
        return false;
    }

    @Override
    public boolean isRecognizedFormat(Path filePath) throws IOException {
        try (BufferedReader reader = getReaderFromZip(filePath)) {
            String str;
            int i = 0;
            // Only scan the first 50 lines for the Citavi root element.
            while (((str = reader.readLine()) != null) && (i < 50)) {
                if (str.toLowerCase(Locale.ROOT).contains("citaviexchangedata")) {
                    return true;
                }
                i++;
            }
        }
        return false;
    }

    @Override
    public ParserResult importDatabase(Path filePath) throws IOException {
        try (BufferedReader reader = getReaderFromZip(filePath)) {
            Object unmarshalledObject = unmarshallRoot(reader);
            if (unmarshalledObject instanceof CitaviExchangeData data) {
                List<BibEntry> bibEntries = parseDataList(data);
                return new ParserResult(bibEntries);
            } else {
                return ParserResult.fromErrorMessage("File does not start with xml tag.");
            }
        } catch (JAXBException | XMLStreamException e) {
            LOGGER.debug("could not parse document", e);
            return ParserResult.fromError(e);
        }
    }

    private List<BibEntry> parseDataList(CitaviExchangeData data) {
        persons = data.getPersons();
        keywords = data.getKeywords();
        publishers = data.getPublishers();
        knowledgeItems = data.getKnowledgeItems();

        refAuthors = data.getReferenceAuthors();
        refEditors = data.getReferenceEditors();
        refKeywords = data.getReferenceKeywords();
        refPublishers = data.getReferencePublishers();

        if (refAuthors != null) {
            this.refIdWithAuthors = buildPersonList(refAuthors.getOnetoN());
        }
        if (refEditors != null) {
            this.refIdWithEditors = buildPersonList(refEditors.getOnetoN());
        }
        if (refKeywords != null) {
            this.refIdWithKeywords = buildKeywordList(refKeywords.getOnetoN());
        }
        if (refPublishers != null) {
            this.refIdWithPublishers = buildPublisherList(refPublishers.getOnetoN());
        }

        return data.getReferences().getReference()
                   .stream()
                   .map(this::parseData)
                   .collect(Collectors.toList());
    }

    /** Converts a single Citavi reference into a {@link BibEntry}. */
    private BibEntry parseData(CitaviExchangeData.References.Reference data) {
        BibEntry entry = new BibEntry();
        entry.setType(getType(data));
        Optional.ofNullable(data.getTitle())
                .ifPresent(value -> entry.setField(StandardField.TITLE, clean(value)));
        Optional.ofNullable(data.getAbstract())
                .ifPresent(value -> entry.setField(StandardField.ABSTRACT, clean(value)));
        Optional.ofNullable(data.getYear())
                .ifPresent(value -> entry.setField(StandardField.YEAR, clean(value)));
        Optional.ofNullable(data.getDoi())
                .ifPresent(value -> entry.setField(StandardField.DOI, clean(value)));
        Optional.ofNullable(data.getIsbn())
                .ifPresent(value -> entry.setField(StandardField.ISBN, clean(value)));
        String pages = clean(getPages(data));
        // Cleans also unicode minus signs
        pages = pagesFormatter.format(pages);
        entry.setField(StandardField.PAGES, pages);
        Optional.ofNullable(data.getVolume())
                .ifPresent(value -> entry.setField(StandardField.VOLUME, clean(value)));
        Optional.ofNullable(getAuthorName(data))
                .ifPresent(value -> entry.setField(StandardField.AUTHOR, clean(value)));
        Optional.ofNullable(getEditorName(data))
                .ifPresent(value -> entry.setField(StandardField.EDITOR, clean(value)));
        Optional.ofNullable(getKeywords(data))
                .ifPresent(value -> entry.setField(StandardField.KEYWORDS, clean(value)));
        Optional.ofNullable(getPublisher(data))
                .ifPresent(value -> entry.setField(StandardField.PUBLISHER, clean(value)));
        Optional.ofNullable(getKnowledgeItem(data))
                .ifPresent(value -> entry.setField(StandardField.COMMENT, StringUtil.unifyLineBreaks(value, "\n")));
        return entry;
    }

    private EntryType getType(CitaviExchangeData.References.Reference data) {
        return Optional.ofNullable(data.getReferenceType())
                       .map(CitaviXmlImporter::convertRefNameToType)
                       .orElse(StandardEntryType.Article);
    }

    private static EntryType convertRefNameToType(String refName) {
        // Locale.ROOT keeps the comparison locale-independent (e.g. the Turkish dotless i),
        // consistent with isRecognizedFormat(Path).
        return switch (refName.toLowerCase(Locale.ROOT).trim()) {
            case "artwork", "generic", "musicalbum", "audioorvideodocument", "movie" -> StandardEntryType.Misc;
            case "electronic article" -> IEEETranEntryType.Electronic;
            case "book section" -> StandardEntryType.InBook;
            case "book", "bookedited", "audiobook" -> StandardEntryType.Book;
            case "report" -> StandardEntryType.Report;
            // case "journal article" -> StandardEntryType.Article;
            default -> StandardEntryType.Article;
        };
    }

    private String getPages(CitaviExchangeData.References.Reference data) {
        String tmpStr = "";
        if ((data.getPageCount() != null) && (data.getPageRange() == null)) {
            tmpStr = data.getPageCount();
        } else if ((data.getPageCount() == null) && (data.getPageRange() != null)) {
            tmpStr = data.getPageRange();
        } else if ((data.getPageCount() == null) && (data.getPageRange() == null)) {
            return tmpStr;
        }
        // NOTE(review): this appears to strip an XML-like wrapper around the page value by
        // scanning for the second '>' from the end; the magic offsets (2 and 5) assume a
        // fixed-length closing tag - TODO confirm against actual Citavi page markup.
        int count = 0;
        String pages = "";
        for (int i = tmpStr.length() - 1; i >= 0; i--) {
            if (count == 2) {
                pages = tmpStr.substring(i + 2, (tmpStr.length() - 1 - 5) + 1);
                break;
            } else {
                if (tmpStr.charAt(i) == '>') {
                    count++;
                }
            }
        }
        return pages;
    }

    private String getAuthorName(CitaviExchangeData.References.Reference data) {
        if (refAuthors == null) {
            return null;
        }
        return this.refIdWithAuthors.get(data.getId());
    }

    /**
     * Builds a map from reference id to a stringified author/editor list.
     * Each input line has the form {@code <ref-uuid>;<person-uuid>[;<person-uuid>...]}.
     */
    private Map<String, String> buildPersonList(List<String> authorsOrEditors) {
        Map<String, String> refToPerson = new HashMap<>();
        for (String idStringsWithSemicolon : authorsOrEditors) {
            String refId = idStringsWithSemicolon.substring(0, UUID_LENGTH);
            String rest = idStringsWithSemicolon.substring(UUID_SEMICOLON_OFFSET_INDEX);
            String[] personIds = rest.split(";");
            List<Author> jabrefAuthors = new ArrayList<>();
            for (String personId : personIds) {
                // Store persons we already encountered, we can have the same author multiple times in the whole database
                knownPersons.computeIfAbsent(personId, k -> {
                    Optional<Person> person = persons.getPerson().stream().filter(p -> p.getId().equals(k)).findFirst();
                    return person.map(p -> new Author(p.getFirstName(), "", "", p.getLastName(), "")).orElse(null);
                });
                jabrefAuthors.add(knownPersons.get(personId));
            }
            String stringifiedAuthors = AuthorList.of(jabrefAuthors).getAsLastFirstNamesWithAnd(false);
            refToPerson.put(refId, stringifiedAuthors);
        }
        return refToPerson;
    }

    /** Builds a map from reference id to a comma-separated keyword list; same line format as {@link #buildPersonList}. */
    private Map<String, String> buildKeywordList(List<String> keywordsList) {
        Map<String, String> refToKeywords = new HashMap<>();
        for (String idStringsWithSemicolon : keywordsList) {
            String refId = idStringsWithSemicolon.substring(0, UUID_LENGTH);
            String rest = idStringsWithSemicolon.substring(UUID_SEMICOLON_OFFSET_INDEX);
            String[] keywordIds = rest.split(";");
            List<Keyword> jabrefKeywords = new ArrayList<>();
            for (String keywordId : keywordIds) {
                // store keywords already encountered
                knownKeywords.computeIfAbsent(keywordId, k -> {
                    Optional<CitaviExchangeData.Keywords.Keyword> keyword = keywords.getKeyword().stream().filter(p -> p.getId().equals(k)).findFirst();
                    return keyword.map(kword -> new Keyword(kword.getName())).orElse(null);
                });
                jabrefKeywords.add(knownKeywords.get(keywordId));
            }
            KeywordList list = new KeywordList(List.copyOf(jabrefKeywords));
            String stringifiedKeywords = list.toString();
            refToKeywords.put(refId, stringifiedKeywords);
        }
        return refToKeywords;
    }

    /** Builds a map from reference id to a comma-separated publisher list; same line format as {@link #buildPersonList}. */
    private Map<String, String> buildPublisherList(List<String> publishersList) {
        Map<String, String> refToPublishers = new HashMap<>();
        for (String idStringsWithSemicolon : publishersList) {
            String refId = idStringsWithSemicolon.substring(0, UUID_LENGTH);
            String rest = idStringsWithSemicolon.substring(UUID_SEMICOLON_OFFSET_INDEX);
            String[] publisherIds = rest.split(";");
            List<String> jabrefPublishers = new ArrayList<>();
            for (String pubId : publisherIds) {
                // store publishers already encountered
                knownPublishers.computeIfAbsent(pubId, k -> {
                    Optional<CitaviExchangeData.Publishers.Publisher> publisher = publishers.getPublisher().stream().filter(p -> p.getId().equals(k)).findFirst();
                    return publisher.map(CitaviExchangeData.Publishers.Publisher::getName).orElse(null);
                });
                jabrefPublishers.add(knownPublishers.get(pubId));
            }
            String stringifiedKeywords = String.join(",", jabrefPublishers);
            refToPublishers.put(refId, stringifiedKeywords);
        }
        return refToPublishers;
    }

    private String getEditorName(CitaviExchangeData.References.Reference data) {
        if (refEditors == null) {
            return null;
        }
        return this.refIdWithEditors.get(data.getId());
    }

    private String getKeywords(CitaviExchangeData.References.Reference data) {
        if (refKeywords == null) {
            return null;
        }
        return this.refIdWithKeywords.get(data.getId());
    }

    private String getPublisher(CitaviExchangeData.References.Reference data) {
        if (refPublishers == null) {
            return null;
        }
        return this.refIdWithPublishers.get(data.getId());
    }

    /**
     * Collects all knowledge items (quotations, comments, ...) attached to the given
     * reference into one comment string, separated by blank lines.
     */
    private String getKnowledgeItem(CitaviExchangeData.References.Reference data) {
        StringJoiner comment = new StringJoiner("\n\n");
        List<KnowledgeItem> foundItems = knowledgeItems.getKnowledgeItem().stream().filter(p -> data.getId().equals(p.getReferenceID())).toList();
        for (KnowledgeItem knowledgeItem : foundItems) {
            Optional<String> title = Optional.ofNullable(knowledgeItem.getCoreStatement()).filter(Predicate.not(String::isEmpty));
            title.ifPresent(t -> comment.add("# " + cleanUpText(t)));

            Optional<String> text = Optional.ofNullable(knowledgeItem.getText()).filter(Predicate.not(String::isEmpty));
            text.ifPresent(t -> comment.add(cleanUpText(t)));

            // -1 marks "no page range" in the Citavi data.
            Optional<Integer> pages = Optional.ofNullable(knowledgeItem.getPageRangeNumber()).filter(range -> range != -1);
            pages.ifPresent(p -> comment.add("page range: " + p));

            Optional<String> quotationTypeDesc = Optional.ofNullable(knowledgeItem.getQuotationType()).flatMap(type ->
                    QUOTATION_TYPES.stream()
                                   .filter(qt -> type == qt.getCitaviIndexType())
                                   .map(QuotationTypeMapping::getName).findFirst());
            quotationTypeDesc.ifPresent(qt -> comment.add(String.format("quotation type: %s", qt)));

            Optional<Short> quotationIndex = Optional.ofNullable(knowledgeItem.getQuotationIndex());
            quotationIndex.ifPresent(index -> comment.add(String.format("quotation index: %d", index)));
        }
        return comment.toString();
    }

    /** Escapes unescaped curly braces and trims trailing spaces before line breaks. */
    String cleanUpText(String text) {
        String result = removeSpacesBeforeLineBreak(text);
        result = result.replaceAll("(?<!\\\\)\\{", "\\\\{");
        result = result.replaceAll("(?<!\\\\)}", "\\\\}");
        return result;
    }

    private String removeSpacesBeforeLineBreak(String string) {
        return string.replaceAll(" +\r\n", "\r\n")
                     .replaceAll(" +\n", "\n");
    }

    private void initUnmarshaller() throws JAXBException {
        if (unmarshaller == null) {
            // Lazy init because this is expensive
            JAXBContext context = JAXBContext.newInstance("org.jabref.logic.importer.fileformat.citavi");
            unmarshaller = context.createUnmarshaller();
        }
    }

    private Object unmarshallRoot(BufferedReader reader) throws XMLStreamException, JAXBException {
        initUnmarshaller();
        XMLInputFactory xmlInputFactory = XMLInputFactory.newFactory();
        XMLStreamReader xmlStreamReader = xmlInputFactory.createXMLStreamReader(reader);
        // Go to the root element
        while (!xmlStreamReader.isStartElement()) {
            xmlStreamReader.next();
        }
        return unmarshaller.unmarshal(xmlStreamReader);
    }

    @Override
    public ParserResult importDatabase(BufferedReader reader) throws IOException {
        Objects.requireNonNull(reader);
        throw new UnsupportedOperationException("CitaviXmlImporter does not support importDatabase(BufferedReader reader)."
                + "Instead use importDatabase(Path filePath, Charset defaultEncoding).");
    }

    @Override
    public List<BibEntry> parseEntries(InputStream inputStream) {
        try {
            return importDatabase(
                    new BufferedReader(new InputStreamReader(inputStream, StandardCharsets.UTF_8))).getDatabase().getEntries();
        } catch (IOException e) {
            LOGGER.error(e.getLocalizedMessage(), e);
        }
        return Collections.emptyList();
    }

    /**
     * Extracts the XML payload of a Citavi archive into a temp file and returns a
     * UTF-8 reader over it (with a possible UTF-8 BOM stripped).
     * Fixed: the {@link ZipInputStream} (and its underlying file handle) was
     * previously never closed.
     */
    private BufferedReader getReaderFromZip(Path filePath) throws IOException {
        Path newFile = Files.createTempFile("citavicontent", ".xml");
        try (ZipInputStream zis = new ZipInputStream(new FileInputStream(filePath.toFile()))) {
            ZipEntry zipEntry = zis.getNextEntry();
            // NOTE(review): each entry overwrites the same temp file, so only the last
            // archive entry survives - presumably a Citavi archive holds a single XML file.
            while (zipEntry != null) {
                Files.copy(zis, newFile, StandardCopyOption.REPLACE_EXISTING);
                zipEntry = zis.getNextEntry();
            }
        }

        InputStream stream = Files.newInputStream(newFile, StandardOpenOption.READ);
        // check and delete the utf-8 BOM bytes
        InputStream newStream = checkForUtf8BOMAndDiscardIfAny(stream);

        // clean up the temp file
        Files.delete(newFile);

        return new BufferedReader(new InputStreamReader(newStream, StandardCharsets.UTF_8));
    }

    private static InputStream checkForUtf8BOMAndDiscardIfAny(InputStream inputStream) throws IOException {
        PushbackInputStream pushbackInputStream = new PushbackInputStream(new BufferedInputStream(inputStream), 3);
        byte[] bom = new byte[3];
        if (pushbackInputStream.read(bom) != -1) {
            // Push the bytes back unless they are exactly the UTF-8 BOM (EF BB BF).
            if (!((bom[0] == (byte) 0xEF) && (bom[1] == (byte) 0xBB) && (bom[2] == (byte) 0xBF))) {
                pushbackInputStream.unread(bom);
            }
        }
        return pushbackInputStream;
    }

    /** Normalizes line breaks and whitespace, then converts HTML markup to LaTeX. */
    private String clean(String input) {
        String result = StringUtil.unifyLineBreaks(input, " ")
                                  .trim()
                                  .replaceAll(" +", " ");
        return htmlToLatexFormatter.format(result);
    }

    enum QuotationTypeMapping {
        IMAGE_QUOTATION(0, "Image quotation"),
        DIRECT_QUOTATION(1, "Direct quotation"),
        INDIRECT_QUOTATION(2, "Indirect quotation"),
        SUMMARY(3, "Summary"),
        COMMENT(4, "Comment"),
        HIGHLIGHT(5, "Highlight"),
        HIGHLIGHT_RED(6, "Highlight in red");

        int citaviType;
        String name;

        QuotationTypeMapping(int citaviType, String name) {
            this.name = name;
            this.citaviType = citaviType;
        }

        String getName() {
            return name;
        }

        int getCitaviIndexType() {
            return citaviType;
        }
    }
}
| 21,191 | 40.149515 | 162 | java |
null | jabref-main/src/main/java/org/jabref/logic/importer/fileformat/CiteSeerParser.java | package org.jabref.logic.importer.fileformat;
import java.net.CookieHandler;
import java.net.CookieManager;
import java.util.ArrayList;
import java.util.List;
import java.util.Optional;
import org.jabref.logic.importer.AuthorListParser;
import org.jabref.logic.importer.ParseException;
import org.jabref.model.entry.BibEntry;
import org.jabref.model.entry.field.StandardField;
import org.jabref.model.strings.StringUtil;
import kong.unirest.json.JSONArray;
import kong.unirest.json.JSONObject;
public class CiteSeerParser {

    /**
     * Converts a CiteSeerX JSON search response into a list of {@link BibEntry} objects.
     *
     * @param jsonResponse array of result objects as returned by the CiteSeer API
     * @throws ParseException if an entry cannot be converted
     */
    public List<BibEntry> parseCiteSeerResponse(JSONArray jsonResponse) throws ParseException {
        List<BibEntry> response = new ArrayList<>();
        // NOTE(review): installs a process-wide cookie handler on every call - presumably
        // required for follow-up requests; confirm whether this belongs at a higher level.
        CookieHandler.setDefault(new CookieManager());
        for (int i = 0; i < jsonResponse.length(); ++i) {
            response.add(parseBibEntry(jsonResponse.getJSONObject(i)));
        }
        return response;
    }

    /***
     * WARNING: The DOI for each parsed BibEntry is not a valid DOI.
     * Cite Seer associates an id with each response as a unique hash.
     * However, it is not a valid variation of a DOI value.
     *
     * @param jsonObj Search response as a JSON Object
     * @return BibEntry
     * @throws ParseException
     */
    private BibEntry parseBibEntry(JSONObject jsonObj) throws ParseException {
        BibEntry bibEntry = new BibEntry();
        bibEntry.setField(StandardField.DOI, jsonObj.optString("id"));
        bibEntry.setField(StandardField.TITLE, jsonObj.optString("title"));
        bibEntry.setField(StandardField.VENUE, jsonObj.optString("venue"));
        bibEntry.setField(StandardField.YEAR, jsonObj.optString("year"));
        bibEntry.setField(StandardField.PUBLISHER, jsonObj.optString("publisher"));
        bibEntry.setField(StandardField.ABSTRACT, jsonObj.optString("abstract"));
        bibEntry.setField(StandardField.AUTHOR, parseAuthors(Optional.ofNullable(jsonObj.optJSONArray("authors"))));
        bibEntry.setField(StandardField.JOURNAL, jsonObj.optString("journal"));
        bibEntry.setField(StandardField.URL, jsonObj.optString("source"));
        return bibEntry;
    }

    /**
     * Joins the author names with " and " and normalizes them via {@link AuthorListParser}.
     * Returns an empty string when the array is absent or empty (previously an empty
     * array caused {@code getString(-1)} to throw).
     */
    private String parseAuthors(Optional<JSONArray> authorsOpt) {
        if (authorsOpt.isEmpty()) {
            return "";
        }
        JSONArray authorsArray = authorsOpt.get();
        if (authorsArray.length() == 0) {
            return "";
        }
        String separator = " and ";
        StringBuilder authorsStringBuilder = new StringBuilder();
        for (int i = 0; i < authorsArray.length() - 1; i++) {
            authorsStringBuilder.append(StringUtil.shaveString(authorsArray.getString(i))).append(separator);
        }
        // NOTE(review): the last author is not run through shaveString - kept as-is to
        // preserve behavior, but this looks like an oversight; confirm intent.
        authorsStringBuilder.append(authorsArray.getString(authorsArray.length() - 1));
        return new AuthorListParser().parse(authorsStringBuilder.toString()).getAsLastFirstNamesWithAnd(false);
    }
}
| 2,809 | 41.575758 | 116 | java |
null | jabref-main/src/main/java/org/jabref/logic/importer/fileformat/CoinsParser.java | package org.jabref.logic.importer.fileformat;
import java.io.BufferedReader;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import java.util.stream.Collectors;
import org.jabref.logic.importer.ParseException;
import org.jabref.logic.importer.Parser;
import org.jabref.logic.util.OS;
import org.jabref.model.entry.BibEntry;
import org.jabref.model.entry.field.Field;
import org.jabref.model.entry.field.StandardField;
import org.jabref.model.entry.types.StandardEntryType;
/**
* implemented by reverse-engineering <a href="https://github.com/SeerLabs/CiteSeerX/blob/4df28a98083be2829ec4c56ebbac09eb7772d379/src/java/edu/psu/citeseerx/domain/BiblioTransformer.java#L155-L249">the implementation by CiteSeerX</a>
*/
public class CoinsParser implements Parser {

    // Patterns are stateless; compile them once per class instead of per parser instance.
    private static final Pattern DOI = Pattern.compile("%3Fdoi%3D([^&]+)");
    private static final Pattern TITLE = Pattern.compile("&rft.atitle=([^&]+)");
    private static final Pattern JOURNAL = Pattern.compile("&rft.jtitle=([^&]+)");
    private static final Pattern YEAR = Pattern.compile("&rft.date=([^&]+)");
    private static final Pattern VOLUME = Pattern.compile("&rft.volume=([^&]+)");
    private static final Pattern PAGES = Pattern.compile("&rft.pages=([^&]+)");
    private static final Pattern ISSUE = Pattern.compile("&rft.issue=([^&]+)");
    private static final Pattern TYPE = Pattern.compile("&rft.genre=([^&]+)");
    private static final Pattern AUTHOR = Pattern.compile("&rft.au=([^&]+)");

    /**
     * Extracts a single {@link BibEntry} from COinS (ContextObjects in Spans) data by
     * pattern-matching the OpenURL key/value pairs.
     */
    @Override
    public List<BibEntry> parseEntries(InputStream inputStream) throws ParseException {
        // NOTE(review): no explicit charset, so the platform default is used; COinS data
        // is URL-encoded ASCII, which makes this harmless in practice - confirm.
        String data = new BufferedReader(new InputStreamReader(inputStream)).lines().collect(Collectors.joining(OS.NEWLINE));
        BibEntry entry = new BibEntry();
        appendData(data, entry, DOI, StandardField.DOI);
        appendData(data, entry, TITLE, StandardField.TITLE);
        appendData(data, entry, JOURNAL, StandardField.JOURNALTITLE);
        appendData(data, entry, YEAR, StandardField.YEAR);
        appendData(data, entry, VOLUME, StandardField.VOLUME);
        appendData(data, entry, PAGES, StandardField.PAGES);
        appendData(data, entry, ISSUE, StandardField.ISSUE);

        Matcher matcherType = TYPE.matcher(data);
        if (matcherType.find()) {
            switch (matcherType.group(1)) {
                case "article" -> entry.setType(StandardEntryType.Article);
                // "unknown" and everything else falls back to Misc.
                default -> entry.setType(StandardEntryType.Misc);
            }
        }

        // Collect every rft.au occurrence into one "and"-separated author string.
        String authors = AUTHOR.matcher(data)
                               .results()
                               .map(match -> match.group(1))
                               .collect(Collectors.joining(" and "));
        entry.setField(StandardField.AUTHOR, authors);

        return Collections.singletonList(entry);
    }

    /** Sets {@code field} on {@code entry} with the first capture group, if the pattern matches. */
    private void appendData(String data, BibEntry entry, Pattern pattern, Field field) {
        Matcher matcher = pattern.matcher(data);
        if (matcher.find()) {
            entry.setField(field, matcher.group(1));
        }
    }
}
| 3,369 | 41.125 | 234 | java |
null | jabref-main/src/main/java/org/jabref/logic/importer/fileformat/CopacImporter.java | package org.jabref.logic.importer.fileformat;
import java.io.BufferedReader;
import java.io.IOException;
import java.util.LinkedList;
import java.util.List;
import java.util.Objects;
import java.util.regex.Pattern;
import org.jabref.logic.importer.Importer;
import org.jabref.logic.importer.ParserResult;
import org.jabref.logic.util.StandardFileType;
import org.jabref.model.entry.BibEntry;
import org.jabref.model.entry.field.Field;
import org.jabref.model.entry.field.FieldFactory;
import org.jabref.model.entry.field.StandardField;
import org.jabref.model.entry.field.UnknownField;
import org.jabref.model.entry.types.StandardEntryType;
/**
* Importer for COPAC format.
* <p>
* Documentation can be found online at: <a href="http://copac.ac.uk/faq/#format">http://copac.ac.uk/faq/#format</a>
*/
public class CopacImporter extends Importer {
    // A record is recognized by a line starting with the "TI- " (title) tag.
    private static final Pattern COPAC_PATTERN = Pattern.compile("^\\s*TI- ");
    @Override
    public String getName() {
        return "Copac";
    }
    @Override
    public StandardFileType getFileType() {
        return StandardFileType.TXT;
    }
    @Override
    public String getId() {
        return "cpc";
    }
    @Override
    public String getDescription() {
        return "Importer for COPAC format.";
    }
    @Override
    public boolean isRecognizedFormat(BufferedReader reader) throws IOException {
        // The file counts as COPAC as soon as any line carries a title tag.
        for (String line = reader.readLine(); line != null; line = reader.readLine()) {
            if (CopacImporter.COPAC_PATTERN.matcher(line).find()) {
                return true;
            }
        }
        return false;
    }
    /**
     * Stores {@code value} in {@code field} of {@code b}, appending with {@code separator}
     * when the field already has content.
     */
    private static void setOrAppend(BibEntry b, Field field, String value, String separator) {
        if (b.hasField(field)) {
            b.setField(field, b.getField(field).get() + separator + value);
        } else {
            b.setField(field, value);
        }
    }
    @Override
    public ParserResult importDatabase(BufferedReader reader) throws IOException {
        Objects.requireNonNull(reader);
        // First pass: cut the input into one raw string per record.
        List<String> rawRecords = new LinkedList<>();
        StringBuilder current = new StringBuilder();
        String line;
        while ((line = reader.readLine()) != null) {
            if (line.length() < 4) {
                continue;
            }
            String tag = line.substring(0, 4);
            if (" ".equals(tag)) {
                // Continuation of the previous field.
                current.append(' ').append(line.trim());
            } else {
                if ("TI- ".equals(tag)) {
                    // A title tag opens a new record; flush the one collected so far.
                    if (current.length() > 0) {
                        rawRecords.add(current.toString());
                    }
                    current = new StringBuilder();
                }
                current.append('\n').append(line);
            }
        }
        if (current.length() > 0) {
            rawRecords.add(current.toString());
        }
        // Second pass: map each record's tagged lines onto BibEntry fields.
        List<BibEntry> results = new LinkedList<>();
        for (String rawRecord : rawRecords) {
            // COPAC does not state the document kind, so a book is assumed.
            BibEntry entry = new BibEntry(StandardEntryType.Book);
            for (String rawLine : rawRecord.split("\n")) {
                String fieldLine = rawLine.trim();
                if (fieldLine.length() < 4) {
                    continue;
                }
                String tag = fieldLine.substring(0, 4);
                String content = fieldLine.substring(4).trim();
                switch (tag) {
                    case "TI- " -> setOrAppend(entry, StandardField.TITLE, content, ", ");
                    case "AU- " -> setOrAppend(entry, StandardField.AUTHOR, content, " and ");
                    case "PY- " -> setOrAppend(entry, StandardField.YEAR, content, ", ");
                    case "PU- " -> setOrAppend(entry, StandardField.PUBLISHER, content, ", ");
                    case "SE- " -> setOrAppend(entry, StandardField.SERIES, content, ", ");
                    case "IS- " -> setOrAppend(entry, StandardField.ISBN, content, ", ");
                    case "KW- " -> setOrAppend(entry, StandardField.KEYWORDS, content, ", ");
                    case "NT- " -> setOrAppend(entry, StandardField.NOTE, content, ", ");
                    case "PD- " -> setOrAppend(entry, new UnknownField("physicaldimensions"), content, ", ");
                    case "DT- " -> setOrAppend(entry, new UnknownField("documenttype"), content, ", ");
                    // Unknown tag: derive the field name from its first two characters.
                    default -> setOrAppend(entry, FieldFactory.parseField(StandardEntryType.Book, fieldLine.substring(0, 2)), content, ", ");
                }
            }
            results.add(entry);
        }
        return new ParserResult(results);
    }
}
| 5,150 | 33.57047 | 138 | java |
null | jabref-main/src/main/java/org/jabref/logic/importer/fileformat/CustomImporter.java | package org.jabref.logic.importer.fileformat;
import java.io.BufferedReader;
import java.io.IOException;
import java.net.URL;
import java.net.URLClassLoader;
import java.nio.file.Path;
import java.util.Arrays;
import java.util.List;
import java.util.Objects;
import org.jabref.logic.importer.ImportException;
import org.jabref.logic.importer.Importer;
import org.jabref.logic.importer.ParserResult;
import org.jabref.logic.util.FileType;
/**
* Object with data for a custom importer.
*
* <p>Is also responsible for instantiating the class loader.</p>
*/
public class CustomImporter extends Importer {
    private final String className;
    private final Path basePath;
    // The externally loaded importer; all Importer methods delegate to it.
    private final Importer importer;

    /**
     * Creates a wrapper around an externally supplied {@link Importer} implementation.
     *
     * @param basePath  directory (or jar) from which the class is loaded
     * @param className fully qualified name of the importer class to instantiate
     * @throws ImportException if the class cannot be loaded or instantiated
     */
    public CustomImporter(String basePath, String className) throws ImportException {
        this.basePath = Path.of(basePath);
        this.className = className;
        try {
            importer = load(this.basePath.toUri().toURL(), this.className);
        } catch (IOException | ReflectiveOperationException exception) {
            throw new ImportException(exception);
        }
    }

    /**
     * Loads and instantiates {@code className} from {@code basePathURL} via its no-arg constructor.
     */
    private static Importer load(URL basePathURL, String className)
            throws IOException, ReflectiveOperationException {
        // NOTE(review): the class loader is closed right after instantiation; this assumes
        // the importer triggers no further lazy class loading later on — confirm.
        try (URLClassLoader cl = new URLClassLoader(new URL[]{basePathURL})) {
            Class<?> clazz = Class.forName(className, true, cl);
            return (Importer) clazz.getDeclaredConstructor().newInstance();
        }
    }

    /**
     * Returns {@code [basePath, className]} with forward slashes, suitable for persisting.
     */
    public List<String> getAsStringList() {
        return Arrays.asList(basePath.toString().replace('\\', '/'), className);
    }

    public String getClassName() {
        return className;
    }

    public Path getBasePath() {
        return basePath;
    }

    @Override
    public boolean equals(Object other) {
        if (this == other) {
            return true;
        }
        // Pattern-matching instanceof avoids the separate cast of the classic idiom.
        return (other instanceof CustomImporter otherImporter)
                && Objects.equals(className, otherImporter.className)
                && Objects.equals(basePath, otherImporter.basePath);
    }

    @Override
    public boolean isRecognizedFormat(BufferedReader input) throws IOException {
        return importer.isRecognizedFormat(input);
    }

    @Override
    public ParserResult importDatabase(BufferedReader input) throws IOException {
        return importer.importDatabase(input);
    }

    @Override
    public String getName() {
        return importer.getName();
    }

    @Override
    public FileType getFileType() {
        return importer.getFileType();
    }

    @Override
    public String getId() {
        return importer.getId();
    }

    @Override
    public String getDescription() {
        return importer.getDescription();
    }

    @Override
    public int hashCode() {
        // Consistent with equals: both compare (className, basePath).
        return Objects.hash(className, basePath);
    }

    @Override
    public String toString() {
        return this.getName();
    }
}
| 2,991 | 25.477876 | 118 | java |
null | jabref-main/src/main/java/org/jabref/logic/importer/fileformat/EndnoteImporter.java | package org.jabref.logic.importer.fileformat;
import java.io.BufferedReader;
import java.io.IOException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.regex.Pattern;
import org.jabref.logic.citationkeypattern.CitationKeyGenerator;
import org.jabref.logic.importer.ImportFormatPreferences;
import org.jabref.logic.importer.Importer;
import org.jabref.logic.importer.ParserResult;
import org.jabref.logic.util.StandardFileType;
import org.jabref.model.entry.AuthorList;
import org.jabref.model.entry.BibEntry;
import org.jabref.model.entry.field.Field;
import org.jabref.model.entry.field.InternalField;
import org.jabref.model.entry.field.StandardField;
import org.jabref.model.entry.types.EntryType;
import org.jabref.model.entry.types.StandardEntryType;
/**
* Importer for the Refer/Endnote format.
* modified to use article number for pages if pages are missing (some
* journals, e.g., Physical Review Letters, don't use pages anymore)
*
* check here for details on the format
* http://libguides.csuchico.edu/c.php?g=414245&p=2822898
*/
public class EndnoteImporter extends Importer {
    // Sentinel inserted between records during preprocessing; chosen so it cannot occur in real data.
    private static final String ENDOFRECORD = "__EOREOR__";
    // A "%A " (author) or "%E " (editor) line identifies a Refer/Endnote file.
    private static final Pattern A_PATTERN = Pattern.compile("%A .*");
    private static final Pattern E_PATTERN = Pattern.compile("%E .*");
    // NOTE(review): not referenced within this class; kept for constructor compatibility — confirm.
    private final ImportFormatPreferences preferences;
    public EndnoteImporter(ImportFormatPreferences preferences) {
        this.preferences = preferences;
    }
    @Override
    public String getName() {
        return "Refer/Endnote";
    }
    @Override
    public StandardFileType getFileType() {
        return StandardFileType.ENDNOTE;
    }
    @Override
    public String getId() {
        return "refer";
    }
    @Override
    public String getDescription() {
        return "Importer for the Refer/Endnote format. Modified to use article number for pages if pages are missing.";
    }
    @Override
    public boolean isRecognizedFormat(BufferedReader reader) throws IOException {
        // Our strategy is to look for an "%A " (author) or "%E " (editor) line.
        String str;
        while ((str = reader.readLine()) != null) {
            if (A_PATTERN.matcher(str).matches() || E_PATTERN.matcher(str).matches()) {
                return true;
            }
        }
        return false;
    }
    @Override
    public ParserResult importDatabase(BufferedReader reader) throws IOException {
        List<BibEntry> bibitems = new ArrayList<>();
        StringBuilder sb = new StringBuilder();
        String str;
        boolean first = true;
        // Preprocessing: concatenate all lines, inserting ENDOFRECORD before every
        // "%0" (reference type) line except the first, which opens the first record.
        while ((str = reader.readLine()) != null) {
            str = str.trim();
            if (str.indexOf("%0") == 0) {
                if (first) {
                    first = false;
                } else {
                    sb.append(ENDOFRECORD);
                }
                sb.append(str);
            } else {
                sb.append(str);
            }
            sb.append('\n');
        }
        String[] entries = sb.toString().split(ENDOFRECORD);
        Map<Field, String> hm = new HashMap<>();
        String author;
        EntryType type;
        String editor;
        String artnum;
        for (String entry : entries) {
            // Reset per-record state; hm is reused across iterations.
            hm.clear();
            author = "";
            type = BibEntry.DEFAULT_TYPE;
            editor = "";
            artnum = "";
            boolean isEditedBook = false;
            // Drop the leading "%" and split the record into "<prefix> <value>" chunks.
            String[] fields = entry.trim().substring(1).split("\n%");
            for (String field : fields) {
                if (field.length() < 3) {
                    continue;
                }
                /*
                 * Details of Refer format for Journal Article and Book:
                 *
                 * Generic Ref Journal Article Book Code Author %A Author Author Year %D
                 * Year Year Title %T Title Title Secondary Author %E Series Editor
                 * Secondary Title %B Journal Series Title Place Published %C City
                 * Publisher %I Publisher Volume %V Volume Volume Number of Volumes %6
                 * Number of Volumes Number %N Issue Pages %P Pages Number of Pages
                 * Edition %7 Edition Subsidiary Author %? Translator Alternate Title %J
                 * Alternate Journal Label %F Label Label Keywords %K Keywords Keywords
                 * Abstract %X Abstract Abstract Notes %O Notes Notes
                 */
                String prefix = field.substring(0, 1);
                String val = field.substring(2);
                if ("A".equals(prefix)) {
                    if ("".equals(author)) {
                        author = val;
                    } else {
                        author += " and " + val;
                    }
                } else if ("E".equals(prefix)) {
                    if ("".equals(editor)) {
                        editor = val;
                    } else {
                        editor += " and " + val;
                    }
                } else if ("T".equals(prefix)) {
                    hm.put(StandardField.TITLE, val);
                } else if ("0".equals(prefix)) {
                    // Prefix checks via indexOf(...) == 0: "Book Section" must be tested
                    // before "Book" because the latter is a prefix of neither but would
                    // otherwise shadow more specific values starting with "Book".
                    if (val.indexOf("Journal") == 0) {
                        type = StandardEntryType.Article;
                    } else if (val.indexOf("Book Section") == 0) {
                        type = StandardEntryType.InCollection;
                    } else if (val.indexOf("Book") == 0) {
                        type = StandardEntryType.Book;
                    } else if (val.indexOf("Edited Book") == 0) {
                        type = StandardEntryType.Book;
                        isEditedBook = true;
                    } else if (val.indexOf("Conference") == 0) {
                        type = StandardEntryType.InProceedings;
                    } else if (val.indexOf("Report") == 0) {
                        type = StandardEntryType.TechReport;
                    } else if (val.indexOf("Review") == 0) {
                        type = StandardEntryType.Article;
                    } else if (val.indexOf("Thesis") == 0) {
                        type = StandardEntryType.PhdThesis;
                    } else {
                        type = BibEntry.DEFAULT_TYPE; // unrecognized reference type: keep the default
                    }
                } else if ("7".equals(prefix)) {
                    hm.put(StandardField.EDITION, val);
                } else if ("C".equals(prefix)) {
                    hm.put(StandardField.ADDRESS, val);
                } else if ("D".equals(prefix)) {
                    hm.put(StandardField.YEAR, val);
                } else if ("8".equals(prefix)) {
                    hm.put(StandardField.DATE, val);
                } else if ("J".equals(prefix)) {
                    // "Alternate journal. Let's set it only if no journal
                    // has been set with %B.
                    hm.putIfAbsent(StandardField.JOURNAL, val);
                } else if ("B".equals(prefix)) {
                    // This prefix stands for "journal" in a journal entry, and
                    // "series" in a book entry.
                    if (type.equals(StandardEntryType.Article)) {
                        hm.put(StandardField.JOURNAL, val);
                    } else if (type.equals(StandardEntryType.Book) || type.equals(StandardEntryType.InBook)) {
                        hm.put(StandardField.SERIES, val);
                    } else {
                        /* type = inproceedings */
                        hm.put(StandardField.BOOKTITLE, val);
                    }
                } else if ("I".equals(prefix)) {
                    if (type.equals(StandardEntryType.PhdThesis)) {
                        hm.put(StandardField.SCHOOL, val);
                    } else {
                        hm.put(StandardField.PUBLISHER, val);
                    }
                } else if ("P".equals(prefix)) {
                    // replace single dash page ranges (23-45) with double dashes (23--45):
                    hm.put(StandardField.PAGES, val.replaceAll("([0-9]) *- *([0-9])", "$1--$2"));
                } else if ("V".equals(prefix)) {
                    hm.put(StandardField.VOLUME, val);
                } else if ("N".equals(prefix)) {
                    hm.put(StandardField.NUMBER, val);
                } else if ("U".equals(prefix)) {
                    hm.put(StandardField.URL, val);
                } else if ("R".equals(prefix)) {
                    String doi = val;
                    if (doi.startsWith("doi:")) {
                        doi = doi.substring(4);
                    }
                    hm.put(StandardField.DOI, doi);
                } else if ("O".equals(prefix)) {
                    // Notes may contain Article number ("Artn <number>")
                    if (val.startsWith("Artn")) {
                        String[] tokens = val.split("\\s");
                        artnum = tokens[1];
                    } else {
                        hm.put(StandardField.NOTE, val);
                    }
                } else if ("K".equals(prefix)) {
                    hm.put(StandardField.KEYWORDS, val);
                } else if ("X".equals(prefix)) {
                    hm.put(StandardField.ABSTRACT, val);
                } else if ("9".equals(prefix)) {
                    // %9 refines the thesis kind.
                    if (val.indexOf("Ph.D.") == 0) {
                        type = StandardEntryType.PhdThesis;
                    }
                    if (val.indexOf("Masters") == 0) {
                        type = StandardEntryType.MastersThesis;
                    }
                } else if ("F".equals(prefix)) {
                    hm.put(InternalField.KEY_FIELD, CitationKeyGenerator.cleanKey(val, ""));
                }
            }
            // For Edited Book, EndNote puts the editors in the author field.
            // We want them in the editor field so that bibtex knows it's an edited book
            if (isEditedBook && "".equals(editor)) {
                editor = author;
                author = "";
            }
            // Normalize author/editor name lists (see fixAuthor for the trailing-comma heuristic).
            if (!"".equals(author)) {
                hm.put(StandardField.AUTHOR, fixAuthor(author));
            }
            if (!"".equals(editor)) {
                hm.put(StandardField.EDITOR, fixAuthor(editor));
            }
            // if pages missing and article number given, use the article number
            if (((hm.get(StandardField.PAGES) == null) || "-".equals(hm.get(StandardField.PAGES))) && !"".equals(artnum)) {
                hm.put(StandardField.PAGES, artnum);
            }
            BibEntry b = new BibEntry(type);
            b.setField(hm);
            // Skip records that produced no fields at all.
            if (!b.getFields().isEmpty()) {
                bibitems.add(b);
            }
        }
        return new ParserResult(bibitems);
    }
    /**
     * We must be careful about the author names, since they can be presented differently
     * by different sources. Normally each %A tag brings one name, and we get the authors
     * separated by " and ". This is the correct behaviour.
     * One source lists the names separated by comma, with a comma at the end. We can detect
     * this format and fix it.
     *
     * @param s The author string
     * @return The fixed author string
     */
    private static String fixAuthor(String s) {
        int index = s.indexOf(" and ");
        if (index >= 0) {
            return AuthorList.fixAuthorLastNameFirst(s);
        }
        // Look for the comma at the end:
        index = s.lastIndexOf(',');
        if (index == (s.length() - 1)) {
            String mod = s.substring(0, s.length() - 1).replace(", ", " and ");
            return AuthorList.fixAuthorLastNameFirst(mod);
        } else {
            return AuthorList.fixAuthorLastNameFirst(s);
        }
    }
}
| 11,820 | 39.482877 | 123 | java |
null | jabref-main/src/main/java/org/jabref/logic/importer/fileformat/EndnoteXmlImporter.java | package org.jabref.logic.importer.fileformat;
import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.net.MalformedURLException;
import java.net.URL;
import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Locale;
import java.util.Map;
import java.util.Objects;
import javax.xml.XMLConstants;
import javax.xml.stream.XMLInputFactory;
import javax.xml.stream.XMLStreamException;
import javax.xml.stream.XMLStreamReader;
import javax.xml.stream.events.XMLEvent;
import org.jabref.logic.importer.ImportFormatPreferences;
import org.jabref.logic.importer.Importer;
import org.jabref.logic.importer.ParseException;
import org.jabref.logic.importer.Parser;
import org.jabref.logic.importer.ParserResult;
import org.jabref.logic.util.StandardFileType;
import org.jabref.model.entry.BibEntry;
import org.jabref.model.entry.KeywordList;
import org.jabref.model.entry.LinkedFile;
import org.jabref.model.entry.field.Field;
import org.jabref.model.entry.field.StandardField;
import org.jabref.model.entry.field.UnknownField;
import org.jabref.model.entry.types.EntryType;
import org.jabref.model.entry.types.IEEETranEntryType;
import org.jabref.model.entry.types.StandardEntryType;
import org.jabref.model.strings.StringUtil;
import com.google.common.base.Joiner;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* Importer for the Endnote XML format.
* <p>
* Based on dtd scheme downloaded from Article #122577 in http://kbportal.thomson.com.
*/
public class EndnoteXmlImporter extends Importer implements Parser {
    private static final Logger LOGGER = LoggerFactory.getLogger(EndnoteXmlImporter.class);
    // Used for the keyword separator when assembling entries.
    private final ImportFormatPreferences preferences;
    public EndnoteXmlImporter(ImportFormatPreferences preferences) {
        this.preferences = preferences;
    }
    private static String join(List<String> list, String string) {
        return Joiner.on(string).join(list);
    }
    @Override
    public String getName() {
        return "EndNote XML";
    }
    @Override
    public StandardFileType getFileType() {
        return StandardFileType.XML;
    }
    @Override
    public String getId() {
        return "endnote";
    }
    @Override
    public String getDescription() {
        return "Importer for the EndNote XML format.";
    }
    @Override
    public boolean isRecognizedFormat(BufferedReader reader) throws IOException {
        // Look for a "<records>" element within the first 50 lines.
        String str;
        int i = 0;
        while (((str = reader.readLine()) != null) && (i < 50)) {
            if (str.toLowerCase(Locale.ENGLISH).contains("<records>")) {
                return true;
            }
            i++;
        }
        return false;
    }
    @Override
    public ParserResult importDatabase(BufferedReader input) throws IOException {
        Objects.requireNonNull(input);
        List<BibEntry> bibItems = new ArrayList<>();
        try {
            XMLInputFactory xmlInputFactory = XMLInputFactory.newInstance();
            // prevent xxe (https://rules.sonarsource.com/java/RSPEC-2755)
            xmlInputFactory.setProperty(XMLConstants.ACCESS_EXTERNAL_SCHEMA, "");
            // required for reading Unicode characters such as ö
            xmlInputFactory.setProperty(XMLInputFactory.IS_COALESCING, true);
            XMLStreamReader reader = xmlInputFactory.createXMLStreamReader(input);
            // Pull-parse the document; each <record> element becomes one BibEntry.
            while (reader.hasNext()) {
                reader.next();
                if (isStartXMLEvent(reader)) {
                    String elementName = reader.getName().getLocalPart();
                    if ("record".equals(elementName)) {
                        parseRecord(reader, bibItems, elementName);
                    }
                }
            }
        } catch (XMLStreamException e) {
            LOGGER.debug("could not parse document", e);
            return ParserResult.fromError(e);
        }
        return new ParserResult(bibItems);
    }
    // Consumes one <record> element (cursor positioned at its start tag) and appends
    // the resulting BibEntry to bibItems; dispatches on the record's child elements.
    private void parseRecord(XMLStreamReader reader, List<BibEntry> bibItems, String startElement)
            throws XMLStreamException {
        Map<Field, String> fields = new HashMap<>();
        EntryType entryType = StandardEntryType.Article;
        KeywordList keywordList = new KeywordList();
        List<LinkedFile> linkedFiles = new ArrayList<>();
        while (reader.hasNext()) {
            reader.next();
            if (isStartXMLEvent(reader)) {
                String elementName = reader.getName().getLocalPart();
                switch (elementName) {
                    case "ref-type" -> {
                        String type = reader.getAttributeValue(null, "name");
                        entryType = convertRefNameToType(type);
                    }
                    case "contributors" -> {
                        handleAuthorList(reader, fields, elementName);
                    }
                    case "titles" -> {
                        handleTitles(reader, fields, elementName);
                    }
                    case "pages" -> {
                        parseStyleContent(reader, fields, StandardField.PAGES, elementName);
                    }
                    case "volume" -> {
                        parseStyleContent(reader, fields, StandardField.VOLUME, elementName);
                    }
                    case "number" -> {
                        parseStyleContent(reader, fields, StandardField.NUMBER, elementName);
                    }
                    case "dates" -> {
                        parseYear(reader, fields);
                    }
                    case "notes" -> {
                        parseStyleContent(reader, fields, StandardField.NOTE, elementName);
                    }
                    case "urls" -> {
                        handleUrlList(reader, fields, linkedFiles);
                    }
                    case "keywords" -> {
                        handleKeywordsList(reader, keywordList, elementName);
                    }
                    case "abstract" -> {
                        parseStyleContent(reader, fields, StandardField.ABSTRACT, elementName);
                    }
                    case "isbn" -> {
                        parseStyleContent(reader, fields, StandardField.ISBN, elementName);
                    }
                    case "electronic-resource-num" -> {
                        parseStyleContent(reader, fields, StandardField.DOI, elementName);
                    }
                    case "publisher" -> {
                        parseStyleContent(reader, fields, StandardField.PUBLISHER, elementName);
                    }
                    case "label" -> {
                        parseStyleContent(reader, fields, new UnknownField("endnote-label"), elementName);
                    }
                }
            }
            if (isEndXMLEvent(reader) && reader.getName().getLocalPart().equals(startElement)) {
                break;
            }
        }
        BibEntry entry = new BibEntry(entryType);
        entry.putKeywords(keywordList, preferences.bibEntryPreferences().getKeywordSeparator());
        entry.setField(fields);
        entry.setFiles(linkedFiles);
        bibItems.add(entry);
    }
    // Maps an EndNote "ref-type" name onto a BibTeX/BibLaTeX entry type; unknown names become Article.
    private static EntryType convertRefNameToType(String refName) {
        return switch (refName.toLowerCase().trim()) {
            case "artwork", "generic" -> StandardEntryType.Misc;
            case "electronic article" -> IEEETranEntryType.Electronic;
            case "book section" -> StandardEntryType.InBook;
            case "book" -> StandardEntryType.Book;
            case "report" -> StandardEntryType.Report;
            // case "journal article" -> StandardEntryType.Article;
            default -> StandardEntryType.Article;
        };
    }
    // Collects all <author> children of <contributors> and joins them with " and ".
    private void handleAuthorList(XMLStreamReader reader, Map<Field, String> fields, String startElement) throws XMLStreamException {
        List<String> authorNames = new ArrayList<>();
        while (reader.hasNext()) {
            reader.next();
            if (isStartXMLEvent(reader)) {
                String elementName = reader.getName().getLocalPart();
                switch (elementName) {
                    case "author" -> {
                        parseAuthor(reader, authorNames);
                    }
                }
            }
            if (isEndXMLEvent(reader) && reader.getName().getLocalPart().equals(startElement)) {
                break;
            }
        }
        fields.put(StandardField.AUTHOR, join(authorNames, " and "));
    }
    private void parseAuthor(XMLStreamReader reader, List<String> authorNames) throws XMLStreamException {
        while (reader.hasNext()) {
            reader.next();
            if (isStartXMLEvent(reader)) {
                String elementName = reader.getName().getLocalPart();
                switch (elementName) {
                    case "style" -> {
                        reader.next();
                        if (isCharacterXMLEvent(reader)) {
                            authorNames.add(reader.getText());
                        }
                    }
                }
            }
            if (isEndXMLEvent(reader) && "author".equals(reader.getName().getLocalPart())) {
                break;
            }
        }
    }
    // Reads the text of the <style> child of elementName into the given field.
    // Some elements get extra trimming/whitespace normalization, depending on the tag.
    private void parseStyleContent(XMLStreamReader reader, Map<Field, String> fields, Field field, String elementName) throws XMLStreamException {
        while (reader.hasNext()) {
            reader.next();
            if (isStartXMLEvent(reader)) {
                String tag = reader.getName().getLocalPart();
                if ("style".equals(tag)) {
                    reader.next();
                    if (isCharacterXMLEvent(reader)) {
                        if ("abstract".equals(elementName) || "electronic-resource-num".equals(elementName) || "notes".equals(elementName)) {
                            putIfValueNotNull(fields, field, reader.getText().trim());
                        } else if ("isbn".equals(elementName) || "secondary-title".equals(elementName)) {
                            putIfValueNotNull(fields, field, clean(reader.getText()));
                        } else {
                            putIfValueNotNull(fields, field, reader.getText());
                        }
                    }
                }
            }
            if (isEndXMLEvent(reader) && reader.getName().getLocalPart().equals(elementName)) {
                break;
            }
        }
    }
    // Extracts the <year> value inside <dates>; terminates on the closing </year> tag.
    private void parseYear(XMLStreamReader reader, Map<Field, String> fields) throws XMLStreamException {
        while (reader.hasNext()) {
            reader.next();
            if (isStartXMLEvent(reader)) {
                String elementName = reader.getName().getLocalPart();
                switch (elementName) {
                    case "style" -> {
                        reader.next();
                        if (isCharacterXMLEvent(reader)) {
                            putIfValueNotNull(fields, StandardField.YEAR, reader.getText());
                        }
                    }
                }
            }
            if (isEndXMLEvent(reader) && "year".equals(reader.getName().getLocalPart())) {
                break;
            }
        }
    }
    private void handleKeywordsList(XMLStreamReader reader, KeywordList keywordList, String startElement) throws XMLStreamException {
        while (reader.hasNext()) {
            reader.next();
            if (isStartXMLEvent(reader)) {
                String elementName = reader.getName().getLocalPart();
                switch (elementName) {
                    case "keyword" -> {
                        parseKeyword(reader, keywordList);
                    }
                }
            }
            if (isEndXMLEvent(reader) && reader.getName().getLocalPart().equals(startElement)) {
                break;
            }
        }
    }
    private void parseKeyword(XMLStreamReader reader, KeywordList keywordList) throws XMLStreamException {
        while (reader.hasNext()) {
            reader.next();
            if (isStartXMLEvent(reader)) {
                String elementName = reader.getName().getLocalPart();
                switch (elementName) {
                    case "style" -> {
                        reader.next();
                        if (isCharacterXMLEvent(reader)) {
                            if (reader.getText() != null) {
                                keywordList.add(reader.getText());
                            }
                        }
                    }
                }
            }
            if (isEndXMLEvent(reader) && "keyword".equals(reader.getName().getLocalPart())) {
                break;
            }
        }
    }
    // <title> may be split over several <style> chunks, which are concatenated;
    // <secondary-title> is mapped onto the journal field.
    private void handleTitles(XMLStreamReader reader, Map<Field, String> fields, String startElement) throws XMLStreamException {
        while (reader.hasNext()) {
            reader.next();
            if (isStartXMLEvent(reader)) {
                String elementName = reader.getName().getLocalPart();
                switch (elementName) {
                    case "title" -> {
                        List<String> titleStyleContent = new ArrayList<>();
                        while (reader.hasNext()) {
                            reader.next();
                            if (isStartXMLEvent(reader)) {
                                String tag = reader.getName().getLocalPart();
                                if ("style".equals(tag)) {
                                    reader.next();
                                    if (isCharacterXMLEvent(reader)) {
                                        if (reader.getText() != null) {
                                            titleStyleContent.add((reader.getText()));
                                        }
                                    }
                                }
                            }
                            if (isEndXMLEvent(reader) && reader.getName().getLocalPart().equals(elementName)) {
                                break;
                            }
                        }
                        putIfValueNotNull(fields, StandardField.TITLE, clean(join(titleStyleContent, "")));
                    }
                    case "secondary-title" -> {
                        parseStyleContent(reader, fields, StandardField.JOURNAL, elementName);
                    }
                }
            }
            if (isEndXMLEvent(reader) && reader.getName().getLocalPart().equals(startElement)) {
                break;
            }
        }
    }
    // <related-urls> populate the url field; <pdf-urls> become linked files.
    private void handleUrlList(XMLStreamReader reader, Map<Field, String> fields, List<LinkedFile> linkedFiles) throws XMLStreamException {
        while (reader.hasNext()) {
            reader.next();
            if (isStartXMLEvent(reader)) {
                String elementName = reader.getName().getLocalPart();
                switch (elementName) {
                    case "related-urls" -> {
                        parseRelatedUrls(reader, fields);
                    }
                    case "pdf-urls" -> {
                        parsePdfUrls(reader, fields, linkedFiles);
                    }
                }
            }
            if (isEndXMLEvent(reader) && "urls".equals(reader.getName().getLocalPart())) {
                break;
            }
        }
    }
    private void parseRelatedUrls(XMLStreamReader reader, Map<Field, String> fields) throws XMLStreamException {
        while (reader.hasNext()) {
            reader.next();
            if (isStartXMLEvent(reader)) {
                String elementName = reader.getName().getLocalPart();
                if ("style".equals(elementName)) {
                    reader.next();
                    if (isCharacterXMLEvent(reader)) {
                        putIfValueNotNull(fields, StandardField.URL, reader.getText());
                    }
                }
            } else if (isCharacterXMLEvent(reader)) {
                // URLs may also appear as bare text without a <style> wrapper.
                String value = clean(reader.getText());
                if (value.length() > 0) {
                    putIfValueNotNull(fields, StandardField.URL, clean(value));
                }
            }
            if (isEndXMLEvent(reader) && "related-urls".equals(reader.getName().getLocalPart())) {
                break;
            }
        }
    }
    // NOTE(review): the fields parameter is not used in this method.
    private void parsePdfUrls(XMLStreamReader reader, Map<Field, String> fields, List<LinkedFile> linkedFiles) throws XMLStreamException {
        while (reader.hasNext()) {
            reader.next();
            if (isStartXMLEvent(reader)) {
                String elementName = reader.getName().getLocalPart();
                if ("url".equals(elementName)) {
                    reader.next();
                    if (isStartXMLEvent(reader)) {
                        String tagName = reader.getName().getLocalPart();
                        if ("style".equals(tagName)) {
                            reader.next();
                            if (isCharacterXMLEvent(reader)) {
                                try {
                                    linkedFiles.add(new LinkedFile(new URL(reader.getText()), "PDF"));
                                } catch (
                                        MalformedURLException e) {
                                    LOGGER.info("Unable to parse {}", reader.getText());
                                }
                            }
                        }
                    }
                }
            }
            // NOTE(review): character events are handled both inside the "url" branch above and
            // here; whether the same URL can be added twice for some inputs should be confirmed.
            if (isCharacterXMLEvent(reader)) {
                try {
                    linkedFiles.add(new LinkedFile(new URL(reader.getText()), "PDF"));
                } catch (
                        MalformedURLException e) {
                    LOGGER.info("Unable to parse {}", reader.getText());
                }
            }
            if (isEndXMLEvent(reader) && "pdf-urls".equals(reader.getName().getLocalPart())) {
                break;
            }
        }
    }
    // Unifies line breaks to single spaces and collapses runs of blanks.
    private String clean(String input) {
        return StringUtil.unifyLineBreaks(input, " ")
                         .trim()
                         .replaceAll(" +", " ");
    }
    private void putIfValueNotNull(Map<Field, String> fields, Field field, String value) {
        if (value != null) {
            fields.put(field, value);
        }
    }
    private boolean isCharacterXMLEvent(XMLStreamReader reader) {
        return reader.getEventType() == XMLEvent.CHARACTERS;
    }
    private boolean isStartXMLEvent(XMLStreamReader reader) {
        return reader.getEventType() == XMLEvent.START_ELEMENT;
    }
    private boolean isEndXMLEvent(XMLStreamReader reader) {
        return reader.getEventType() == XMLEvent.END_ELEMENT;
    }
    @Override
    public List<BibEntry> parseEntries(InputStream inputStream) throws ParseException {
        try {
            return importDatabase(
                    new BufferedReader(new InputStreamReader(inputStream, StandardCharsets.UTF_8))).getDatabase().getEntries();
        } catch (IOException e) {
            LOGGER.error(e.getLocalizedMessage(), e);
        }
        return Collections.emptyList();
    }
}
| 19,396 | 37.033333 | 146 | java |
null | jabref-main/src/main/java/org/jabref/logic/importer/fileformat/InspecImporter.java | package org.jabref.logic.importer.fileformat;
import java.io.BufferedReader;
import java.io.IOException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.regex.Pattern;
import org.jabref.logic.importer.Importer;
import org.jabref.logic.importer.ParserResult;
import org.jabref.logic.util.StandardFileType;
import org.jabref.model.entry.AuthorList;
import org.jabref.model.entry.BibEntry;
import org.jabref.model.entry.field.Field;
import org.jabref.model.entry.field.StandardField;
import org.jabref.model.entry.types.EntryType;
import org.jabref.model.entry.types.EntryTypeFactory;
import org.jabref.model.entry.types.StandardEntryType;
/**
* INSPEC format importer.
*/
public class InspecImporter extends Importer {
    // A line of the form "Record ... INSPEC ..." identifies the format.
    private static final Pattern INSPEC_PATTERN = Pattern.compile("Record.*INSPEC.*");
    @Override
    public String getName() {
        return "INSPEC";
    }
    @Override
    public StandardFileType getFileType() {
        return StandardFileType.TXT;
    }
    @Override
    public String getDescription() {
        return "INSPEC format importer.";
    }
    @Override
    public boolean isRecognizedFormat(BufferedReader reader) throws IOException {
        // Accept as soon as a "Record ... INSPEC ..." line shows up anywhere in the input.
        for (String line = reader.readLine(); line != null; line = reader.readLine()) {
            if (INSPEC_PATTERN.matcher(line).find()) {
                return true;
            }
        }
        return false;
    }
    @Override
    public ParserResult importDatabase(BufferedReader reader) throws IOException {
        List<BibEntry> bibitems = new ArrayList<>();
        // Re-join the input into one string, marking record and field boundaries with
        // sentinel tokens so the text can be split on them afterwards.
        StringBuilder buffer = new StringBuilder();
        String line;
        while ((line = reader.readLine()) != null) {
            if (line.length() < 2) {
                continue;
            }
            if (line.indexOf("Record") == 0) {
                buffer.append("__::__").append(line);
            } else {
                buffer.append("__NEWFIELD__").append(line);
            }
        }
        String[] records = buffer.toString().split("__::__");
        // Deliberately declared outside the loop to keep the original semantics:
        // a record without an "RT" tag inherits the previous record's type.
        EntryType type = BibEntry.DEFAULT_TYPE;
        Map<Field, String> fieldMap = new HashMap<>();
        for (String record : records) {
            if (record.indexOf("Record") != 0) {
                continue;
            }
            fieldMap.clear();
            for (String fieldLine : record.split("__NEWFIELD__")) {
                // Layout per line: two-letter tag, separator, then the content.
                String tag = fieldLine.substring(0, 2);
                String content = fieldLine.substring(5);
                switch (tag) {
                    case "TI" -> fieldMap.put(StandardField.TITLE, content);
                    case "PY" -> fieldMap.put(StandardField.YEAR, content);
                    case "AU" -> fieldMap.put(StandardField.AUTHOR,
                            AuthorList.fixAuthorLastNameFirst(content.replace(",-", ", ").replace(";", " and ")));
                    case "AB" -> fieldMap.put(StandardField.ABSTRACT, content);
                    case "ID" -> fieldMap.put(StandardField.KEYWORDS, content);
                    case "SO" -> {
                        // Source string: "<journal>. <...> <year>; <volume>:<pages>"
                        int dotPos = content.indexOf('.');
                        if (dotPos >= 0) {
                            fieldMap.put(StandardField.JOURNAL, content.substring(0, dotPos).replace("-", " "));
                            String rest = content.substring(dotPos);
                            int semicolonPos = rest.indexOf(';');
                            if (semicolonPos >= 5) {
                                fieldMap.put(StandardField.YEAR, rest.substring(semicolonPos - 5, semicolonPos).trim());
                                rest = rest.substring(semicolonPos);
                                int colonPos = rest.indexOf(':');
                                if (colonPos >= 0) {
                                    fieldMap.put(StandardField.PAGES, rest.substring(colonPos + 1).trim());
                                    fieldMap.put(StandardField.VOLUME, rest.substring(1, colonPos).trim());
                                }
                            }
                        }
                    }
                    case "RT" -> {
                        String kind = content.trim();
                        if ("Journal-Paper".equals(kind)) {
                            type = StandardEntryType.Article;
                        } else if ("Conference-Paper".equals(kind) || "Conference-Paper; Journal-Paper".equals(kind)) {
                            type = StandardEntryType.InProceedings;
                        } else {
                            type = EntryTypeFactory.parse(kind.replace(" ", ""));
                        }
                    }
                }
            }
            BibEntry entry = new BibEntry(type);
            entry.setField(fieldMap);
            bibitems.add(entry);
        }
        return new ParserResult(bibitems);
    }
}
| 4,921 | 35.459259 | 117 | java |
null | jabref-main/src/main/java/org/jabref/logic/importer/fileformat/IsiImporter.java | package org.jabref.logic.importer.fileformat;
import java.io.BufferedReader;
import java.io.IOException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Locale;
import java.util.Map;
import java.util.Objects;
import java.util.Optional;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import org.jabref.logic.formatter.casechanger.TitleCaseFormatter;
import org.jabref.logic.importer.Importer;
import org.jabref.logic.importer.ParserResult;
import org.jabref.logic.util.StandardFileType;
import org.jabref.model.entry.BibEntry;
import org.jabref.model.entry.Month;
import org.jabref.model.entry.field.Field;
import org.jabref.model.entry.field.FieldFactory;
import org.jabref.model.entry.field.StandardField;
import org.jabref.model.entry.field.UnknownField;
import org.jabref.model.entry.types.EntryType;
import org.jabref.model.entry.types.StandardEntryType;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* <p>
* Importer for the ISI Web of Science, INSPEC and Medline format.
* </p>
* <p>
* Documentation about ISI WOS format:
* <ul>
*
* <li>https://web.archive.org/web/20131031052339/http://wos.isitrial.com/help/helpprn.html</li>
* </ul>
 * <p>
 * Known limitations / TODO:
 * <ul>
 * <li>Deal with capitalization correctly</li>
 * </ul>
 * </p>
*/
public class IsiImporter extends Importer {
private static final Logger LOGGER = LoggerFactory.getLogger(IsiImporter.class);
private static final Pattern SUB_SUP_PATTERN = Pattern.compile("/(sub|sup)\\s+(.*?)\\s*/");
// 2006.09.05: Modified pattern to avoid false positives for other files due to an
// extra | at the end:
private static final Pattern ISI_PATTERN = Pattern.compile("FN ISI Export Format|VR 1.|PY \\d{4}");
private static final String EOL = "EOLEOL";
private static final Pattern EOL_PATTERN = Pattern.compile(EOL);
    /** Display name of this importer as shown in the UI. */
    @Override
    public String getName() {
        return "ISI";
    }
    /** File type (extension association) handled by this importer. */
    @Override
    public StandardFileType getFileType() {
        return StandardFileType.ISI;
    }
    /** Stable internal identifier used to reference this importer. */
    @Override
    public String getId() {
        return "isi";
    }
    /** Short human-readable description of this importer. */
    @Override
    public String getDescription() {
        return "Importer for the ISI Web of Science, INSPEC and Medline format.";
    }
@Override
public boolean isRecognizedFormat(BufferedReader reader) throws IOException {
String str;
int i = 0;
while (((str = reader.readLine()) != null) && (i < 50)) {
/*
* The following line gives false positives for RIS files, so it
* should not be uncommented. The hypen is a characteristic of the
* RIS format.
*
* str = str.replace(" - ", "")
*/
if (IsiImporter.ISI_PATTERN.matcher(str).find()) {
return true;
}
i++;
}
return false;
}
    /**
     * Rewrites INSPEC-style "/sub x/" and "/sup x/" markers into LaTeX
     * subscript/superscript math ("$_x$" / "$^x$") in the fields that may
     * contain them (title, abstract, review, notes).
     */
    public static void processSubSup(Map<Field, String> map) {
        Field[] subsup = {StandardField.TITLE, StandardField.ABSTRACT, StandardField.REVIEW, new UnknownField("notes")};
        for (Field aSubsup : subsup) {
            if (map.containsKey(aSubsup)) {
                Matcher m = IsiImporter.SUB_SUP_PATTERN.matcher(map.get(aSubsup));
                StringBuilder sb = new StringBuilder();
                while (m.find()) {
                    String group2 = m.group(2);
                    // '$' must survive both replaceAll's regex-replacement escaping
                    // and appendReplacement's '$'/'\' handling, hence the pile-up.
                    group2 = group2.replaceAll("\\$", "\\\\\\\\\\\\\\$"); // Escaping
                    // insanity!
                    // :-)
                    if (group2.length() > 1) {
                        // Multi-character scripts need braces: $_{ab}$ not $_ab$.
                        group2 = "{" + group2 + "}";
                    }
                    if ("sub".equals(m.group(1))) {
                        m.appendReplacement(sb, "\\$_" + group2 + "\\$");
                    } else {
                        m.appendReplacement(sb, "\\$^" + group2 + "\\$");
                    }
                }
                m.appendTail(sb);
                map.put(aSubsup, sb.toString());
            }
        }
    }
private static void processCapitalization(Map<Field, String> map) {
Field[] subsup = {StandardField.TITLE, StandardField.JOURNAL, StandardField.PUBLISHER};
for (Field aSubsup : subsup) {
if (map.containsKey(aSubsup)) {
String s = map.get(aSubsup);
if (s.toUpperCase(Locale.ROOT).equals(s)) {
s = new TitleCaseFormatter().format(s);
map.put(aSubsup, s);
}
}
}
}
    @Override
    public ParserResult importDatabase(BufferedReader reader) throws IOException {
        Objects.requireNonNull(reader);
        List<BibEntry> bibEntries = new ArrayList<>();
        StringBuilder sb = new StringBuilder();
        // Pattern fieldPattern = Pattern.compile("^AU |^TI |^SO |^DT |^C1 |^AB
        // |^ID |^BP |^PY |^SE |^PY |^VL |^IS ");
        String str;
        // Pass 1: flatten the stream. "::" marks a new record ("PT " line),
        // " ## " marks a new field, EOL marks a continuation line.
        while ((str = reader.readLine()) != null) {
            if (str.length() < 3) {
                continue;
            }
            // beginning of a new item
            if ("PT ".equals(str.substring(0, 3))) {
                sb.append("::").append(str);
            } else {
                String beg = str.substring(0, 3).trim();
                // I could have used the fieldPattern regular expression instead
                // however this seems to be
                // quick and dirty and it works!
                if (beg.length() == 2) {
                    sb.append(" ## "); // mark the beginning of each field
                    sb.append(str);
                } else {
                    sb.append(EOL); // mark the end of each line
                    sb.append(str.trim()); // remove the initial spaces
                }
            }
        }
        // Pass 2: split back into records and parse each two-letter field tag.
        String[] entries = sb.toString().split("::");
        // 'hm' is cleared and reused per record; assumes BibEntry.setField copies
        // the map rather than keeping a reference — TODO confirm.
        Map<Field, String> hm = new HashMap<>();
        // skip the first entry as it is either empty or has document header
        for (String entry : entries) {
            String[] fields = entry.split(" ## ");
            if (fields.length == 0) {
                fields = entry.split("\n");
            }
            EntryType type = BibEntry.DEFAULT_TYPE;
            String PT = "";
            String pages = "";
            hm.clear();
            for (String field : fields) {
                // empty field don't do anything
                if (field.length() <= 2) {
                    continue;
                }
                // Tag is the first two chars; value starts after "XX " (offset 3).
                String beg = field.substring(0, 2);
                String value = field.substring(3);
                if (value.startsWith(" - ")) {
                    value = value.substring(3);
                }
                value = value.trim();
                if ("PT".equals(beg)) {
                    if (value.startsWith("J")) {
                        PT = "article";
                    } else {
                        PT = value;
                    }
                    type = StandardEntryType.Article; // make all of them PT?
                } else if ("TY".equals(beg)) {
                    if ("JOUR".equals(value)) {
                        type = StandardEntryType.Article;
                    } else if ("CONF".equals(value)) {
                        type = StandardEntryType.InProceedings;
                    }
                } else if ("JO".equals(beg)) {
                    hm.put(StandardField.BOOKTITLE, value);
                } else if ("AU".equals(beg)) {
                    // Continuation lines hold one author each; join with "and".
                    String author = IsiImporter.isiAuthorsConvert(EOL_PATTERN.matcher(value).replaceAll(" and "));
                    // if there is already someone there then append with "and"
                    if (hm.get(StandardField.AUTHOR) != null) {
                        author = hm.get(StandardField.AUTHOR) + " and " + author;
                    }
                    hm.put(StandardField.AUTHOR, author);
                } else if ("TI".equals(beg)) {
                    hm.put(StandardField.TITLE, EOL_PATTERN.matcher(value).replaceAll(" "));
                } else if ("SO".equals(beg) || "JA".equals(beg)) {
                    hm.put(StandardField.JOURNAL, EOL_PATTERN.matcher(value).replaceAll(" "));
                } else if ("ID".equals(beg) || "KW".equals(beg)) {
                    // Merge ID/KW fields into a single keyword list, skipping duplicates.
                    value = EOL_PATTERN.matcher(value).replaceAll(" ");
                    String existingKeywords = hm.get(StandardField.KEYWORDS);
                    if ((existingKeywords == null) || existingKeywords.contains(value)) {
                        existingKeywords = value;
                    } else {
                        existingKeywords += ", " + value;
                    }
                    hm.put(StandardField.KEYWORDS, existingKeywords);
                } else if ("AB".equals(beg)) {
                    hm.put(StandardField.ABSTRACT, EOL_PATTERN.matcher(value).replaceAll(" "));
                } else if ("BP".equals(beg) || "BR".equals(beg) || "SP".equals(beg)) {
                    // Beginning page; the end page (EP) is appended below.
                    pages = value;
                } else if ("EP".equals(beg)) {
                    int detpos = value.indexOf(' ');
                    // tweak for IEEE Explore
                    if ((detpos != -1) && !value.substring(0, detpos).trim().isEmpty()) {
                        value = value.substring(0, detpos);
                    }
                    pages = pages + "--" + value;
                } else if ("PS".equals(beg)) {
                    pages = IsiImporter.parsePages(value);
                } else if ("AR".equals(beg)) {
                    pages = value;
                } else if ("IS".equals(beg)) {
                    hm.put(StandardField.NUMBER, value);
                } else if ("PY".equals(beg)) {
                    hm.put(StandardField.YEAR, value);
                } else if ("VL".equals(beg)) {
                    hm.put(StandardField.VOLUME, value);
                } else if ("PU".equals(beg)) {
                    hm.put(StandardField.PUBLISHER, value);
                } else if ("DI".equals(beg)) {
                    hm.put(StandardField.DOI, value);
                } else if ("PD".equals(beg)) {
                    String month = IsiImporter.parseMonth(value);
                    if (month != null) {
                        hm.put(StandardField.MONTH, month);
                    }
                } else if ("DT".equals(beg)) {
                    // Document type refines the entry type chosen from PT/TY.
                    if ("Review".equals(value)) {
                        type = StandardEntryType.Article; // set "Review" in Note/Comment?
                    } else if (value.startsWith("Article") || value.startsWith("Journal") || "article".equals(PT)) {
                        type = StandardEntryType.Article;
                    } else {
                        type = BibEntry.DEFAULT_TYPE;
                    }
                } else if ("CR".equals(beg)) {
                    hm.put(new UnknownField("CitedReferences"), EOL_PATTERN.matcher(value).replaceAll(" ; ").trim());
                } else {
                    // Preserve all other entries except
                    if ("ER".equals(beg) || "EF".equals(beg) || "VR".equals(beg) || "FN".equals(beg)) {
                        continue;
                    }
                    hm.put(FieldFactory.parseField(type, beg), value);
                }
            }
            if (!"".equals(pages)) {
                hm.put(StandardField.PAGES, pages);
            }
            // Skip empty entries
            if (hm.isEmpty()) {
                continue;
            }
            BibEntry b = new BibEntry(type);
            // id assumes an existing database so don't
            // Remove empty fields:
            List<Object> toRemove = new ArrayList<>();
            for (Map.Entry<Field, String> field : hm.entrySet()) {
                String content = field.getValue();
                if ((content == null) || content.trim().isEmpty()) {
                    toRemove.add(field.getKey());
                }
            }
            for (Object aToRemove : toRemove) {
                hm.remove(aToRemove);
            }
            // Polish entries
            IsiImporter.processSubSup(hm);
            IsiImporter.processCapitalization(hm);
            b.setField(hm);
            bibEntries.add(b);
        }
        return new ParserResult(bibEntries);
    }
private static String parsePages(String value) {
return value.replace("-", "--");
}
/**
* Parses the month and returns it in the JabRef format
*/
static String parseMonth(String value) {
String[] parts = value.split("\\s|\\-");
for (String part1 : parts) {
Optional<Month> month = Month.getMonthByShortName(part1.toLowerCase(Locale.ROOT));
if (month.isPresent()) {
return month.get().getJabRefFormat();
}
}
// Try two digit month
for (String part : parts) {
try {
int number = Integer.parseInt(part);
Optional<Month> month = Month.getMonthByNumber(number);
if (month.isPresent()) {
return month.get().getJabRefFormat();
}
} catch (NumberFormatException e) {
LOGGER.info("The import file in ISI format cannot parse part of the content in PD into integers " +
"(If there is no month or PD displayed in the imported entity, this may be the reason)", e);
}
}
return null;
}
/**
* Will expand ISI first names.
* <p>
* Fixed bug from: http://sourceforge.net/tracker/index.php?func=detail&aid=1542552&group_id=92314&atid=600306
*/
public static String isiAuthorConvert(String author) {
String[] s = author.split(",");
if (s.length != 2) {
return author;
}
StringBuilder sb = new StringBuilder();
String last = s[0].trim();
sb.append(last).append(", ");
String first = s[1].trim();
String[] firstParts = first.split("\\s+");
for (int i = 0; i < firstParts.length; i++) {
first = firstParts[i];
// Do we have only uppercase chars?
if (first.toUpperCase(Locale.ROOT).equals(first)) {
first = first.replace(".", "");
for (int j = 0; j < first.length(); j++) {
sb.append(first.charAt(j)).append('.');
if (j < (first.length() - 1)) {
sb.append(' ');
}
}
} else {
sb.append(first);
}
if (i < (firstParts.length - 1)) {
sb.append(' ');
}
}
return sb.toString();
}
private static String[] isiAuthorsConvert(String[] authors) {
String[] result = new String[authors.length];
for (int i = 0; i < result.length; i++) {
result[i] = IsiImporter.isiAuthorConvert(authors[i]);
}
return result;
}
public static String isiAuthorsConvert(String authors) {
String[] s = IsiImporter.isiAuthorsConvert(authors.split(" and |;"));
return String.join(" and ", s);
}
}
| 15,252 | 35.577938 | 120 | java |
null | jabref-main/src/main/java/org/jabref/logic/importer/fileformat/MarcXmlParser.java | package org.jabref.logic.importer.fileformat;
import java.io.IOException;
import java.io.InputStream;
import java.net.MalformedURLException;
import java.net.URL;
import java.time.DateTimeException;
import java.util.Arrays;
import java.util.LinkedList;
import java.util.List;
import java.util.Optional;
import java.util.stream.Collectors;
import javax.xml.parsers.DocumentBuilder;
import javax.xml.parsers.DocumentBuilderFactory;
import javax.xml.parsers.ParserConfigurationException;
import org.jabref.logic.importer.AuthorListParser;
import org.jabref.logic.importer.ParseException;
import org.jabref.logic.importer.Parser;
import org.jabref.model.entry.AuthorList;
import org.jabref.model.entry.BibEntry;
import org.jabref.model.entry.Date;
import org.jabref.model.entry.LinkedFile;
import org.jabref.model.entry.field.StandardField;
import org.jabref.model.strings.StringUtil;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.w3c.dom.Document;
import org.w3c.dom.Element;
import org.w3c.dom.Node;
import org.w3c.dom.NodeList;
import org.xml.sax.SAXException;
/**
* A parser for the bavarian flavour (Bibliotheksverbund Bayern) of the marc xml standard
* <p>
* See <a href="https://www.dnb.de/DE/Professionell/Metadatendienste/Exportformate/MARC21/marc21_node.html">Feldbeschreibung
* der Titeldaten bei der Deutschen Nationalbibliothek</a>
* <p>
*
* <p>
* For further information see
* <ul>
* <li>https://www.bib-bvb.de/web/kkb-online/rda-felderverzeichnis-des-b3kat-aseq</li>
* <li>https://www.loc.gov/marc/bibliographic/ for detailed documentation</li>
* <li>for modifications in B3Kat https://www.bib-bvb.de/documents/10792/9f51a033-5ca1-42e2-b2d3-a75e7f1512d4</li>
* <li>https://www.dnb.de/DE/Professionell/Metadatendienste/Exportformate/MARC21/marc21_node.html</li>
* <li>https://www.dnb.de/SharedDocs/Downloads/DE/Professionell/Standardisierung/AGV/marc21VereinbarungDatentauschTeil1.pdf?__blob=publicationFile&v=2</li>
* <li>about multiple books in a series https://www.dnb.de/SharedDocs/Downloads/DE/Professionell/Standardisierung/marc21FormatumstiegAbbildungBegrenzterWerke2008.pdf?__blob=publicationFile&v=2></li>
* </ul>
*/
public class MarcXmlParser implements Parser {
private static final Logger LOGGER = LoggerFactory.getLogger(MarcXmlParser.class);
@Override
public List<BibEntry> parseEntries(InputStream inputStream) throws ParseException {
try {
DocumentBuilder documentBuilder = DocumentBuilderFactory.newInstance().newDocumentBuilder();
Document content = documentBuilder.parse(inputStream);
return this.parseEntries(content);
} catch (ParserConfigurationException | SAXException | IOException exception) {
throw new ParseException(exception);
}
}
private List<BibEntry> parseEntries(Document content) {
List<BibEntry> result = new LinkedList<>();
Element root = (Element) content.getElementsByTagName("zs:searchRetrieveResponse").item(0);
Element srwrecords = getChild("zs:records", root);
if (srwrecords == null) {
// no records found, so return the empty list
return result;
}
List<Element> records = getChildren("zs:record", srwrecords);
for (Element element : records) {
Element e = getChild("zs:recordData", element);
if (e != null) {
e = getChild("record", e);
if (e != null) {
result.add(parseEntry(e));
}
}
}
return result;
}
private BibEntry parseEntry(Element element) {
BibEntry bibEntry = new BibEntry(BibEntry.DEFAULT_TYPE);
List<Element> datafields = getChildren("datafield", element);
for (Element datafield : datafields) {
String tag = datafield.getAttribute("tag");
LOGGER.debug("tag: " + tag);
if ("020".equals(tag)) {
putIsbn(bibEntry, datafield);
} else if ("100".equals(tag) || "700".equals(tag) || "710".equals(tag)) {
putPersonalName(bibEntry, datafield); // Author, Editor, Publisher
} else if ("245".equals(tag)) {
putTitle(bibEntry, datafield);
} else if ("250".equals(tag)) {
putEdition(bibEntry, datafield);
} else if ("264".equals(tag)) {
putPublication(bibEntry, datafield);
} else if ("300".equals(tag)) {
putPhysicalDescription(bibEntry, datafield);
} else if ("490".equals(tag) || "830".equals(tag)) {
putSeries(bibEntry, datafield);
} else if ("520".equals(tag)) {
putSummary(bibEntry, datafield);
} else if ("653".equals(tag)) {
putKeywords(bibEntry, datafield);
} else if ("856".equals(tag)) {
putElectronicLocation(bibEntry, datafield);
} else if ("966".equals(tag)) {
putDoi(bibEntry, datafield);
} else if (Integer.parseInt(tag) >= 546 && Integer.parseInt(tag) <= 599) {
// Notes
// FixMe: Some notes seem to have tags lower than 546
putNotes(bibEntry, datafield);
} else {
LOGGER.debug("Unparsed tag: {}", tag);
}
}
/*
* ToDo:
* pages
* volume and number correct
* series and journals stored in different tags
* thesis
* proceedings
*/
return bibEntry;
}
private void putIsbn(BibEntry bibEntry, Element datafield) {
String isbn = getSubfield("a", datafield);
if (StringUtil.isNullOrEmpty(isbn)) {
LOGGER.debug("Empty ISBN recieved");
return;
}
int length = isbn.length();
if (length != 10 && length != 13) {
LOGGER.debug("Malformed ISBN recieved, length: " + length);
return;
}
Optional<String> field = bibEntry.getField(StandardField.ISBN);
if (field.isPresent()) {
// Only overwrite the field, if it's ISBN13
if (field.get().length() == 13) {
bibEntry.setField(StandardField.ISBN, isbn);
}
} else {
bibEntry.setField(StandardField.ISBN, isbn);
}
}
    /**
     * Maps MARC personal/corporate name fields (100/700/710) onto author,
     * editor or publisher, depending on the relator code in subfield 4
     * (aut/edt/pbl). Repeated names are concatenated with " and ".
     */
    private void putPersonalName(BibEntry bibEntry, Element datafield) {
        String author = getSubfield("a", datafield);
        String relation = getSubfield("4", datafield);
        AuthorList name;
        if (StringUtil.isNotBlank(author) && StringUtil.isNotBlank(relation)) {
            name = new AuthorListParser().parse(author);
            // Relator code -> target field; unknown codes yield an empty Optional.
            Optional<StandardField> field = Optional.ofNullable(
                    switch (relation) {
                        case "aut" ->
                                StandardField.AUTHOR;
                        case "edt" ->
                                StandardField.EDITOR;
                        case "pbl" ->
                                StandardField.PUBLISHER;
                        default ->
                                null;
                    });
            if (field.isPresent()) {
                String ind1 = datafield.getAttribute("ind1");
                String brackedName;
                if (field.get() == StandardField.PUBLISHER && StringUtil.isNotBlank(ind1) && "2".equals(ind1)) {
                    // ind == 2 -> Corporate publisher: brace the whole name so
                    // BibTeX does not split it into first/last parts.
                    brackedName = "{" + name.getAsFirstLastNamesWithAnd() + "}";
                } else {
                    brackedName = name.getAsLastFirstNamesWithAnd(false);
                }
                if (bibEntry.getField(field.get()).isPresent()) {
                    bibEntry.setField(field.get(), bibEntry.getField(field.get()).get().concat(" and " + brackedName));
                } else {
                    bibEntry.setField(field.get(), brackedName);
                }
            }
        }
    }
    /**
     * Maps MARC field 245 (title statement) onto the title-related fields.
     * Subfields: a = title, b = subtitle, c = statement of responsibility,
     * n = number of part, p = name of part.
     */
    private void putTitle(BibEntry bibEntry, Element datafield) {
        String title = getSubfield("a", datafield);
        String subtitle = getSubfield("b", datafield);
        String responsibility = getSubfield("c", datafield);
        String number = getSubfield("n", datafield);
        String part = getSubfield("p", datafield);
        if (StringUtil.isNotBlank(title)) {
            bibEntry.setField(StandardField.TITLE, title);
        }
        if (StringUtil.isNotBlank(subtitle)) {
            bibEntry.setField(StandardField.SUBTITLE, subtitle);
        }
        if (StringUtil.isNotBlank(responsibility)) {
            bibEntry.setField(StandardField.TITLEADDON, responsibility);
        }
        if (StringUtil.isNotBlank(number)) {
            bibEntry.setField(StandardField.NUMBER, number);
        }
        if (StringUtil.isNotBlank(part)) {
            bibEntry.setField(StandardField.PART, part);
        }
    }
private void putEdition(BibEntry bibEntry, Element datafield) {
String edition = getSubfield("a", datafield); // e.g. '1st ed. 2020'
String editionAddendum = getSubfield("b", datafield); // e.g. 'revised by N.N.'
if (StringUtil.isNullOrEmpty(edition)) {
return;
}
if (StringUtil.isNotBlank(editionAddendum)) {
edition = edition.concat(", " + editionAddendum);
}
bibEntry.setField(StandardField.EDITION, edition);
}
    /**
     * Maps MARC field 264 with ind2 == 1 (publication) onto location,
     * publisher and date. The date is parsed if possible; otherwise the raw
     * string is stored in the date field.
     */
    private void putPublication(BibEntry bibEntry, Element datafield) {
        String ind2 = datafield.getAttribute("ind2");
        if (StringUtil.isNotBlank(ind2) && "1".equals(ind2)) { // Publisher
            String place = getSubfield("a", datafield);
            String name = getSubfield("b", datafield);
            String date = getSubfield("c", datafield);
            if (StringUtil.isNotBlank(place)) {
                bibEntry.setField(StandardField.LOCATION, place);
            }
            if (StringUtil.isNotBlank(name)) {
                // Braced so BibTeX treats the publisher as one literal token.
                bibEntry.setField(StandardField.PUBLISHER, "{" + name + "}");
            }
            if (StringUtil.isNotBlank(date)) {
                String strippedDate = StringUtil.stripBrackets(date);
                try {
                    Date.parse(strippedDate).ifPresent(bibEntry::setDate);
                } catch (DateTimeException exception) {
                    // cannot read date value, just copy it in plain text
                    LOGGER.info("Cannot parse date '{}'", strippedDate);
                    bibEntry.setField(StandardField.DATE, StringUtil.stripBrackets(strippedDate));
                }
            }
        }
    }
private void putPhysicalDescription(BibEntry bibEntry, Element datafield) {
String pagetotal = getSubfield("a", datafield);
if (StringUtil.isNotBlank(pagetotal) && (pagetotal.contains("pages") || pagetotal.contains("p."))) {
pagetotal = pagetotal.replaceAll(" p\\.?$", "");
bibEntry.setField(StandardField.PAGETOTAL, pagetotal);
}
}
private void putSeries(BibEntry bibEntry, Element datafield) {
// tag 490 - Series
// tag 830 - Series Added Entry
String name = getSubfield("a", datafield);
String volume = getSubfield("v", datafield);
String issn = getSubfield("x", datafield);
if (StringUtil.isNotBlank(name)) {
bibEntry.setField(StandardField.SERIES, name);
}
if (StringUtil.isNotBlank(volume)) {
bibEntry.setField(StandardField.VOLUME, volume);
}
if (StringUtil.isNotBlank(issn)) {
bibEntry.setField(StandardField.ISSN, issn);
}
}
private void putSummary(BibEntry bibEntry, Element datafield) {
String summary = getSubfield("a", datafield);
String ind1 = datafield.getAttribute("ind1");
if (StringUtil.isNotBlank(summary) && StringUtil.isNotBlank(ind1) && "3".equals(ind1)) { // Abstract
if (bibEntry.getField(StandardField.ABSTRACT).isPresent()) {
bibEntry.setField(StandardField.ABSTRACT, bibEntry.getField(StandardField.ABSTRACT).get().concat(summary));
} else {
bibEntry.setField(StandardField.ABSTRACT, summary);
}
}
}
private void putKeywords(BibEntry bibEntry, Element datafield) {
String keyword = getSubfield("a", datafield);
if (StringUtil.isNotBlank(keyword)) {
Optional<String> keywords = bibEntry.getField(StandardField.KEYWORDS);
if (keywords.isPresent()) {
bibEntry.setField(StandardField.KEYWORDS, keywords.get() + ", " + keyword);
} else {
bibEntry.setField(StandardField.KEYWORDS, keyword);
}
}
}
private void putDoi(BibEntry bibEntry, Element datafield) {
String ind1 = datafield.getAttribute("ind1");
String resource = getSubfield("u", datafield);
if ("e".equals(ind1) && StringUtil.isNotBlank("u") && StringUtil.isNotBlank(resource)) { // DOI
String fulltext = getSubfield("3", datafield);
if ("Volltext".equals(fulltext)) {
try {
LinkedFile linkedFile = new LinkedFile(new URL(resource), "PDF");
bibEntry.setField(StandardField.FILE, linkedFile.toString());
} catch (
MalformedURLException e) {
LOGGER.info("Malformed URL: {}", resource);
}
} else {
bibEntry.setField(StandardField.DOI, resource);
}
}
}
    /**
     * Maps MARC field 856 (electronic location) onto either a linked fulltext
     * PDF (subfield 3 == "Volltext") or the URL field.
     */
    private void putElectronicLocation(BibEntry bibEntry, Element datafield) {
        // 856 - fulltext pdf url
        String ind1 = datafield.getAttribute("ind1");
        String ind2 = datafield.getAttribute("ind2");
        if ("4".equals(ind1) && "0".equals(ind2)) {
            String fulltext = getSubfield("3", datafield);
            String resource = getSubfield("u", datafield);
            if ("Volltext".equals(fulltext) && StringUtil.isNotBlank(resource)) {
                try {
                    LinkedFile linkedFile = new LinkedFile(new URL(resource), "PDF");
                    bibEntry.setField(StandardField.FILE, linkedFile.toString());
                } catch (
                        MalformedURLException e) {
                    LOGGER.info("Malformed URL: {}", resource);
                }
            } else {
                // NOTE(review): 'resource' may be null or blank in this branch;
                // confirm that setField tolerates that before relying on it.
                bibEntry.setField(StandardField.URL, resource);
            }
        }
    }
private void putNotes(BibEntry bibEntry, Element datafield) {
String[] notes = new String[] {
getSubfield("a", datafield),
getSubfield("0", datafield),
getSubfield("h", datafield),
getSubfield("S", datafield),
getSubfield("c", datafield),
getSubfield("f", datafield),
getSubfield("i", datafield),
getSubfield("k", datafield),
getSubfield("l", datafield),
getSubfield("z", datafield),
getSubfield("3", datafield),
getSubfield("5", datafield)
};
String notesJoined = Arrays.stream(notes)
.filter(StringUtil::isNotBlank)
.collect(Collectors.joining("\n\n"));
if (bibEntry.getField(StandardField.NOTE).isPresent()) {
bibEntry.setField(StandardField.NOTE, bibEntry.getField(StandardField.NOTE).get().concat(notesJoined));
} else {
bibEntry.setField(StandardField.NOTE, notesJoined);
}
}
private String getSubfield(String a, Element datafield) {
List<Element> subfields = getChildren("subfield", datafield);
for (Element subfield : subfields) {
if (subfield.getAttribute("code").equals(a)) {
return subfield.getTextContent();
}
}
return null;
}
private Element getChild(String name, Element e) {
if (e == null) {
return null;
}
NodeList children = e.getChildNodes();
int j = children.getLength();
for (int i = 0; i < j; i++) {
Node test = children.item(i);
if (test.getNodeType() == Node.ELEMENT_NODE) {
Element entry = (Element) test;
if (entry.getTagName().equals(name)) {
return entry;
}
}
}
return null;
}
private List<Element> getChildren(String name, Element e) {
List<Element> result = new LinkedList<>();
NodeList children = e.getChildNodes();
int j = children.getLength();
for (int i = 0; i < j; i++) {
Node test = children.item(i);
if (test.getNodeType() == Node.ELEMENT_NODE) {
Element entry = (Element) test;
if (entry.getTagName().equals(name)) {
result.add(entry);
}
}
}
return result;
}
}
| 17,227 | 37.36971 | 198 | java |
null | jabref-main/src/main/java/org/jabref/logic/importer/fileformat/MedlineImporter.java | package org.jabref.logic.importer.fileformat;
import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Locale;
import java.util.Map;
import java.util.Objects;
import java.util.Optional;
import javax.xml.XMLConstants;
import javax.xml.stream.XMLInputFactory;
import javax.xml.stream.XMLStreamException;
import javax.xml.stream.XMLStreamReader;
import javax.xml.stream.events.XMLEvent;
import org.jabref.logic.importer.Importer;
import org.jabref.logic.importer.ParseException;
import org.jabref.logic.importer.Parser;
import org.jabref.logic.importer.ParserResult;
import org.jabref.logic.importer.fileformat.medline.ArticleId;
import org.jabref.logic.importer.fileformat.medline.Investigator;
import org.jabref.logic.importer.fileformat.medline.MeshHeading;
import org.jabref.logic.importer.fileformat.medline.OtherId;
import org.jabref.logic.importer.fileformat.medline.PersonalNameSubject;
import org.jabref.logic.importer.util.MathMLParser;
import org.jabref.logic.util.StandardFileType;
import org.jabref.model.entry.BibEntry;
import org.jabref.model.entry.Date;
import org.jabref.model.entry.Month;
import org.jabref.model.entry.field.Field;
import org.jabref.model.entry.field.FieldFactory;
import org.jabref.model.entry.field.StandardField;
import org.jabref.model.entry.field.UnknownField;
import org.jabref.model.entry.types.StandardEntryType;
import org.jabref.model.strings.StringUtil;
import com.google.common.base.Joiner;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* Importer for the Medline/Pubmed format.
* <p>
* check here for details on the format https://www.nlm.nih.gov/bsd/licensee/elements_descriptions.html
*/
public class MedlineImporter extends Importer implements Parser {
private static final Logger LOGGER = LoggerFactory.getLogger(MedlineImporter.class);
private static final String KEYWORD_SEPARATOR = "; ";
private static final Locale ENGLISH = Locale.ENGLISH;
private static String join(List<String> list, String string) {
return Joiner.on(string).join(list);
}
    /** Display name of this importer as shown in the UI. */
    @Override
    public String getName() {
        return "Medline/PubMed";
    }
    /** File type (extension association) handled by this importer. */
    @Override
    public StandardFileType getFileType() {
        return StandardFileType.MEDLINE;
    }
    /** Stable internal identifier used to reference this importer. */
    @Override
    public String getId() {
        return "medline";
    }
    /** Short human-readable description of this importer. */
    @Override
    public String getDescription() {
        return "Importer for the Medline format.";
    }
@Override
public boolean isRecognizedFormat(BufferedReader reader) throws IOException {
String str;
int i = 0;
while (((str = reader.readLine()) != null) && (i < 50)) {
if (str.toLowerCase(ENGLISH).contains("<pubmedarticle>")
|| str.toLowerCase(ENGLISH).contains("<pubmedbookarticle>")) {
return true;
}
i++;
}
return false;
}
@Override
public ParserResult importDatabase(BufferedReader input) throws IOException {
Objects.requireNonNull(input);
List<BibEntry> bibItems = new ArrayList<>();
try {
XMLInputFactory xmlInputFactory = XMLInputFactory.newInstance();
// prevent xxe (https://rules.sonarsource.com/java/RSPEC-2755)
xmlInputFactory.setProperty(XMLConstants.ACCESS_EXTERNAL_SCHEMA, "");
// required for reading Unicode characters such as ö
xmlInputFactory.setProperty(XMLInputFactory.IS_COALESCING, true);
XMLStreamReader reader = xmlInputFactory.createXMLStreamReader(input);
while (reader.hasNext()) {
reader.next();
if (isStartXMLEvent(reader)) {
String elementName = reader.getName().getLocalPart();
switch (elementName) {
case "PubmedArticle" -> {
parseArticle(reader, bibItems, elementName);
}
case "PubmedBookArticle" -> {
parseBookArticle(reader, bibItems, elementName);
}
}
}
}
} catch (XMLStreamException e) {
LOGGER.debug("could not parse document", e);
return ParserResult.fromError(e);
}
return new ParserResult(bibItems);
}
    /**
     * Parses one {@code <PubmedBookArticle>} element (the reader is positioned
     * just inside it) and adds the resulting entry to {@code bibItems}.
     * Stops at the matching end tag given by {@code startElement}.
     */
    private void parseBookArticle(XMLStreamReader reader, List<BibEntry> bibItems, String startElement)
            throws XMLStreamException {
        Map<Field, String> fields = new HashMap<>();
        while (reader.hasNext()) {
            reader.next();
            if (isStartXMLEvent(reader)) {
                String elementName = reader.getName().getLocalPart();
                switch (elementName) {
                    case "BookDocument" -> {
                        parseBookDocument(reader, fields, elementName);
                    }
                    case "PublicationStatus" -> {
                        // Text content follows the start tag.
                        reader.next();
                        if (isCharacterXMLEvent(reader)) {
                            putIfValueNotNull(fields, StandardField.PUBSTATE, reader.getText());
                        }
                    }
                }
            }
            if (isEndXMLEvent(reader) && reader.getName().getLocalPart().equals(startElement)) {
                break;
            }
        }
        BibEntry entry = new BibEntry(StandardEntryType.Article);
        entry.setField(fields);
        bibItems.add(entry);
    }
    /**
     * Parses a {@code <BookDocument>} element into {@code fields}. Elements
     * that may occur several times are collected into lists and joined into
     * single field values after the end tag is reached.
     */
    private void parseBookDocument(XMLStreamReader reader, Map<Field, String> fields, String startElement)
            throws XMLStreamException {
        // multiple occurrences of the following fields can be present
        List<String> sectionTitleList = new ArrayList<>();
        List<String> keywordList = new ArrayList<>();
        List<String> publicationTypeList = new ArrayList<>();
        List<String> articleTitleList = new ArrayList<>();
        while (reader.hasNext()) {
            reader.next();
            if (isStartXMLEvent(reader)) {
                String elementName = reader.getName().getLocalPart();
                switch (elementName) {
                    case "PMID" -> {
                        reader.next();
                        if (isCharacterXMLEvent(reader)) {
                            fields.put(StandardField.PMID, reader.getText());
                        }
                    }
                    case "DateRevised", "ContributionDate" -> {
                        parseDate(reader, fields, elementName);
                    }
                    case "Abstract" -> {
                        addAbstract(reader, fields, elementName);
                    }
                    case "Pagination" -> {
                        addPagination(reader, fields, elementName);
                    }
                    case "Section" -> {
                        parseSections(reader, sectionTitleList);
                    }
                    case "Keyword" -> {
                        reader.next();
                        if (isCharacterXMLEvent(reader)) {
                            keywordList.add(reader.getText());
                        }
                    }
                    case "PublicationType" -> {
                        reader.next();
                        if (isCharacterXMLEvent(reader)) {
                            publicationTypeList.add(reader.getText());
                        }
                    }
                    case "ArticleTitle" -> {
                        reader.next();
                        if (isCharacterXMLEvent(reader)) {
                            articleTitleList.add(reader.getText());
                        }
                    }
                    case "Book" -> {
                        parseBookInformation(reader, fields, elementName);
                    }
                }
            }
            if (isEndXMLEvent(reader) && reader.getName().getLocalPart().equals(startElement)) {
                break;
            }
        }
        // populate multiple occurrence fields
        if (!sectionTitleList.isEmpty()) {
            fields.put(new UnknownField("sections"), join(sectionTitleList, "; "));
        }
        addKeywords(fields, keywordList);
        if (!publicationTypeList.isEmpty()) {
            fields.put(new UnknownField("pubtype"), join(publicationTypeList, ", "));
        }
        if (!articleTitleList.isEmpty()) {
            fields.put(new UnknownField("article"), join(articleTitleList, ", "));
        }
    }
    /**
     * Parses the children of a {@code Book} element (publisher, title, dates,
     * authors, volume, edition, identifiers, ...) until the end tag of
     * {@code startElement}. ISBNs and title fragments may occur multiple times
     * and are joined afterwards.
     */
    private void parseBookInformation(XMLStreamReader reader, Map<Field, String> fields, String startElement)
            throws XMLStreamException {
        List<String> isbnList = new ArrayList<>();
        List<String> titleList = new ArrayList<>();
        while (reader.hasNext()) {
            reader.next();
            if (isStartXMLEvent(reader)) {
                String elementName = reader.getName().getLocalPart();
                switch (elementName) {
                    case "PublisherName" -> {
                        reader.next();
                        if (isCharacterXMLEvent(reader)) {
                            putIfValueNotNull(fields, StandardField.PUBLISHER, reader.getText());
                        }
                    }
                    case "PublisherLocation" -> {
                        reader.next();
                        if (isCharacterXMLEvent(reader)) {
                            putIfValueNotNull(fields, new UnknownField("publocation"), reader.getText());
                        }
                    }
                    case "BookTitle" -> {
                        handleTextElement(reader, titleList, elementName);
                    }
                    case "PubDate" -> {
                        addPubDate(reader, fields, elementName);
                    }
                    case "AuthorList" -> {
                        handleAuthorList(reader, fields, elementName);
                    }
                    case "Volume" -> {
                        reader.next();
                        if (isCharacterXMLEvent(reader)) {
                            putIfValueNotNull(fields, StandardField.VOLUME, reader.getText());
                        }
                    }
                    case "Edition" -> {
                        reader.next();
                        if (isCharacterXMLEvent(reader)) {
                            putIfValueNotNull(fields, StandardField.EDITION, reader.getText());
                        }
                    }
                    case "Medium" -> {
                        reader.next();
                        if (isCharacterXMLEvent(reader)) {
                            putIfValueNotNull(fields, new UnknownField("medium"), reader.getText());
                        }
                    }
                    case "ReportNumber" -> {
                        reader.next();
                        if (isCharacterXMLEvent(reader)) {
                            putIfValueNotNull(fields, new UnknownField("reportnumber"), reader.getText());
                        }
                    }
                    case "ELocationID" -> {
                        // the EIdType attribute decides the target field (doi/pii)
                        String eidType = reader.getAttributeValue(null, "EIdType");
                        reader.next();
                        if (isCharacterXMLEvent(reader)) {
                            handleElocationId(fields, reader, eidType);
                        }
                    }
                    case "Isbn" -> {
                        reader.next();
                        if (isCharacterXMLEvent(reader)) {
                            isbnList.add(reader.getText());
                        }
                    }
                }
            }
            if (isEndXMLEvent(reader) && reader.getName().getLocalPart().equals(startElement)) {
                break;
            }
        }
        if (!isbnList.isEmpty()) {
            fields.put(StandardField.ISBN, join(isbnList, ", "));
        }
        if (!titleList.isEmpty()) {
            putIfValueNotNull(fields, StandardField.TITLE, join(titleList, " "));
        }
    }
private void handleElocationId(Map<Field, String> fields, XMLStreamReader reader, String eidType) {
if ("doi".equals(eidType)) {
fields.put(StandardField.DOI, reader.getText());
}
if ("pii".equals(eidType)) {
fields.put(new UnknownField("pii"), reader.getText());
}
}
    /**
     * Collects the titles of top-level {@code Section} elements only.
     * {@code sectionLevel} tracks how deeply nested sections are: it is
     * incremented on each inner {@code Section} start tag and decremented on the
     * corresponding end tag; titles encountered while the level is non-zero are
     * skipped. Parsing stops at the end tag of the outermost section.
     */
    private void parseSections(XMLStreamReader reader, List<String> sectionTitleList) throws XMLStreamException {
        int sectionLevel = 0;
        while (reader.hasNext()) {
            reader.next();
            if (isStartXMLEvent(reader)) {
                String elementName = reader.getName().getLocalPart();
                switch (elementName) {
                    case "SectionTitle" -> {
                        reader.next();
                        if (isCharacterXMLEvent(reader) && sectionLevel == 0) {
                            // we only collect SectionTitles from root level Sections
                            sectionTitleList.add(reader.getText());
                        }
                    }
                    case "Section" -> {
                        sectionLevel++;
                    }
                }
            }
            if (isEndXMLEvent(reader) && "Section".equals(reader.getName().getLocalPart())) {
                if (sectionLevel == 0) {
                    // end tag of the section we started in -> done
                    break;
                } else {
                    sectionLevel--;
                }
            }
        }
    }
    /**
     * Parses one PubMed article record (its {@code MedlineCitation} and
     * {@code PubmedData} children) into a new {@link BibEntry} of type
     * {@link StandardEntryType#Article} and appends it to {@code bibItems}.
     */
    private void parseArticle(XMLStreamReader reader, List<BibEntry> bibItems, String startElement)
            throws XMLStreamException {
        Map<Field, String> fields = new HashMap<>();
        while (reader.hasNext()) {
            reader.next();
            if (isStartXMLEvent(reader)) {
                String elementName = reader.getName().getLocalPart();
                switch (elementName) {
                    case "MedlineCitation" -> {
                        parseMedlineCitation(reader, fields, elementName);
                    }
                    case "PubmedData" -> {
                        parsePubmedData(reader, fields, elementName);
                    }
                }
            }
            if (isEndXMLEvent(reader) && reader.getName().getLocalPart().equals(startElement)) {
                break;
            }
        }
        BibEntry entry = new BibEntry(StandardEntryType.Article);
        entry.setField(fields);
        bibItems.add(entry);
    }
    /**
     * Parses a {@code PubmedData} element: collects the publication status and
     * all {@code ArticleId} values (with their {@code IdType} attribute).
     */
    private void parsePubmedData(XMLStreamReader reader, Map<Field, String> fields, String startElement)
            throws XMLStreamException {
        String publicationStatus = "";
        List<ArticleId> articleIdList = new ArrayList<>();
        while (reader.hasNext()) {
            reader.next();
            if (isStartXMLEvent(reader)) {
                String elementName = reader.getName().getLocalPart();
                switch (elementName) {
                    case "PublicationStatus" -> {
                        reader.next();
                        if (isCharacterXMLEvent(reader)) {
                            publicationStatus = reader.getText();
                        }
                    }
                    case "ArticleId" -> {
                        String idType = reader.getAttributeValue(null, "IdType");
                        reader.next();
                        if (isCharacterXMLEvent(reader)) {
                            articleIdList.add(new ArticleId(idType, reader.getText()));
                        }
                    }
                }
            }
            if (isEndXMLEvent(reader) && reader.getName().getLocalPart().equals(startElement)) {
                break;
            }
        }
        // NOTE(review): pubstate and article ids are only stored when a "revised"
        // date was parsed earlier (by parseMedlineCitation) — looks like a guard
        // against records without citation data; confirm this gating is intended.
        if (fields.get(new UnknownField("revised")) != null) {
            putIfValueNotNull(fields, StandardField.PUBSTATE, publicationStatus);
            if (!articleIdList.isEmpty()) {
                addArticleIdList(fields, articleIdList);
            }
        }
    }
    /**
     * Parses a {@code MedlineCitation} element until its end tag. Repeatable
     * children (citation subsets, MeSH headings, personal name subjects, other
     * ids, keywords, space flight missions, investigators, general notes) are
     * accumulated in lists and written into {@code fields} afterwards.
     * For {@code PMID}, only the value with the highest {@code Version}
     * attribute wins.
     */
    private void parseMedlineCitation(XMLStreamReader reader, Map<Field, String> fields, String startElement)
            throws XMLStreamException {
        // multiple occurrences of the following fields can be present
        List<String> citationSubsets = new ArrayList<>();
        List<MeshHeading> meshHeadingList = new ArrayList<>();
        List<PersonalNameSubject> personalNameSubjectList = new ArrayList<>();
        List<OtherId> otherIdList = new ArrayList<>();
        List<String> keywordList = new ArrayList<>();
        List<String> spaceFlightMissionList = new ArrayList<>();
        List<Investigator> investigatorList = new ArrayList<>();
        List<String> generalNoteList = new ArrayList<>();
        // NOTE(review): getAttributeValue may return null; these puts then store
        // a null value — confirm downstream handles that.
        String status = reader.getAttributeValue(null, "Status");
        String owner = reader.getAttributeValue(null, "Owner");
        int latestVersion = 0;
        fields.put(new UnknownField("status"), status);
        fields.put(StandardField.OWNER, owner);
        while (reader.hasNext()) {
            reader.next();
            if (isStartXMLEvent(reader)) {
                String elementName = reader.getName().getLocalPart();
                switch (elementName) {
                    case "DateCreated", "DateCompleted", "DateRevised" -> {
                        parseDate(reader, fields, elementName);
                    }
                    case "Article" -> {
                        parseArticleInformation(reader, fields);
                    }
                    case "PMID" -> {
                        String versionStr = reader.getAttributeValue(null, "Version");
                        reader.next();
                        if (versionStr != null) {
                            int version = Integer.parseInt(versionStr);
                            if (isCharacterXMLEvent(reader) && version > latestVersion) {
                                // keep only the PMID with the highest version number
                                latestVersion = version;
                                fields.put(StandardField.PMID, reader.getText());
                            }
                        }
                    }
                    case "MedlineJournalInfo" -> {
                        parseMedlineJournalInfo(reader, fields, elementName);
                    }
                    case "ChemicalList" -> {
                        parseChemicalList(reader, fields, elementName);
                    }
                    case "CitationSubset" -> {
                        reader.next();
                        if (isCharacterXMLEvent(reader)) {
                            citationSubsets.add(reader.getText());
                        }
                    }
                    case "GeneSymbolList" -> {
                        parseGeneSymbolList(reader, fields, elementName);
                    }
                    case "MeshHeading" -> {
                        parseMeshHeading(reader, meshHeadingList, elementName);
                    }
                    case "NumberOfReferences" -> {
                        reader.next();
                        if (isCharacterXMLEvent(reader)) {
                            putIfValueNotNull(fields, new UnknownField("references"), reader.getText());
                        }
                    }
                    case "PersonalNameSubject" -> {
                        parsePersonalNameSubject(reader, personalNameSubjectList, elementName);
                    }
                    case "OtherID" -> {
                        String otherIdSource = reader.getAttributeValue(null, "Source");
                        reader.next();
                        if (isCharacterXMLEvent(reader)) {
                            String content = reader.getText();
                            otherIdList.add(new OtherId(otherIdSource, content));
                        }
                    }
                    case "Keyword" -> {
                        reader.next();
                        if (isCharacterXMLEvent(reader)) {
                            keywordList.add(reader.getText());
                        }
                    }
                    case "SpaceFlightMission" -> {
                        reader.next();
                        if (isCharacterXMLEvent(reader)) {
                            spaceFlightMissionList.add(reader.getText());
                        }
                    }
                    case "Investigator" -> {
                        parseInvestigator(reader, investigatorList, elementName);
                    }
                    case "GeneralNote" -> {
                        reader.next();
                        if (isCharacterXMLEvent(reader)) {
                            generalNoteList.add(reader.getText());
                        }
                    }
                }
            }
            if (isEndXMLEvent(reader) && reader.getName().getLocalPart().equals(startElement)) {
                break;
            }
        }
        // populate multiple occurrence fields
        if (!citationSubsets.isEmpty()) {
            fields.put(new UnknownField("citation-subset"), join(citationSubsets, ", "));
        }
        addMeshHeading(fields, meshHeadingList);
        addPersonalNames(fields, personalNameSubjectList);
        addOtherId(fields, otherIdList);
        addKeywords(fields, keywordList);
        if (!spaceFlightMissionList.isEmpty()) {
            fields.put(new UnknownField("space-flight-mission"), join(spaceFlightMissionList, ", "));
        }
        addInvestigators(fields, investigatorList);
        addNotes(fields, generalNoteList);
    }
    /**
     * Parses one {@code Investigator} element (last name, fore name, any number
     * of affiliations) and appends the resulting {@link Investigator} record to
     * {@code investigatorList}.
     */
    private void parseInvestigator(XMLStreamReader reader, List<Investigator> investigatorList, String startElement)
            throws XMLStreamException {
        String lastName = "";
        String foreName = "";
        List<String> affiliationList = new ArrayList<>();
        while (reader.hasNext()) {
            reader.next();
            if (isStartXMLEvent(reader)) {
                String elementName = reader.getName().getLocalPart();
                switch (elementName) {
                    case "LastName" -> {
                        reader.next();
                        if (isCharacterXMLEvent(reader)) {
                            lastName = reader.getText();
                        }
                    }
                    case "ForeName" -> {
                        reader.next();
                        if (isCharacterXMLEvent(reader)) {
                            foreName = reader.getText();
                        }
                    }
                    case "Affiliation" -> {
                        reader.next();
                        if (isCharacterXMLEvent(reader)) {
                            affiliationList.add(reader.getText());
                        }
                    }
                }
            }
            if (isEndXMLEvent(reader) && reader.getName().getLocalPart().equals(startElement)) {
                break;
            }
        }
        investigatorList.add(new Investigator(lastName, foreName, affiliationList));
    }
    /**
     * Parses one {@code PersonalNameSubject} element (last name and fore name)
     * and appends the resulting {@link PersonalNameSubject} record to the list.
     */
    private void parsePersonalNameSubject(XMLStreamReader reader, List<PersonalNameSubject> personalNameSubjectList, String startElement)
            throws XMLStreamException {
        String lastName = "";
        String foreName = "";
        while (reader.hasNext()) {
            reader.next();
            if (isStartXMLEvent(reader)) {
                String elementName = reader.getName().getLocalPart();
                switch (elementName) {
                    case "LastName" -> {
                        reader.next();
                        if (isCharacterXMLEvent(reader)) {
                            lastName = reader.getText();
                        }
                    }
                    case "ForeName" -> {
                        reader.next();
                        if (isCharacterXMLEvent(reader)) {
                            foreName = reader.getText();
                        }
                    }
                }
            }
            if (isEndXMLEvent(reader) && reader.getName().getLocalPart().equals(startElement)) {
                break;
            }
        }
        personalNameSubjectList.add(new PersonalNameSubject(lastName, foreName));
    }
    /**
     * Parses one {@code MeshHeading} element: a single descriptor name plus any
     * number of qualifier names, appended as a {@link MeshHeading} record.
     */
    private void parseMeshHeading(XMLStreamReader reader, List<MeshHeading> meshHeadingList, String startElement)
            throws XMLStreamException {
        String descriptorName = "";
        List<String> qualifierNames = new ArrayList<>();
        while (reader.hasNext()) {
            reader.next();
            if (isStartXMLEvent(reader)) {
                String elementName = reader.getName().getLocalPart();
                switch (elementName) {
                    case "DescriptorName" -> {
                        reader.next();
                        if (isCharacterXMLEvent(reader)) {
                            descriptorName = reader.getText();
                        }
                    }
                    case "QualifierName" -> {
                        reader.next();
                        if (isCharacterXMLEvent(reader)) {
                            qualifierNames.add(reader.getText());
                        }
                    }
                }
            }
            if (isEndXMLEvent(reader) && reader.getName().getLocalPart().equals(startElement)) {
                break;
            }
        }
        meshHeadingList.add(new MeshHeading(descriptorName, qualifierNames));
    }
    /**
     * Collects all {@code GeneSymbol} values inside a {@code GeneSymbolList}
     * and stores them comma-separated in the custom "gene-symbols" field
     * (only if at least one symbol was found).
     */
    private void parseGeneSymbolList(XMLStreamReader reader, Map<Field, String> fields, String startElement)
            throws XMLStreamException {
        List<String> geneSymbols = new ArrayList<>();
        while (reader.hasNext()) {
            reader.next();
            if (isStartXMLEvent(reader)) {
                String elementName = reader.getName().getLocalPart();
                if ("GeneSymbol".equals(elementName)) {
                    reader.next();
                    if (isCharacterXMLEvent(reader)) {
                        geneSymbols.add(reader.getText());
                    }
                }
            }
            if (isEndXMLEvent(reader) && reader.getName().getLocalPart().equals(startElement)) {
                break;
            }
        }
        if (!geneSymbols.isEmpty()) {
            fields.put(new UnknownField("gene-symbols"), join(geneSymbols, ", "));
        }
    }
private void parseChemicalList(XMLStreamReader reader, Map<Field, String> fields, String startElement)
throws XMLStreamException {
List<String> chemicalNames = new ArrayList<>();
while (reader.hasNext()) {
reader.next();
if (isStartXMLEvent(reader)) {
String elementName = reader.getName().getLocalPart();
if ("NameOfSubstance".equals(elementName)) {
reader.next();
if (isCharacterXMLEvent(reader)) {
chemicalNames.add(reader.getText());
}
}
}
if (isEndXMLEvent(reader) && reader.getName().getLocalPart().equals(startElement)) {
break;
}
}
fields.put(new UnknownField("chemicals"), join(chemicalNames, ", "));
}
    /**
     * Parses a {@code MedlineJournalInfo} element, storing country, journal
     * abbreviation, NLM id and linking ISSN (each only if non-null).
     */
    private void parseMedlineJournalInfo(XMLStreamReader reader, Map<Field, String> fields, String startElement)
            throws XMLStreamException {
        while (reader.hasNext()) {
            reader.next();
            if (isStartXMLEvent(reader)) {
                String elementName = reader.getName().getLocalPart();
                switch (elementName) {
                    case "Country" -> {
                        reader.next();
                        if (isCharacterXMLEvent(reader)) {
                            putIfValueNotNull(fields, new UnknownField("country"), reader.getText());
                        }
                    }
                    case "MedlineTA" -> {
                        reader.next();
                        if (isCharacterXMLEvent(reader)) {
                            putIfValueNotNull(fields, new UnknownField("journal-abbreviation"), reader.getText());
                        }
                    }
                    case "NlmUniqueID" -> {
                        reader.next();
                        if (isCharacterXMLEvent(reader)) {
                            putIfValueNotNull(fields, new UnknownField("nlm-id"), reader.getText());
                        }
                    }
                    case "ISSNLinking" -> {
                        reader.next();
                        if (isCharacterXMLEvent(reader)) {
                            putIfValueNotNull(fields, new UnknownField("issn-linking"), reader.getText());
                        }
                    }
                }
            }
            if (isEndXMLEvent(reader) && reader.getName().getLocalPart().equals(startElement)) {
                break;
            }
        }
    }
    /**
     * Parses an {@code Article} element: journal data, title fragments,
     * pagination, electronic location ids, abstract and author list.
     * Title fragments are joined with a space and stripped of surrounding
     * brackets at the end.
     */
    private void parseArticleInformation(XMLStreamReader reader, Map<Field, String> fields) throws XMLStreamException {
        List<String> titleList = new ArrayList<>();
        String pubmodel = reader.getAttributeValue(null, "PubModel");
        fields.put(new UnknownField("pubmodel"), pubmodel);
        while (reader.hasNext()) {
            reader.next();
            if (isStartXMLEvent(reader)) {
                String elementName = reader.getName().getLocalPart();
                switch (elementName) {
                    case "Journal" -> {
                        parseJournal(reader, fields);
                    }
                    case "ArticleTitle" -> {
                        handleTextElement(reader, titleList, elementName);
                    }
                    case "Pagination" -> {
                        addPagination(reader, fields, elementName);
                    }
                    case "ELocationID" -> {
                        // only ids explicitly marked valid ("ValidYN" == "Y") are stored
                        String eidType = reader.getAttributeValue(null, "EIdType");
                        String validYN = reader.getAttributeValue(null, "ValidYN");
                        reader.next();
                        if (isCharacterXMLEvent(reader) && "Y".equals(validYN)) {
                            handleElocationId(fields, reader, eidType);
                        }
                    }
                    case "Abstract" -> {
                        addAbstract(reader, fields, elementName);
                    }
                    case "AuthorList" -> {
                        handleAuthorList(reader, fields, elementName);
                    }
                }
            }
            if (isEndXMLEvent(reader) && "Article".equals(reader.getName().getLocalPart())) {
                break;
            }
        }
        if (!titleList.isEmpty()) {
            fields.put(StandardField.TITLE, StringUtil.stripBrackets(join(titleList, " ")));
        }
    }
    /**
     * Parses a {@code Journal} element: title, ISSN, volume, issue and the
     * publication date, stopping at the {@code Journal} end tag.
     */
    private void parseJournal(XMLStreamReader reader, Map<Field, String> fields) throws XMLStreamException {
        while (reader.hasNext()) {
            reader.next();
            if (isStartXMLEvent(reader)) {
                String elementName = reader.getName().getLocalPart();
                switch (elementName) {
                    case "Title" -> {
                        reader.next();
                        if (isCharacterXMLEvent(reader)) {
                            putIfValueNotNull(fields, StandardField.JOURNAL, reader.getText());
                        }
                    }
                    case "ISSN" -> {
                        reader.next();
                        if (isCharacterXMLEvent(reader)) {
                            putIfValueNotNull(fields, StandardField.ISSN, reader.getText());
                        }
                    }
                    case "Volume" -> {
                        reader.next();
                        if (isCharacterXMLEvent(reader)) {
                            putIfValueNotNull(fields, StandardField.VOLUME, reader.getText());
                        }
                    }
                    case "Issue" -> {
                        reader.next();
                        if (isCharacterXMLEvent(reader)) {
                            putIfValueNotNull(fields, StandardField.ISSUE, reader.getText());
                        }
                    }
                    case "PubDate" -> {
                        addPubDate(reader, fields, elementName);
                    }
                }
            }
            if (isEndXMLEvent(reader) && "Journal".equals(reader.getName().getLocalPart())) {
                break;
            }
        }
    }
    /**
     * Parses {@code Year}/{@code Month}/{@code Day} children of a date element
     * and, if the combination forms a valid {@link Date}, stores the normalized
     * value under a field name derived from {@code startElement} (e.g.
     * "DateRevised" -> "revised").
     * NOTE(review): the "PubDate" mapping yields an empty field name; as far as
     * visible here this method is only called for the four Date*/Contribution
     * elements — confirm "PubDate" is never passed.
     */
    private void parseDate(XMLStreamReader reader, Map<Field, String> fields, String startElement)
            throws XMLStreamException {
        Optional<String> year = Optional.empty();
        Optional<String> month = Optional.empty();
        Optional<String> day = Optional.empty();
        // mapping from date XML element to field name
        Map<String, String> dateFieldMap = Map.of(
                "DateCreated", "created",
                "DateCompleted", "completed",
                "DateRevised", "revised",
                "ContributionDate", "contribution",
                "PubDate", ""
        );
        while (reader.hasNext()) {
            reader.next();
            if (isStartXMLEvent(reader)) {
                String elementName = reader.getName().getLocalPart();
                switch (elementName) {
                    case "Year" -> {
                        reader.next();
                        if (isCharacterXMLEvent(reader)) {
                            year = Optional.of(reader.getText());
                        }
                    }
                    case "Month" -> {
                        reader.next();
                        if (isCharacterXMLEvent(reader)) {
                            month = Optional.of(reader.getText());
                        }
                    }
                    case "Day" -> {
                        reader.next();
                        if (isCharacterXMLEvent(reader)) {
                            day = Optional.of(reader.getText());
                        }
                    }
                }
            }
            if (isEndXMLEvent(reader) && reader.getName().getLocalPart().equals(startElement)) {
                break;
            }
        }
        Optional<Date> date = Date.parse(year, month, day);
        date.ifPresent(dateValue ->
                fields.put(new UnknownField(dateFieldMap.get(startElement)), dateValue.getNormalized()));
    }
private void addArticleIdList(Map<Field, String> fields, List<ArticleId> articleIdList) {
for (ArticleId id : articleIdList) {
if (!id.idType().isBlank()) {
if ("pubmed".equals(id.idType())) {
fields.computeIfAbsent(StandardField.PMID, k -> id.content());
} else {
fields.computeIfAbsent(FieldFactory.parseField(StandardEntryType.Article, id.idType()), k -> id.content());
}
}
}
}
private void addNotes(Map<Field, String> fields, List<String> generalNoteList) {
List<String> notes = new ArrayList<>();
for (String note : generalNoteList) {
if (!note.isBlank()) {
notes.add(note);
}
}
if (!notes.isEmpty()) {
fields.put(StandardField.NOTE, join(notes, ", "));
}
}
private void addInvestigators(Map<Field, String> fields, List<Investigator> investigatorList) {
List<String> investigatorNames = new ArrayList<>();
List<String> affiliationInfos = new ArrayList<>();
// add the investigators like the authors
if (!investigatorList.isEmpty()) {
for (Investigator investigator : investigatorList) {
StringBuilder result = new StringBuilder(investigator.lastName());
if (!investigator.foreName().isBlank()) {
result.append(", ").append(investigator.foreName());
}
investigatorNames.add(result.toString());
// now add the affiliation info
if (!investigator.affiliationList().isEmpty()) {
affiliationInfos.addAll(investigator.affiliationList());
}
}
if (!affiliationInfos.isEmpty()) {
fields.put(new UnknownField("affiliation"), join(affiliationInfos, ", "));
}
fields.put(new UnknownField("investigator"), join(investigatorNames, " and "));
}
}
private void addKeywords(Map<Field, String> fields, List<String> keywordList) {
// Check whether MeshHeadingList exists or not
if (fields.get(StandardField.KEYWORDS) == null) {
fields.put(StandardField.KEYWORDS, join(keywordList, KEYWORD_SEPARATOR));
} else {
if (!keywordList.isEmpty()) {
// if it exists, combine the MeshHeading with the keywords
String result = join(keywordList, "; ");
result = fields.get(StandardField.KEYWORDS) + KEYWORD_SEPARATOR + result;
fields.put(StandardField.KEYWORDS, result);
}
}
}
private void addOtherId(Map<Field, String> fields, List<OtherId> otherIdList) {
for (OtherId id : otherIdList) {
if (!id.source().isBlank() && !id.content().isBlank()) {
fields.put(FieldFactory.parseField(StandardEntryType.Article, id.source()), id.content());
}
}
}
private void addPersonalNames(Map<Field, String> fields, List<PersonalNameSubject> personalNameSubjectList) {
if (fields.get(StandardField.AUTHOR) == null) {
// if no authors appear, then add the personal names as authors
List<String> personalNames = new ArrayList<>();
if (!personalNameSubjectList.isEmpty()) {
for (PersonalNameSubject personalNameSubject : personalNameSubjectList) {
StringBuilder result = new StringBuilder(personalNameSubject.lastName());
if (!personalNameSubject.foreName().isBlank()) {
result.append(", ").append(personalNameSubject.foreName());
}
personalNames.add(result.toString());
}
fields.put(StandardField.AUTHOR, join(personalNames, " and "));
}
}
}
private void addMeshHeading(Map<Field, String> fields, List<MeshHeading> meshHeadingList) {
List<String> keywords = new ArrayList<>();
if (!meshHeadingList.isEmpty()) {
for (MeshHeading meshHeading : meshHeadingList) {
StringBuilder result = new StringBuilder(meshHeading.descriptorName());
if (meshHeading.qualifierNames() != null) {
for (String qualifierName : meshHeading.qualifierNames()) {
result.append(", ").append(qualifierName);
}
}
keywords.add(result.toString());
}
fields.put(StandardField.KEYWORDS, join(keywords, KEYWORD_SEPARATOR));
}
}
    /**
     * Parses a {@code PubDate} element: year (directly, or extracted from a
     * free-form {@code MedlineDate}), month (converted to JabRef's month
     * format) and an optional season.
     */
    private void addPubDate(XMLStreamReader reader, Map<Field, String> fields, String startElement) throws XMLStreamException {
        while (reader.hasNext()) {
            reader.next();
            if (isStartXMLEvent(reader)) {
                String elementName = reader.getName().getLocalPart();
                switch (elementName) {
                    case "MedlineDate" -> {
                        reader.next();
                        if (isCharacterXMLEvent(reader)) {
                            // free-form date such as "1998 Dec-1999 Jan"; keep only the year
                            fields.put(StandardField.YEAR, extractYear(reader.getText()));
                        }
                    }
                    case "Year" -> {
                        reader.next();
                        if (isCharacterXMLEvent(reader)) {
                            fields.put(StandardField.YEAR, reader.getText());
                        }
                    }
                    case "Month" -> {
                        reader.next();
                        if (isCharacterXMLEvent(reader)) {
                            Optional<Month> month = Month.parse(reader.getText());
                            month.ifPresent(monthValue -> fields.put(StandardField.MONTH, monthValue.getJabRefFormat()));
                        }
                    }
                    case "Season" -> {
                        reader.next();
                        if (isCharacterXMLEvent(reader)) {
                            fields.put(new UnknownField("season"), reader.getText());
                        }
                    }
                }
            }
            if (isEndXMLEvent(reader) && reader.getName().getLocalPart().equals(startElement)) {
                break;
            }
        }
    }
    /**
     * Parses an {@code Abstract} element: copyright information goes into the
     * custom "copyright" field, all {@code AbstractText} fragments are
     * collected and joined with a space into {@link StandardField#ABSTRACT}.
     */
    private void addAbstract(XMLStreamReader reader, Map<Field, String> fields, String startElement)
            throws XMLStreamException {
        List<String> abstractTextList = new ArrayList<>();
        while (reader.hasNext()) {
            reader.next();
            if (isStartXMLEvent(reader)) {
                String elementName = reader.getName().getLocalPart();
                switch (elementName) {
                    case "CopyrightInformation" -> {
                        reader.next();
                        if (isCharacterXMLEvent(reader)) {
                            putIfValueNotNull(fields, new UnknownField("copyright"), reader.getText());
                        }
                    }
                    case "AbstractText" -> {
                        handleTextElement(reader, abstractTextList, elementName);
                    }
                }
            }
            if (isEndXMLEvent(reader) && reader.getName().getLocalPart().equals(startElement)) {
                break;
            }
        }
        if (!abstractTextList.isEmpty()) {
            fields.put(StandardField.ABSTRACT, join(abstractTextList, " "));
        }
    }
    /**
     * Handles text entities that can have inner tags such as {@literal <}i{@literal >}, {@literal <}b{@literal >} etc.
     * We ignore the tags and return only the characters present in the enclosing parent element.
     * MathML content is converted via {@link MathMLParser}; sup/sub content is
     * wrapped in parentheses. The trimmed result is appended to {@code textList}.
     */
    private void handleTextElement(XMLStreamReader reader, List<String> textList, String startElement)
            throws XMLStreamException {
        StringBuilder result = new StringBuilder();
        while (reader.hasNext()) {
            reader.next();
            if (isStartXMLEvent(reader)) {
                String elementName = reader.getName().getLocalPart();
                switch (elementName) {
                    case "math" -> {
                        result.append(MathMLParser.parse(reader));
                    }
                    case "sup", "sub" -> {
                        reader.next();
                        if (isCharacterXMLEvent(reader)) {
                            result.append("(").append(reader.getText()).append(")");
                        }
                    }
                }
            } else if (isCharacterXMLEvent(reader)) {
                // plain character content between the inner tags
                result.append(reader.getText().trim()).append(" ");
            }
            if (isEndXMLEvent(reader) && reader.getName().getLocalPart().equals(startElement)) {
                break;
            }
        }
        textList.add(result.toString().trim());
    }
    /**
     * Parses a {@code Pagination} element. A {@code MedlinePgn} shorthand range
     * is expanded via {@link #fixPageRange(String)}; otherwise the pages field
     * is assembled from {@code StartPage}/{@code EndPage}.
     */
    private void addPagination(XMLStreamReader reader, Map<Field, String> fields, String startElement)
            throws XMLStreamException {
        String startPage = "";
        String endPage = "";
        while (reader.hasNext()) {
            reader.next();
            if (isStartXMLEvent(reader)) {
                String elementName = reader.getName().getLocalPart();
                switch (elementName) {
                    case "MedlinePgn" -> {
                        reader.next();
                        if (isCharacterXMLEvent(reader)) {
                            putIfValueNotNull(fields, StandardField.PAGES, fixPageRange(reader.getText()));
                        }
                    }
                    case "StartPage" -> {
                        reader.next();
                        if (isCharacterXMLEvent(reader)) {
                            // it could happen, that the article has only a start page
                            // NOTE(review): endPage is "" unless EndPage appeared first;
                            // depends on element order in the XML — confirm intended.
                            startPage = reader.getText() + endPage;
                            putIfValueNotNull(fields, StandardField.PAGES, startPage);
                        }
                    }
                    case "EndPage" -> {
                        reader.next();
                        if (isCharacterXMLEvent(reader)) {
                            endPage = reader.getText();
                            // but it should not happen, that a endpage appears without startpage
                            fields.put(StandardField.PAGES, fixPageRange(startPage + "-" + endPage));
                        }
                    }
                }
            }
            if (isEndXMLEvent(reader) && reader.getName().getLocalPart().equals(startElement)) {
                break;
            }
        }
    }
private String extractYear(String medlineDate) {
// The year of the medlineDate should be the first 4 digits
return medlineDate.substring(0, 4);
}
private void handleAuthorList(XMLStreamReader reader, Map<Field, String> fields, String startElement) throws XMLStreamException {
List<String> authorNames = new ArrayList<>();
while (reader.hasNext()) {
reader.next();
if (isStartXMLEvent(reader)) {
String elementName = reader.getName().getLocalPart();
switch (elementName) {
case "Author" -> {
parseAuthor(reader, authorNames);
}
}
}
if (isEndXMLEvent(reader) && reader.getName().getLocalPart().equals(startElement)) {
break;
}
}
fields.put(StandardField.AUTHOR, join(authorNames, " and "));
}
private void parseAuthor(XMLStreamReader reader, List<String> authorNames) throws XMLStreamException {
StringBuilder authorName = new StringBuilder();
List<String> collectiveNames = new ArrayList<>();
while (reader.hasNext()) {
reader.next();
if (isStartXMLEvent(reader)) {
String elementName = reader.getName().getLocalPart();
switch (elementName) {
case "CollectiveName" -> {
reader.next();
if (isCharacterXMLEvent(reader)) {
collectiveNames.add(reader.getText());
}
}
case "LastName" -> {
reader.next();
if (isCharacterXMLEvent(reader)) {
authorName = new StringBuilder(reader.getText());
}
}
case "ForeName" -> {
reader.next();
if (isCharacterXMLEvent(reader)) {
authorName.append(", ").append(reader.getText());
}
}
}
}
if (isEndXMLEvent(reader) && "Author".equals(reader.getName().getLocalPart())) {
break;
}
}
if (collectiveNames.size() > 0) {
authorNames.addAll(collectiveNames);
}
if (!authorName.toString().isBlank()) {
authorNames.add(authorName.toString());
}
}
private void putIfValueNotNull(Map<Field, String> fields, Field field, String value) {
if (value != null) {
fields.put(field, value);
}
}
/**
* Convert medline page ranges from short form to full form. Medline reports page ranges in a shorthand format.
* The last page is reported using only the digits which differ from the first page. i.e. 12345-51 refers to the actual range 12345-12351
*/
private String fixPageRange(String pageRange) {
int minusPos = pageRange.indexOf('-');
if (minusPos < 0) {
return pageRange;
}
String startPage = pageRange.substring(0, minusPos).trim();
String endPage = pageRange.substring(minusPos + 1).trim();
int lengthOfEndPage = endPage.length();
int lengthOfStartPage = startPage.length();
if (lengthOfEndPage < lengthOfStartPage) {
endPage = startPage.substring(0, lengthOfStartPage - lengthOfEndPage) + endPage;
}
return startPage + "--" + endPage;
}
private boolean isCharacterXMLEvent(XMLStreamReader reader) {
return reader.getEventType() == XMLEvent.CHARACTERS;
}
private boolean isStartXMLEvent(XMLStreamReader reader) {
return reader.getEventType() == XMLEvent.START_ELEMENT;
}
private boolean isEndXMLEvent(XMLStreamReader reader) {
return reader.getEventType() == XMLEvent.END_ELEMENT;
}
    /**
     * Parses all entries from the given stream (assumed UTF-8) by delegating to
     * {@code importDatabase}.
     * NOTE(review): an IOException is logged and an empty list is returned
     * instead of propagating a ParseException — deliberate best-effort
     * behavior, preserved here; confirm callers expect the silent fallback.
     */
    @Override
    public List<BibEntry> parseEntries(InputStream inputStream) throws ParseException {
        try {
            return importDatabase(
                    new BufferedReader(new InputStreamReader(inputStream, StandardCharsets.UTF_8))).getDatabase().getEntries();
        } catch (IOException e) {
            LOGGER.error(e.getLocalizedMessage(), e);
        }
        return Collections.emptyList();
    }
}
| 50,352 | 38.74191 | 141 | java |
null | jabref-main/src/main/java/org/jabref/logic/importer/fileformat/MedlinePlainImporter.java | package org.jabref.logic.importer.fileformat;
import java.io.BufferedReader;
import java.io.IOException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Locale;
import java.util.Map;
import java.util.regex.Pattern;
import org.jabref.logic.importer.Importer;
import org.jabref.logic.importer.ParserResult;
import org.jabref.logic.util.OS;
import org.jabref.logic.util.StandardFileType;
import org.jabref.model.entry.AuthorList;
import org.jabref.model.entry.BibEntry;
import org.jabref.model.entry.field.Field;
import org.jabref.model.entry.field.StandardField;
import org.jabref.model.entry.field.UnknownField;
import org.jabref.model.entry.types.EntryType;
import org.jabref.model.entry.types.StandardEntryType;
/**
 * Importer for the MEDLINE Plain format.
 * <p>
 * See <a href="http://www.nlm.nih.gov/bsd/mms/medlineelements.html">the MEDLINE element descriptions</a> for details on the format.
 */
public class MedlinePlainImporter extends Importer {
    // Tag lines identifying a Medline plain-text record:
    // PMID (PubMed unique id), PMC (PubMed Central id), PMCR (PMC release)
    private static final Pattern PMID_PATTERN = Pattern.compile("PMID.*-.*");
    private static final Pattern PMC_PATTERN = Pattern.compile("PMC.*-.*");
    private static final Pattern PMCR_PATTERN = Pattern.compile("PMCR.*-.*");
    // Creation timestamp, e.g. "2004/11/30 09:00"; completion date as 8 digits, e.g. "20041230"
    private static final Pattern CREATE_DATE_PATTERN = Pattern.compile("\\d{4}/[0123]?\\d/\\s?[012]\\d:[0-5]\\d");
    private static final Pattern COMPLETE_DATE_PATTERN = Pattern.compile("\\d{8}");
@Override
public String getName() {
return "Medline/PubMed Plain";
}
@Override
public StandardFileType getFileType() {
return StandardFileType.MEDLINE_PLAIN;
}
@Override
public String getDescription() {
return "Importer for the MedlinePlain format.";
}
@Override
public String getId() {
return "medlineplain";
}
@Override
public boolean isRecognizedFormat(BufferedReader reader) throws IOException {
// Our strategy is to look for the "PMID - *", "PMC.*-.*", or "PMCR.*-.*" line
// (i.e., PubMed Unique Identifier, PubMed Central Identifier, PubMed Central Release)
String str;
while ((str = reader.readLine()) != null) {
if (PMID_PATTERN.matcher(str).find() || PMC_PATTERN.matcher(str).find()
|| PMCR_PATTERN.matcher(str).find()) {
return true;
}
}
return false;
}
@Override
public ParserResult importDatabase(BufferedReader reader) throws IOException {
List<BibEntry> bibitems = new ArrayList<>();
// use optional here, so that no exception will be thrown if the file is empty
String linesAsString = reader.lines().reduce((line, nextline) -> line + "\n" + nextline).orElse("");
String[] entries = linesAsString.replace("\u2013", "-").replace("\u2014", "--").replace("\u2015", "--")
.split("\\n\\n");
for (String entry1 : entries) {
if (entry1.trim().isEmpty() || !entry1.contains("-")) {
continue;
}
EntryType type = BibEntry.DEFAULT_TYPE;
String author = "";
String editor = "";
String comment = "";
Map<Field, String> fieldConversionMap = new HashMap<>();
String[] lines = entry1.split("\n");
for (int j = 0; j < lines.length; j++) {
StringBuilder current = new StringBuilder(lines[j]);
boolean done = false;
while (!done && (j < (lines.length - 1))) {
if (lines[j + 1].length() <= 4) {
j++;
continue;
}
if (lines[j + 1].charAt(4) != '-') {
if ((current.length() > 0) && !Character.isWhitespace(current.charAt(current.length() - 1))) {
current.append(' ');
}
current.append(lines[j + 1].trim());
j++;
} else {
done = true;
}
}
String entry = current.toString();
if (!checkLineValidity(entry)) {
continue;
}
String label = entry.substring(0, entry.indexOf('-')).trim();
String value = entry.substring(entry.indexOf('-') + 1).trim();
if ("PT".equals(label)) {
type = addSourceType(value, type);
}
addDates(fieldConversionMap, label, value);
addAbstract(fieldConversionMap, label, value);
addTitles(fieldConversionMap, label, value, type);
addIDs(fieldConversionMap, label, value);
addStandardNumber(fieldConversionMap, label, value);
if ("FAU".equals(label)) {
if ("".equals(author)) {
author = value;
} else {
author += " and " + value;
}
} else if ("FED".equals(label)) {
if ("".equals(editor)) {
editor = value;
} else {
editor += " and " + value;
}
}
// store the fields in a map
Map<String, Field> hashMap = new HashMap<>();
hashMap.put("PG", StandardField.PAGES);
hashMap.put("PL", StandardField.ADDRESS);
hashMap.put("PHST", new UnknownField("history"));
hashMap.put("PST", new UnknownField("publication-status"));
hashMap.put("VI", StandardField.VOLUME);
hashMap.put("LA", StandardField.LANGUAGE);
hashMap.put("PUBM", new UnknownField("model"));
hashMap.put("RN", new UnknownField("registry-number"));
hashMap.put("NM", new UnknownField("substance-name"));
hashMap.put("OCI", new UnknownField("copyright-owner"));
hashMap.put("CN", new UnknownField("corporate"));
hashMap.put("IP", StandardField.ISSUE);
hashMap.put("EN", StandardField.EDITION);
hashMap.put("GS", new UnknownField("gene-symbol"));
hashMap.put("GN", StandardField.NOTE);
hashMap.put("GR", new UnknownField("grantno"));
hashMap.put("SO", new UnknownField("source"));
hashMap.put("NR", new UnknownField("number-of-references"));
hashMap.put("SFM", new UnknownField("space-flight-mission"));
hashMap.put("STAT", new UnknownField("status"));
hashMap.put("SB", new UnknownField("subset"));
hashMap.put("OTO", new UnknownField("termowner"));
hashMap.put("OWN", StandardField.OWNER);
// add the fields to hm
for (Map.Entry<String, Field> mapEntry : hashMap.entrySet()) {
String medlineKey = mapEntry.getKey();
Field bibtexKey = mapEntry.getValue();
if (medlineKey.equals(label)) {
fieldConversionMap.put(bibtexKey, value);
}
}
if ("IRAD".equals(label) || "IR".equals(label) || "FIR".equals(label)) {
String oldInvestigator = fieldConversionMap.get(new UnknownField("investigator"));
if (oldInvestigator == null) {
fieldConversionMap.put(new UnknownField("investigator"), value);
} else {
fieldConversionMap.put(new UnknownField("investigator"), oldInvestigator + ", " + value);
}
} else if ("MH".equals(label) || "OT".equals(label)) {
if (!fieldConversionMap.containsKey(StandardField.KEYWORDS)) {
fieldConversionMap.put(StandardField.KEYWORDS, value);
} else {
String kw = fieldConversionMap.get(StandardField.KEYWORDS);
fieldConversionMap.put(StandardField.KEYWORDS, kw + ", " + value);
}
} else if ("CON".equals(label) || "CIN".equals(label) || "EIN".equals(label) || "EFR".equals(label)
|| "CRI".equals(label) || "CRF".equals(label) || "PRIN".equals(label) || "PROF".equals(label)
|| "RPI".equals(label) || "RPF".equals(label) || "RIN".equals(label) || "ROF".equals(label)
|| "UIN".equals(label) || "UOF".equals(label) || "SPIN".equals(label) || "ORI".equals(label)) {
if (!comment.isEmpty()) {
comment = comment + "\n";
}
comment = comment + value;
}
}
fixAuthors(fieldConversionMap, author, StandardField.AUTHOR);
fixAuthors(fieldConversionMap, editor, StandardField.EDITOR);
if (!comment.isEmpty()) {
fieldConversionMap.put(StandardField.COMMENT, comment);
}
BibEntry b = new BibEntry(type);
// create one here
b.setField(fieldConversionMap);
bibitems.add(b);
}
return new ParserResult(bibitems);
}
private boolean checkLineValidity(String line) {
return (line.length() >= 5) && (line.charAt(4) == '-');
}
private EntryType addSourceType(String value, EntryType type) {
String val = value.toLowerCase(Locale.ENGLISH);
switch (val) {
case "book":
return StandardEntryType.Book;
case "journal article":
case "classical article":
case "corrected and republished article":
case "historical article":
case "introductory journal article":
case "newspaper article":
return StandardEntryType.Article;
case "clinical conference":
case "consensus development conference":
case "consensus development conference, nih":
return StandardEntryType.Conference;
case "technical report":
return StandardEntryType.TechReport;
case "editorial":
return StandardEntryType.InProceedings;
case "overall":
return StandardEntryType.Proceedings;
default:
return type;
}
}
    /**
     * Stores standard numbers: "IS" (ISSN, possibly qualified) and "ISBN".
     * A qualifier in brackets, e.g. {@code "1234-5678 (electronic)"}, becomes part of the
     * field name (e.g. "electronic-issn" — the suffix comes from Field#toString on ISSN).
     *
     * @param hm    target field map
     * @param lab   the Medline field label
     * @param value the raw field value
     */
    private void addStandardNumber(Map<Field, String> hm, String lab, String value) {
        if ("IS".equals(lab)) {
            Field key = StandardField.ISSN;
            // it is possible to have two issn, one for electronic and for print
            // if there are two then it comes at the end in brackets (electronic) or (print)
            // so search for the brackets
            if (value.indexOf('(') > 0) {
                int keyStart = value.indexOf('(');
                int keyEnd = value.indexOf(')');
                // NOTE(review): assumes a matching ')' exists after '(' — an unbalanced
                // bracket would throw StringIndexOutOfBoundsException; TODO confirm input is always balanced
                key = new UnknownField(value.substring(keyStart + 1, keyEnd) + "-" + key);
                String numberValue = value.substring(0, keyStart - 1);
                hm.put(key, numberValue);
            } else {
                hm.put(key, value);
            }
        } else if ("ISBN".equals(lab)) {
            hm.put(StandardField.ISBN, value);
        }
    }
private void fixAuthors(Map<Field, String> hm, String author, Field field) {
if (!author.isEmpty()) {
String fixedAuthor = AuthorList.fixAuthorLastNameFirst(author);
hm.put(field, fixedAuthor);
}
}
private void addIDs(Map<Field, String> hm, String lab, String value) {
if ("AID".equals(lab)) {
Field key = new UnknownField("article-id");
String idValue = value;
if (value.startsWith("doi:")) {
idValue = idValue.replaceAll("(?i)doi:", "").trim();
key = StandardField.DOI;
} else if (value.indexOf('[') > 0) {
int startOfIdentifier = value.indexOf('[');
int endOfIdentifier = value.indexOf(']');
key = new UnknownField("article-" + value.substring(startOfIdentifier + 1, endOfIdentifier));
idValue = value.substring(0, startOfIdentifier - 1);
}
hm.put(key, idValue);
} else if ("LID".equals(lab)) {
hm.put(new UnknownField("location-id"), value);
} else if ("MID".equals(lab)) {
hm.put(new UnknownField("manuscript-id"), value);
} else if ("JID".equals(lab)) {
hm.put(new UnknownField("nlm-unique-id"), value);
} else if ("OID".equals(lab)) {
hm.put(new UnknownField("other-id"), value);
} else if ("SI".equals(lab)) {
hm.put(new UnknownField("second-id"), value);
}
}
private void addTitles(Map<Field, String> hm, String lab, String val, EntryType type) {
if ("TI".equals(lab)) {
String oldVal = hm.get(StandardField.TITLE);
if (oldVal == null) {
hm.put(StandardField.TITLE, val);
} else {
if (oldVal.endsWith(":") || oldVal.endsWith(".") || oldVal.endsWith("?")) {
hm.put(StandardField.TITLE, oldVal + " " + val);
} else {
hm.put(StandardField.TITLE, oldVal + ": " + val);
}
}
} else if ("BTI".equals(lab) || "CTI".equals(lab)) {
hm.put(StandardField.BOOKTITLE, val);
} else if ("JT".equals(lab)) {
if (type.equals(StandardEntryType.InProceedings)) {
hm.put(StandardField.BOOKTITLE, val);
} else {
hm.put(StandardField.JOURNAL, val);
}
} else if ("CTI".equals(lab)) {
hm.put(new UnknownField("collection-title"), val);
} else if ("TA".equals(lab)) {
hm.put(new UnknownField("title-abbreviation"), val);
} else if ("TT".equals(lab)) {
hm.put(new UnknownField("transliterated-title"), val);
} else if ("VTI".equals(lab)) {
hm.put(new UnknownField("volume-title"), val);
}
}
private void addAbstract(Map<Field, String> hm, String lab, String value) {
String abstractValue = "";
if ("AB".equals(lab)) {
// adds copyright information that comes at the end of an abstract
if (value.contains("Copyright")) {
int copyrightIndex = value.lastIndexOf("Copyright");
// remove the copyright from the field since the name of the field is copyright
String copyrightInfo = value.substring(copyrightIndex).replaceAll("Copyright ", "");
hm.put(new UnknownField("copyright"), copyrightInfo);
abstractValue = value.substring(0, copyrightIndex);
} else {
abstractValue = value;
}
String oldAb = hm.get(StandardField.ABSTRACT);
if (oldAb == null) {
hm.put(StandardField.ABSTRACT, abstractValue);
} else {
hm.put(StandardField.ABSTRACT, oldAb + OS.NEWLINE + abstractValue);
}
} else if ("OAB".equals(lab) || "OABL".equals(lab)) {
hm.put(new UnknownField("other-abstract"), value);
}
}
private void addDates(Map<Field, String> hm, String lab, String val) {
if ("CRDT".equals(lab) && isCreateDateFormat(val)) {
hm.put(new UnknownField("create-date"), val);
} else if ("DEP".equals(lab) && isDateFormat(val)) {
hm.put(new UnknownField("electronic-publication"), val);
} else if ("DA".equals(lab) && isDateFormat(val)) {
hm.put(new UnknownField("date-created"), val);
} else if ("DCOM".equals(lab) && isDateFormat(val)) {
hm.put(new UnknownField("completed"), val);
} else if ("LR".equals(lab) && isDateFormat(val)) {
hm.put(new UnknownField("revised"), val);
} else if ("DP".equals(lab)) {
String[] parts = val.split(" ");
hm.put(StandardField.YEAR, parts[0]);
if ((parts.length > 1) && !parts[1].isEmpty()) {
hm.put(StandardField.MONTH, parts[1]);
}
} else if ("EDAT".equals(lab) && isCreateDateFormat(val)) {
hm.put(new UnknownField("publication"), val);
} else if ("MHDA".equals(lab) && isCreateDateFormat(val)) {
hm.put(new UnknownField("mesh-date"), val);
}
}
    /** Returns true when the value matches the precompiled CREATE_DATE_PATTERN. */
    private boolean isCreateDateFormat(String value) {
        return CREATE_DATE_PATTERN.matcher(value).matches();
    }
    /** Returns true when the value matches the precompiled COMPLETE_DATE_PATTERN. */
    private boolean isDateFormat(String value) {
        return COMPLETE_DATE_PATTERN.matcher(value).matches();
    }
}
| 17,000 | 42.369898 | 119 | java |
null | jabref-main/src/main/java/org/jabref/logic/importer/fileformat/ModsImporter.java | package org.jabref.logic.importer.fileformat;
import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Locale;
import java.util.Map;
import java.util.Objects;
import java.util.Optional;
import java.util.Set;
import java.util.regex.Pattern;
import java.util.stream.Collectors;
import javax.xml.XMLConstants;
import javax.xml.stream.XMLInputFactory;
import javax.xml.stream.XMLStreamException;
import javax.xml.stream.XMLStreamReader;
import javax.xml.stream.events.XMLEvent;
import org.jabref.logic.importer.ImportFormatPreferences;
import org.jabref.logic.importer.Importer;
import org.jabref.logic.importer.ParseException;
import org.jabref.logic.importer.Parser;
import org.jabref.logic.importer.ParserResult;
import org.jabref.logic.importer.fileformat.mods.Identifier;
import org.jabref.logic.importer.fileformat.mods.Name;
import org.jabref.logic.importer.fileformat.mods.RecordInfo;
import org.jabref.logic.util.StandardFileType;
import org.jabref.model.entry.BibEntry;
import org.jabref.model.entry.Date;
import org.jabref.model.entry.field.Field;
import org.jabref.model.entry.field.FieldFactory;
import org.jabref.model.entry.field.StandardField;
import org.jabref.model.entry.field.UnknownField;
import org.jabref.model.entry.types.EntryTypeFactory;
import com.google.common.base.Joiner;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* Importer for the MODS format.<br>
* More details about the format can be found here <a href="http://www.loc.gov/standards/mods/">http://www.loc.gov/standards/mods/</a>. <br>
* The newest xml schema can also be found here <a href="www.loc.gov/standards/mods/mods-schemas.html.">www.loc.gov/standards/mods/mods-schemas.html.</a>.
*/
public class ModsImporter extends Importer implements Parser {
private static final Logger LOGGER = LoggerFactory.getLogger(ModsImporter.class);
private static final Pattern MODS_PATTERN = Pattern.compile("<mods .*>");
private final String keywordSeparator;
    /**
     * @param importFormatPreferences supplies the configured keyword separator, which is
     *                                used (plus a trailing space) when joining parsed keywords
     */
    public ModsImporter(ImportFormatPreferences importFormatPreferences) {
        keywordSeparator = importFormatPreferences.bibEntryPreferences().getKeywordSeparator() + " ";
    }
@Override
public boolean isRecognizedFormat(BufferedReader input) throws IOException {
return input.lines().anyMatch(line -> MODS_PATTERN.matcher(line).find());
}
    /**
     * Streams the MODS XML with StAX and converts every {@code mods} element into a BibEntry.
     * Parse failures are reported via {@link ParserResult#fromError} instead of throwing.
     */
    @Override
    public ParserResult importDatabase(BufferedReader input) throws IOException {
        Objects.requireNonNull(input);
        List<BibEntry> bibItems = new ArrayList<>();
        try {
            XMLInputFactory xmlInputFactory = XMLInputFactory.newInstance();
            // prevent xxe (https://rules.sonarsource.com/java/RSPEC-2755)
            xmlInputFactory.setProperty(XMLConstants.ACCESS_EXTERNAL_DTD, "");
            xmlInputFactory.setProperty(XMLConstants.ACCESS_EXTERNAL_SCHEMA, "");
            XMLStreamReader reader = xmlInputFactory.createXMLStreamReader(input);
            parseModsCollection(bibItems, reader);
        } catch (XMLStreamException e) {
            LOGGER.debug("could not parse document", e);
            return ParserResult.fromError(e);
        }
        return new ParserResult(bibItems);
    }
private void parseModsCollection(List<BibEntry> bibItems, XMLStreamReader reader) throws XMLStreamException {
while (reader.hasNext()) {
reader.next();
if (isStartXMLEvent(reader) && "mods".equals(reader.getName().getLocalPart())) {
BibEntry entry = new BibEntry();
Map<Field, String> fields = new HashMap<>();
String id = reader.getAttributeValue(null, "ID");
if (id != null) {
entry.setCitationKey(id);
}
parseModsGroup(fields, reader, entry);
entry.setField(fields);
bibItems.add(entry);
}
}
}
    /**
     * Dispatches on the child elements of one {@code mods} element and fills the field map.
     * Repeatable elements (notes, keywords, authors) are collected in lists and joined once
     * the closing {@code </mods>} tag is reached.
     *
     * @param fields target field map for this entry
     * @param reader positioned on the {@code mods} start element
     * @param entry  the entry being built (type and citation key are set directly on it)
     */
    private void parseModsGroup(Map<Field, String> fields, XMLStreamReader reader, BibEntry entry) throws XMLStreamException {
        // These elements (subject, keywords and authors) can appear more than once,
        // so they are collected in lists
        List<String> notes = new ArrayList<>();
        List<String> keywords = new ArrayList<>();
        List<String> authors = new ArrayList<>();
        while (reader.hasNext()) {
            reader.next();
            if (isStartXMLEvent(reader)) {
                String elementName = reader.getName().getLocalPart();
                // check which MODS group has started
                switch (elementName) {
                    case "abstract" -> {
                        reader.next();
                        if (isCharacterXMLEvent(reader)) {
                            putIfValueNotNull(fields, StandardField.ABSTRACT, reader.getText());
                        }
                    }
                    case "genre" -> {
                        reader.next();
                        if (isCharacterXMLEvent(reader)) {
                            // the genre decides the BibTeX entry type (see mapGenre)
                            entry.setType(EntryTypeFactory.parse(mapGenre(reader.getText())));
                        }
                    }
                    case "language" -> {
                        parseLanguage(reader, fields);
                    }
                    case "location" -> {
                        parseLocationAndUrl(reader, fields);
                    }
                    case "identifier" -> {
                        String type = reader.getAttributeValue(null, "type");
                        reader.next();
                        if (isCharacterXMLEvent(reader)) {
                            parseIdentifier(fields, new Identifier(type, reader.getText()), entry);
                        }
                    }
                    case "note" -> {
                        reader.next();
                        if (isCharacterXMLEvent(reader)) {
                            notes.add(reader.getText());
                        }
                    }
                    case "recordInfo" -> {
                        parseRecordInfo(reader, fields);
                    }
                    case "titleInfo" -> {
                        parseTitle(reader, fields);
                    }
                    case "subject" -> {
                        parseSubject(reader, fields, keywords);
                    }
                    case "originInfo" -> {
                        parseOriginInfo(reader, fields);
                    }
                    case "name" -> {
                        parseName(reader, fields, authors);
                    }
                    case "relatedItem" -> {
                        parseRelatedItem(reader, fields);
                    }
                }
            }
            if (isEndXMLEvent(reader) && "mods".equals(reader.getName().getLocalPart())) {
                break;
            }
        }
        // join the repeatable values into single field strings
        putIfListIsNotEmpty(fields, notes, StandardField.NOTE, ", ");
        putIfListIsNotEmpty(fields, keywords, StandardField.KEYWORDS, this.keywordSeparator);
        putIfListIsNotEmpty(fields, authors, StandardField.AUTHOR, " and ");
    }
/**
* Parses information from the RelatedModsGroup. It has the same elements as ModsGroup.
* But information like volume, issue and the pages appear here instead of in the ModsGroup.
* Also, if there appears a title field, then this indicates that is the name of the journal
* which the article belongs to.
*/
private void parseRelatedItem(XMLStreamReader reader, Map<Field, String> fields) throws XMLStreamException {
while (reader.hasNext()) {
reader.next();
if (isStartXMLEvent(reader)) {
switch (reader.getName().getLocalPart()) {
case "title" -> {
reader.next();
if (isCharacterXMLEvent(reader)) {
putIfValueNotNull(fields, StandardField.JOURNAL, reader.getText());
}
}
case "detail" -> {
handleDetail(reader, fields);
}
case "extent" -> {
handleExtent(reader, fields);
}
}
}
if (isEndXMLEvent(reader) && "relatedItem".equals(reader.getName().getLocalPart())) {
break;
}
}
}
    /**
     * Reads an {@code extent} element and derives the PAGES field.
     * Priority: a "total" page count wins; otherwise "start" (optionally combined with
     * "end" into "start-end") is used.
     */
    private void handleExtent(XMLStreamReader reader, Map<Field, String> fields) throws XMLStreamException {
        String total = "";
        String startPage = "";
        String endPage = "";
        while (reader.hasNext()) {
            reader.next();
            if (isStartXMLEvent(reader)) {
                String elementName = reader.getName().getLocalPart();
                // advance to the text node of the child element before switching
                reader.next();
                switch (elementName) {
                    case "total" -> {
                        if (isCharacterXMLEvent(reader)) {
                            total = reader.getText();
                        }
                    }
                    case "start" -> {
                        if (isCharacterXMLEvent(reader)) {
                            startPage = reader.getText();
                        }
                    }
                    case "end" -> {
                        if (isCharacterXMLEvent(reader)) {
                            endPage = reader.getText();
                        }
                    }
                }
            }
            if (isEndXMLEvent(reader) && "extent".equals(reader.getName().getLocalPart())) {
                break;
            }
        }
        if (!total.isBlank()) {
            putIfValueNotNull(fields, StandardField.PAGES, total);
        } else if (!startPage.isBlank()) {
            putIfValueNotNull(fields, StandardField.PAGES, startPage);
            if (!endPage.isBlank()) {
                // if end appears, then there has to be a start page appeared, so get it and put it together with
                // the end page
                fields.put(StandardField.PAGES, startPage + "-" + endPage);
            }
        }
    }
private void handleDetail(XMLStreamReader reader, Map<Field, String> fields) throws XMLStreamException {
String type = reader.getAttributeValue(null, "type");
Set<String> detailElementSet = Set.of("number", "caption", "title");
while (reader.hasNext()) {
reader.next();
if (isStartXMLEvent(reader)) {
if (detailElementSet.contains(reader.getName().getLocalPart())) {
reader.next();
if (isCharacterXMLEvent(reader)) {
putIfValueNotNull(fields, FieldFactory.parseField(type), reader.getText());
}
}
}
if (isEndXMLEvent(reader) && "detail".equals(reader.getName().getLocalPart())) {
break;
}
}
}
private void parseName(XMLStreamReader reader, Map<Field, String> fields, List<String> authors) throws XMLStreamException {
List<Name> names = new ArrayList<>();
while (reader.hasNext()) {
reader.next();
if (isStartXMLEvent(reader)) {
if ("affiliation".equals(reader.getName().getLocalPart())) {
reader.next();
if (isCharacterXMLEvent(reader)) {
putIfValueNotNull(fields, new UnknownField("affiliation"), reader.getText());
}
} else if ("namePart".equals(reader.getName().getLocalPart())) {
String type = reader.getAttributeValue(null, "type");
reader.next();
if (isCharacterXMLEvent(reader)) {
names.add(new Name(reader.getText(), type));
}
}
}
if (isEndXMLEvent(reader) && "name".equals(reader.getName().getLocalPart())) {
break;
}
}
handleAuthorsInNamePart(names, authors);
}
    /**
     * Reads an {@code originInfo} element: publisher, edition, issuance, dates and
     * place terms. Multiple places are joined into the ADDRESS field at the end.
     */
    private void parseOriginInfo(XMLStreamReader reader, Map<Field, String> fields) throws XMLStreamException {
        List<String> places = new ArrayList<>();
        while (reader.hasNext()) {
            reader.next();
            if (isStartXMLEvent(reader)) {
                String elementName = reader.getName().getLocalPart();
                switch (elementName) {
                    case "issuance" -> {
                        reader.next();
                        if (isCharacterXMLEvent(reader)) {
                            putIfValueNotNull(fields, new UnknownField("issuance"), reader.getText());
                        }
                    }
                    case "placeTerm" -> {
                        reader.next();
                        if (isCharacterXMLEvent(reader)) {
                            appendIfValueNotNullOrBlank(places, reader.getText());
                        }
                    }
                    case "publisher" -> {
                        reader.next();
                        if (isCharacterXMLEvent(reader)) {
                            putIfValueNotNull(fields, StandardField.PUBLISHER, reader.getText());
                        }
                    }
                    case "edition" -> {
                        reader.next();
                        if (isCharacterXMLEvent(reader)) {
                            putIfValueNotNull(fields, StandardField.EDITION, reader.getText());
                        }
                    }
                    // all four date variants share one handler which maps them to fields
                    case "dateIssued", "dateCreated", "dateCaptured", "dateModified" -> {
                        reader.next();
                        if (isCharacterXMLEvent(reader)) {
                            putDate(fields, elementName, reader.getText());
                        }
                    }
                }
            }
            if (isEndXMLEvent(reader) && "originInfo".equals(reader.getName().getLocalPart())) {
                break;
            }
        }
        putIfListIsNotEmpty(fields, places, StandardField.ADDRESS, ", ");
    }
private void parseSubject(XMLStreamReader reader, Map<Field, String> fields, List<String> keywords) throws XMLStreamException {
while (reader.hasNext()) {
reader.next();
if (isStartXMLEvent(reader)) {
switch (reader.getName().getLocalPart()) {
case "topic" -> {
reader.next();
if (isCharacterXMLEvent(reader)) {
keywords.add(reader.getText().trim());
}
}
case "city" -> {
reader.next();
if (isCharacterXMLEvent(reader)) {
putIfValueNotNull(fields, new UnknownField("city"), reader.getText());
}
}
case "country" -> {
reader.next();
if (isCharacterXMLEvent(reader)) {
putIfValueNotNull(fields, new UnknownField("country"), reader.getText());
}
}
}
}
if (isEndXMLEvent(reader) && "subject".equals(reader.getName().getLocalPart())) {
break;
}
}
}
    /**
     * Reads a {@code recordInfo} element: record contents become the "source" field,
     * languageTerm children are joined into the LANGUAGE field.
     */
    private void parseRecordInfo(XMLStreamReader reader, Map<Field, String> fields) throws XMLStreamException {
        RecordInfo recordInfoDefinition = new RecordInfo();
        List<String> recordContents = recordInfoDefinition.recordContents();
        List<String> languages = recordInfoDefinition.languages();
        while (reader.hasNext()) {
            reader.next();
            if (isStartXMLEvent(reader)) {
                if (RecordInfo.elementNameSet.contains(reader.getName().getLocalPart())) {
                    reader.next();
                    if (isCharacterXMLEvent(reader)) {
                        // NOTE(review): add(0, ...) stores contents in reverse document order,
                        // so the put-loop below keeps the FIRST document value — presumably
                        // intentional (last put wins); TODO confirm
                        recordContents.add(0, reader.getText());
                    }
                } else if ("languageTerm".equals(reader.getName().getLocalPart())) {
                    reader.next();
                    if (isCharacterXMLEvent(reader)) {
                        languages.add(reader.getText());
                    }
                }
            }
            if (isEndXMLEvent(reader) && "recordInfo".equals(reader.getName().getLocalPart())) {
                break;
            }
        }
        for (String recordContent : recordContents) {
            putIfValueNotNull(fields, new UnknownField("source"), recordContent);
        }
        putIfListIsNotEmpty(fields, languages, StandardField.LANGUAGE, ", ");
    }
private void parseLanguage(XMLStreamReader reader, Map<Field, String> fields) throws XMLStreamException {
while (reader.hasNext()) {
reader.next();
if (isStartXMLEvent(reader) && "languageTerm".equals(reader.getName().getLocalPart())) {
reader.next();
if (isCharacterXMLEvent(reader)) {
putIfValueNotNull(fields, StandardField.LANGUAGE, reader.getText());
}
}
if (isEndXMLEvent(reader) && "language".equals(reader.getName().getLocalPart())) {
break;
}
}
}
private void parseTitle(XMLStreamReader reader, Map<Field, String> fields) throws XMLStreamException {
while (reader.hasNext()) {
reader.next();
if (isStartXMLEvent(reader) && "title".equals(reader.getName().getLocalPart())) {
reader.next();
if (isCharacterXMLEvent(reader)) {
putIfValueNotNull(fields, StandardField.TITLE, reader.getText());
}
}
if (isEndXMLEvent(reader) && "titleInfo".equals(reader.getName().getLocalPart())) {
break;
}
}
}
private void parseLocationAndUrl(XMLStreamReader reader, Map<Field, String> fields) throws XMLStreamException {
List<String> locations = new ArrayList<>();
List<String> urls = new ArrayList<>();
while (reader.hasNext()) {
reader.next();
if (isStartXMLEvent(reader)) {
if ("physicalLocation".equals(reader.getName().getLocalPart())) {
reader.next();
if (isCharacterXMLEvent(reader)) {
locations.add(reader.getText());
}
} else if ("url".equals(reader.getName().getLocalPart())) {
reader.next();
if (isCharacterXMLEvent(reader)) {
urls.add(reader.getText());
}
}
}
if (isEndXMLEvent(reader) && "location".equals(reader.getName().getLocalPart())) {
break;
}
}
putIfListIsNotEmpty(fields, locations, StandardField.LOCATION, ", ");
putIfListIsNotEmpty(fields, urls, StandardField.URL, ", ");
}
private String mapGenre(String genre) {
return switch (genre.toLowerCase(Locale.ROOT)) {
case "conference publication" -> "proceedings";
case "database" -> "dataset";
case "yearbook", "handbook" -> "book";
case "law report or digest", "technical report", "reporting" -> "report";
default -> genre;
};
}
private void parseIdentifier(Map<Field, String> fields, Identifier identifier, BibEntry entry) {
String type = identifier.type();
if ("citekey".equals(type) && entry.getCitationKey().isEmpty()) {
entry.setCitationKey(identifier.value());
} else if (!"local".equals(type) && !"citekey".equals(type)) {
// put all identifiers (doi, issn, isbn,...) except of local and citekey
putIfValueNotNull(fields, FieldFactory.parseField(identifier.type()), identifier.value());
}
}
private void putDate(Map<Field, String> fields, String elementName, String date) {
if (date != null) {
Optional<Date> optionalParsedDate = Date.parse(date);
switch (elementName) {
case "dateIssued" -> {
optionalParsedDate
.ifPresent(parsedDate -> fields.put(StandardField.DATE, parsedDate.getNormalized()));
optionalParsedDate.flatMap(Date::getYear)
.ifPresent(year -> fields.put(StandardField.YEAR, year.toString()));
optionalParsedDate.flatMap(Date::getMonth)
.ifPresent(month -> fields.put(StandardField.MONTH, month.getJabRefFormat()));
}
case "dateCreated" -> {
// If there was no year in date issued, then take the year from date created
fields.computeIfAbsent(StandardField.YEAR, k -> date.substring(0, 4));
fields.put(new UnknownField("created"), date);
}
case "dateCaptured" -> {
optionalParsedDate
.ifPresent(parsedDate -> fields.put(StandardField.CREATIONDATE, parsedDate.getNormalized()));
}
case "dateModified" -> {
optionalParsedDate
.ifPresent(parsedDate -> fields.put(StandardField.MODIFICATIONDATE, parsedDate.getNormalized()));
}
}
}
}
private void putIfListIsNotEmpty(Map<Field, String> fields, List<String> list, Field key, String separator) {
if (!list.isEmpty()) {
fields.put(key, list.stream().collect(Collectors.joining(separator)));
}
}
    /**
     * Combines collected namePart values into author strings.
     * Untyped parts are taken as complete names (a trailing comma stripped); typed parts
     * are accumulated: each "family" part flushes the previously gathered
     * "family, given given..." author before starting a new one.
     *
     * @param names   the namePart values with their type (family/given/date/termsOfAddress)
     * @param authors output list receiving one string per author
     */
    private void handleAuthorsInNamePart(List<Name> names, List<String> authors) {
        List<String> foreName = new ArrayList<>();
        String familyName = "";
        String author = "";
        for (Name name : names) {
            String type = name.type(); // date, family, given, termsOfAddress
            if ((type == null) && (name.value() != null)) {
                // untyped namePart: the whole value is one author
                String namePartValue = name.value();
                namePartValue = namePartValue.replaceAll(",$", "");
                authors.add(namePartValue);
            } else if ("family".equals(type) && (name.value() != null)) {
                // family should come first, so if family appears we can set the author then comes before
                // we have to check if forename and family name are not empty in case it's the first author
                if (!foreName.isEmpty() && !familyName.isEmpty()) {
                    // now set and add the old author
                    // NOTE(review): added untrimmed here but trimmed in the final flush below —
                    // presumably equivalent in practice; confirm before unifying
                    author = familyName + ", " + Joiner.on(" ").join(foreName);
                    authors.add(author);
                    // remove old forenames
                    foreName.clear();
                } else if (foreName.isEmpty() && !familyName.isEmpty()) {
                    authors.add(familyName);
                }
                familyName = name.value();
            } else if ("given".equals(type) && (name.value() != null)) {
                foreName.add(name.value());
            }
        }
        // last author is not added, so do it here
        if (!foreName.isEmpty() && !familyName.isEmpty()) {
            author = familyName + ", " + Joiner.on(" ").join(foreName);
            authors.add(author.trim());
            foreName.clear();
        } else if (foreName.isEmpty() && !familyName.isEmpty()) {
            authors.add(familyName.trim());
        }
    }
private void putIfValueNotNull(Map<Field, String> fields, Field field, String value) {
if (value != null) {
fields.put(field, value);
}
}
private void appendIfValueNotNullOrBlank(List<String> list, String value) {
if (value != null && !value.isBlank()) {
list.add(value);
}
}
    /** True when the reader is currently positioned on a text (CHARACTERS) event. */
    private boolean isCharacterXMLEvent(XMLStreamReader reader) {
        return reader.getEventType() == XMLEvent.CHARACTERS;
    }
    /** True when the reader is currently positioned on a start-element event. */
    private boolean isStartXMLEvent(XMLStreamReader reader) {
        return reader.getEventType() == XMLEvent.START_ELEMENT;
    }
    /** True when the reader is currently positioned on an end-element event. */
    private boolean isEndXMLEvent(XMLStreamReader reader) {
        return reader.getEventType() == XMLEvent.END_ELEMENT;
    }
    /** Display name of this importer. */
    @Override
    public String getName() {
        return "MODS";
    }
    /** MODS documents are XML files. */
    @Override
    public StandardFileType getFileType() {
        return StandardFileType.XML;
    }
    /** Short description shown in the import dialog. */
    @Override
    public String getDescription() {
        return "Importer for the MODS format";
    }
@Override
public List<BibEntry> parseEntries(InputStream inputStream) throws ParseException {
try {
return importDatabase(new BufferedReader(new InputStreamReader(inputStream, StandardCharsets.UTF_8))).getDatabase().getEntries();
} catch (IOException e) {
LOGGER.error(e.getLocalizedMessage(), e);
}
return Collections.emptyList();
}
}
| 25,488 | 38.640747 | 154 | java |
null | jabref-main/src/main/java/org/jabref/logic/importer/fileformat/MrDLibImporter.java | /**
*
*/
package org.jabref.logic.importer.fileformat;
import java.io.BufferedReader;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;
import java.util.stream.Collectors;
import org.jabref.logic.importer.Importer;
import org.jabref.logic.importer.ParserResult;
import org.jabref.logic.util.StandardFileType;
import org.jabref.model.database.BibDatabase;
import org.jabref.model.entry.BibEntry;
import org.jabref.model.entry.field.StandardField;
import kong.unirest.json.JSONException;
import kong.unirest.json.JSONObject;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* Handles importing of recommended articles to be displayed in the Related Articles tab.
*/
public class MrDLibImporter extends Importer {
private static final Logger LOGGER = LoggerFactory.getLogger(MrDLibImporter.class);
public ParserResult parserResult;
private String recommendationsHeading;
private String recommendationsDescription;
private String recommendationSetId;
@SuppressWarnings("unused")
@Override
public boolean isRecognizedFormat(BufferedReader input) throws IOException {
String recommendationsAsString = convertToString(input);
try {
JSONObject jsonObject = new JSONObject(recommendationsAsString);
if (!jsonObject.has("recommendations")) {
return false;
}
} catch (JSONException ex) {
return false;
}
return true;
}
    /**
     * Parses the Mr. DLib JSON response; the result is also cached in {@link #parserResult}.
     */
    @Override
    public ParserResult importDatabase(BufferedReader input) throws IOException {
        parse(input);
        return parserResult;
    }
    /** Display name of this importer. */
    @Override
    public String getName() {
        return "MrDLibImporter";
    }
    /** Mr. DLib responses are JSON documents. */
    @Override
    public StandardFileType getFileType() {
        return StandardFileType.JSON;
    }
    /** Short description of what this importer consumes. */
    @Override
    public String getDescription() {
        return "Takes valid JSON documents from the Mr. DLib API and parses them into a BibEntry";
    }
/**
* Convert Buffered Reader response to string for JSON parsing.
*
* @param input Takes a BufferedReader with a reference to the JSON document delivered by mdl server.
* @return Returns an String containing the JSON document.
* @throws IOException
*/
private String convertToString(BufferedReader input) throws IOException {
String line;
StringBuilder stringBuilder = new StringBuilder();
try {
while ((line = input.readLine()) != null) {
stringBuilder.append(line);
}
} catch (Exception e) {
LOGGER.error(e.getMessage());
}
return stringBuilder.toString();
}
    /**
     * Small pair-class to ensure the right order of the recommendations:
     * a BibEntry together with the rank the server assigned to it.
     */
    private static class RankedBibEntry {
        // the recommended entry itself
        public BibEntry entry;
        // the server-side ordering position (lower = earlier)
        public Integer rank;
        public RankedBibEntry(BibEntry entry, Integer rank) {
            this.rank = rank;
            this.entry = entry;
        }
    }
/**
* Parses the input from the server to a ParserResult
*
* @param input A BufferedReader with a reference to a string with the server's response
*/
private void parse(BufferedReader input) throws IOException {
// The Bibdatabase that gets returned in the ParserResult.
BibDatabase bibDatabase = new BibDatabase();
// The document to parse
String recommendationSet = convertToString(input);
JSONObject recommendationSetJson = new JSONObject(recommendationSet);
// The sorted BibEntries gets stored here later
List<RankedBibEntry> rankedBibEntries = new ArrayList<>();
// Get recommendations from response and populate bib entries
JSONObject recommendationsJson = recommendationSetJson.getJSONObject("recommendations");
Iterator<String> keys = recommendationsJson.keys();
while (keys.hasNext()) {
String key = keys.next();
JSONObject value = recommendationsJson.getJSONObject(key);
rankedBibEntries.add(populateBibEntry(value));
}
// Sort bib entries according to rank
rankedBibEntries.sort((RankedBibEntry rankedBibEntry1,
RankedBibEntry rankedBibEntry2) -> rankedBibEntry1.rank.compareTo(rankedBibEntry2.rank));
List<BibEntry> bibEntries = rankedBibEntries.stream().map(e -> e.entry).collect(Collectors.toList());
bibDatabase.insertEntries(bibEntries);
parserResult = new ParserResult(bibDatabase);
JSONObject label = recommendationSetJson.getJSONObject("label");
recommendationsHeading = label.getString("label-text");
recommendationsDescription = label.getString("label-description");
recommendationSetId = recommendationSetJson.getBigInteger("recommendation_set_id").toString();
}
/**
* Parses the JSON recommendations into bib entries
*
* @param recommendation JSON object of a single recommendation returned by Mr. DLib
* @return A ranked bib entry created from the recommendation input
*/
private RankedBibEntry populateBibEntry(JSONObject recommendation) {
BibEntry current = new BibEntry();
// parse each of the relevant fields into variables
String authors = isRecommendationFieldPresent(recommendation, "authors") ? recommendation.getString("authors") : "";
String title = isRecommendationFieldPresent(recommendation, "title") ? recommendation.getString("title") : "";
String year = isRecommendationFieldPresent(recommendation, "published_year") ? Integer.toString(recommendation.getInt("published_year")) : "";
String journal = isRecommendationFieldPresent(recommendation, "published_in") ? recommendation.getString("published_in") : "";
String url = isRecommendationFieldPresent(recommendation, "url") ? recommendation.getString("url") : "";
Integer rank = isRecommendationFieldPresent(recommendation, "recommendation_id") ? recommendation.getInt("recommendation_id") : 100;
// Populate bib entry with relevant data
current.setField(StandardField.AUTHOR, authors);
current.setField(StandardField.TITLE, title);
current.setField(StandardField.YEAR, year);
current.setField(StandardField.JOURNAL, journal);
current.setField(StandardField.URL, url);
return new RankedBibEntry(current, rank);
}
    /** True when the recommendation contains the field with a non-null value. */
    private Boolean isRecommendationFieldPresent(JSONObject recommendation, String field) {
        return recommendation.has(field) && !recommendation.isNull(field);
    }
    /** Returns the result of the last {@code parse}/{@code importDatabase} run. */
    public ParserResult getParserResult() {
        return parserResult;
    }
    /** Heading text ("label-text") from the last parsed recommendation set. */
    public String getRecommendationsHeading() {
        return recommendationsHeading;
    }
    /** Description text ("label-description") from the last parsed recommendation set. */
    public String getRecommendationsDescription() {
        return recommendationsDescription;
    }
    /** Identifier of the last parsed recommendation set. */
    public String getRecommendationSetId() {
        return recommendationSetId;
    }
}
| 7,042 | 36.068421 | 150 | java |
null | jabref-main/src/main/java/org/jabref/logic/importer/fileformat/MsBibImporter.java | package org.jabref.logic.importer.fileformat;
import java.io.BufferedReader;
import java.io.IOException;
import java.util.Objects;
import javax.xml.parsers.DocumentBuilder;
import javax.xml.parsers.DocumentBuilderFactory;
import javax.xml.parsers.ParserConfigurationException;
import org.jabref.logic.importer.Importer;
import org.jabref.logic.importer.ParserResult;
import org.jabref.logic.msbib.MSBibDatabase;
import org.jabref.logic.util.StandardFileType;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.w3c.dom.Document;
import org.xml.sax.ErrorHandler;
import org.xml.sax.InputSource;
import org.xml.sax.SAXException;
import org.xml.sax.SAXParseException;
/**
 * Importer for the MS Office 2007 XML bibliography format
 */
public class MsBibImporter extends Importer {

    private static final Logger LOGGER = LoggerFactory.getLogger(MsBibImporter.class);

    // XML parser features used to harden the parser against XXE attacks:
    // forbid DOCTYPE declarations entirely ...
    private static final String DISABLEDTD = "http://apache.org/xml/features/disallow-doctype-decl";
    // ... and never load external DTDs
    private static final String DISABLEEXTERNALDTD = "http://apache.org/xml/features/nonvalidating/load-external-dtd";

    @Override
    public boolean isRecognizedFormat(BufferedReader reader) throws IOException {
        Objects.requireNonNull(reader);

        /*
            The correct behavior is to return false if it is certain that the file is
            not of the MsBib type, and true otherwise. Returning true is the safe choice
            if not certain.
         */
        Document docin;
        try {
            DocumentBuilder dbuild = makeSafeDocBuilderFactory(DocumentBuilderFactory.newInstance()).newDocumentBuilder();
            // Escalate recoverable parse errors to exceptions so malformed XML
            // ends up in the catch below and is reported as "not MsBib".
            dbuild.setErrorHandler(new ErrorHandler() {

                @Override
                public void warning(SAXParseException exception) throws SAXException {
                    // ignore warnings
                }

                @Override
                public void fatalError(SAXParseException exception) throws SAXException {
                    throw exception;
                }

                @Override
                public void error(SAXParseException exception) throws SAXException {
                    throw exception;
                }
            });
            docin = dbuild.parse(new InputSource(reader));
        } catch (Exception e) {
            // any parse or configuration failure: certainly not recognizable XML
            return false;
        }
        // "docin == null" is purely defensive; parse() normally never returns null
        return (docin == null) || docin.getDocumentElement().getTagName().contains("Sources");
    }

    @Override
    public ParserResult importDatabase(BufferedReader reader) throws IOException {
        Objects.requireNonNull(reader);
        MSBibDatabase dbase = new MSBibDatabase();
        return new ParserResult(dbase.importEntriesFromXml(reader));
    }

    @Override
    public String getName() {
        return "MSBib";
    }

    @Override
    public StandardFileType getFileType() {
        return StandardFileType.XML;
    }

    @Override
    public String getDescription() {
        return "Importer for the MS Office 2007 XML bibliography format.";
    }

    /**
     * Hardens the given {@link DocumentBuilderFactory} against XXE attacks by
     * disallowing DOCTYPE declarations and disabling external DTD loading.
     * If a feature is not supported by the current XML processor, the factory
     * is returned only partially configured and a warning is logged.
     *
     * @param dBuild the DocumentBuilderFactory to be made XXE safe
     * @return the same factory instance, hardened as far as supported
     */
    private DocumentBuilderFactory makeSafeDocBuilderFactory(DocumentBuilderFactory dBuild) {
        String feature = null;

        try {
            feature = DISABLEDTD;
            dBuild.setFeature(feature, true);

            feature = DISABLEEXTERNALDTD;
            dBuild.setFeature(feature, false);

            dBuild.setXIncludeAware(false);
            dBuild.setExpandEntityReferences(false);
        } catch (ParserConfigurationException e) {
            LOGGER.warn("Builder not fully configured. Feature:'{}' is probably not supported by current XML processor. {}", feature, e);
        }

        return dBuild;
    }
}
| 4,013 | 33.016949 | 137 | java |
null | jabref-main/src/main/java/org/jabref/logic/importer/fileformat/OvidImporter.java | package org.jabref.logic.importer.fileformat;
import java.io.BufferedReader;
import java.io.IOException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import org.jabref.logic.importer.Importer;
import org.jabref.logic.importer.ParserResult;
import org.jabref.logic.util.StandardFileType;
import org.jabref.model.entry.AuthorList;
import org.jabref.model.entry.BibEntry;
import org.jabref.model.entry.field.Field;
import org.jabref.model.entry.field.InternalField;
import org.jabref.model.entry.field.StandardField;
import org.jabref.model.entry.field.UnknownField;
import org.jabref.model.entry.types.EntryType;
import org.jabref.model.entry.types.EntryTypeFactory;
import org.jabref.model.entry.types.StandardEntryType;
/**
* Imports an Ovid file.
*/
public class OvidImporter extends Importer {
    // "Source" line of a journal article: journal, volume, (issue), pages, year
    private static final Pattern OVID_SOURCE_PATTERN = Pattern
            .compile("Source ([ \\w&\\-,:]+)\\.[ ]+([0-9]+)\\(([\\w\\-]+)\\):([0-9]+\\-?[0-9]+?)\\,.*([0-9][0-9][0-9][0-9])");

    // same as above, but without the parenthesized issue part: journal, volume, pages, year
    private static final Pattern OVID_SOURCE_PATTERN_NO_ISSUE = Pattern
            .compile("Source ([ \\w&\\-,:]+)\\.[ ]+([0-9]+):([0-9]+\\-?[0-9]+?)\\,.*([0-9][0-9][0-9][0-9])");

    // alternative source layout: journal, volume, issue, month, year, pages
    private static final Pattern OVID_SOURCE_PATTERN_2 = Pattern.compile(
            "([ \\w&\\-,]+)\\. Vol ([0-9]+)\\(([\\w\\-]+)\\) ([A-Za-z]+) ([0-9][0-9][0-9][0-9]), ([0-9]+\\-?[0-9]+)");

    // book chapter ("incollection"): editor, year, booktitle, pages, address, publisher
    private static final Pattern INCOLLECTION_PATTERN = Pattern.compile(
            "(.+)\\(([0-9][0-9][0-9][0-9])\\)\\. ([ \\w&\\-,:]+)\\.[ ]+\\(pp. ([0-9]+\\-?[0-9]+?)\\).[A-Za-z0-9, ]+pp\\. "
                    + "([\\w, ]+): ([\\w, ]+)");

    // whole book: year, pages, address, publisher
    private static final Pattern BOOK_PATTERN = Pattern.compile(
            "\\(([0-9][0-9][0-9][0-9])\\)\\. [A-Za-z, ]+([0-9]+) pp\\. ([\\w, ]+): ([\\w, ]+)");

    // marker that starts each Ovid record, e.g. "<17>"
    private static final String OVID_PATTERN_STRING = "<[0-9]+>";

    private static final Pattern OVID_PATTERN = Pattern.compile(OVID_PATTERN_STRING);

    // maximum number of lines scanned when probing whether a file is an Ovid export
    private static final int MAX_ITEMS = 50;
    @Override
    public String getName() {
        return "Ovid";
    }

    @Override
    public StandardFileType getFileType() {
        return StandardFileType.TXT;
    }

    @Override
    public String getDescription() {
        return "Imports an Ovid file.";
    }
@Override
public boolean isRecognizedFormat(BufferedReader reader) throws IOException {
String str;
int i = 0;
while (((str = reader.readLine()) != null) && (i < MAX_ITEMS)) {
if (OvidImporter.OVID_PATTERN.matcher(str).find()) {
return true;
}
i++;
}
return false;
}
    /**
     * Reads the whole stream, splits it into records at the {@code <n>}
     * markers, splits each record into fields (a field starts at a line with
     * no leading space) and maps the recognized fields onto a {@link BibEntry}.
     */
    @Override
    public ParserResult importDatabase(BufferedReader reader) throws IOException {
        List<BibEntry> bibitems = new ArrayList<>();
        StringBuilder sb = new StringBuilder();
        String line;
        // Re-join the input, marking the start of every field (continuation
        // lines are indented with a space in Ovid exports).
        while ((line = reader.readLine()) != null) {
            if (!line.isEmpty() && (line.charAt(0) != ' ')) {
                sb.append("__NEWFIELD__");
            }
            sb.append(line);
            sb.append('\n');
        }
        String[] items = sb.toString().split(OVID_PATTERN_STRING);

        // items[0] is everything before the first record marker -> skip it
        for (int i = 1; i < items.length; i++) {
            Map<Field, String> h = new HashMap<>();
            String[] fields = items[i].split("__NEWFIELD__");
            for (String field : fields) {
                int linebreak = field.indexOf('\n');
                String fieldName = field.substring(0, linebreak).trim();
                String content = field.substring(linebreak).trim();

                // Check if this is the author field (due to a minor special treatment for this field):
                boolean isAuthor = (fieldName.indexOf("Author") == 0)
                        && !fieldName.contains("Author Keywords")
                        && !fieldName.contains("Author e-mail");

                // Remove unnecessary dots at the end of lines, unless this is the author field,
                // in which case a dot at the end could be significant:
                if (!isAuthor && content.endsWith(".")) {
                    content = content.substring(0, content.length() - 1);
                }
                if (isAuthor) {
                    h.put(StandardField.AUTHOR, content);
                } else if (fieldName.startsWith("Title")) {
                    // strip bracketed annotations such as "[German]"
                    content = content.replaceAll("\\[.+\\]", "").trim();
                    if (content.endsWith(".")) {
                        content = content.substring(0, content.length() - 1);
                    }
                    h.put(StandardField.TITLE, content);
                } else if (fieldName.startsWith("Chapter Title")) {
                    // temporary field; moved to TITLE below if the entry turns out to be an incollection
                    h.put(new UnknownField("chaptertitle"), content);
                } else if (fieldName.startsWith("Source")) {
                    // try the known source layouts, most specific first
                    Matcher matcher;
                    if ((matcher = OvidImporter.OVID_SOURCE_PATTERN.matcher(content)).find()) {
                        h.put(StandardField.JOURNAL, matcher.group(1));
                        h.put(StandardField.VOLUME, matcher.group(2));
                        h.put(StandardField.ISSUE, matcher.group(3));
                        h.put(StandardField.PAGES, matcher.group(4));
                        h.put(StandardField.YEAR, matcher.group(5));
                    } else if ((matcher = OvidImporter.OVID_SOURCE_PATTERN_NO_ISSUE.matcher(content)).find()) { // may be missing the issue
                        h.put(StandardField.JOURNAL, matcher.group(1));
                        h.put(StandardField.VOLUME, matcher.group(2));
                        h.put(StandardField.PAGES, matcher.group(3));
                        h.put(StandardField.YEAR, matcher.group(4));
                    } else if ((matcher = OvidImporter.OVID_SOURCE_PATTERN_2.matcher(content)).find()) {
                        h.put(StandardField.JOURNAL, matcher.group(1));
                        h.put(StandardField.VOLUME, matcher.group(2));
                        h.put(StandardField.ISSUE, matcher.group(3));
                        h.put(StandardField.MONTH, matcher.group(4));
                        h.put(StandardField.YEAR, matcher.group(5));
                        h.put(StandardField.PAGES, matcher.group(6));
                    } else if ((matcher = OvidImporter.INCOLLECTION_PATTERN.matcher(content)).find()) {
                        h.put(StandardField.EDITOR, matcher.group(1).replace(" (Ed)", ""));
                        h.put(StandardField.YEAR, matcher.group(2));
                        h.put(StandardField.BOOKTITLE, matcher.group(3));
                        h.put(StandardField.PAGES, matcher.group(4));
                        h.put(StandardField.ADDRESS, matcher.group(5));
                        h.put(StandardField.PUBLISHER, matcher.group(6));
                    } else if ((matcher = OvidImporter.BOOK_PATTERN.matcher(content)).find()) {
                        h.put(StandardField.YEAR, matcher.group(1));
                        h.put(StandardField.PAGES, matcher.group(2));
                        h.put(StandardField.ADDRESS, matcher.group(3));
                        h.put(StandardField.PUBLISHER, matcher.group(4));
                    }
                    // Add double hyphens to page ranges:
                    if (h.get(StandardField.PAGES) != null) {
                        h.put(StandardField.PAGES, h.get(StandardField.PAGES).replace("-", "--"));
                    }
                } else if ("Abstract".equals(fieldName)) {
                    h.put(StandardField.ABSTRACT, content);
                } else if ("Publication Type".equals(fieldName)) {
                    if (content.contains("Book")) {
                        h.put(InternalField.TYPE_HEADER, "book");
                    } else if (content.contains("Journal")) {
                        h.put(InternalField.TYPE_HEADER, "article");
                    } else if (content.contains("Conference Paper")) {
                        h.put(InternalField.TYPE_HEADER, "inproceedings");
                    }
                } else if (fieldName.startsWith("Language")) {
                    h.put(StandardField.LANGUAGE, content);
                } else if (fieldName.startsWith("Author Keywords")) {
                    content = content.replace(";", ",").replace(" ", " ");
                    h.put(StandardField.KEYWORDS, content);
                } else if (fieldName.startsWith("ISSN")) {
                    h.put(StandardField.ISSN, content);
                } else if (fieldName.startsWith("DOI Number")) {
                    h.put(StandardField.DOI, content);
                }
            }

            // Now we need to check if a book entry has given editors in the author field;
            // if so, rearrange:
            String auth = h.get(StandardField.AUTHOR);
            if ((auth != null) && auth.contains(" [Ed]")) {
                h.remove(StandardField.AUTHOR);
                h.put(StandardField.EDITOR, auth.replace(" [Ed]", ""));
            }

            // Rearrange names properly:
            auth = h.get(StandardField.AUTHOR);
            if (auth != null) {
                h.put(StandardField.AUTHOR, fixNames(auth));
            }
            auth = h.get(StandardField.EDITOR);
            if (auth != null) {
                h.put(StandardField.EDITOR, fixNames(auth));
            }

            // Set the entrytype properly:
            EntryType entryType = h.containsKey(InternalField.TYPE_HEADER) ? EntryTypeFactory.parse(h.get(InternalField.TYPE_HEADER)) : BibEntry.DEFAULT_TYPE;
            h.remove(InternalField.TYPE_HEADER);
            if (entryType.equals(StandardEntryType.Book) && h.containsKey(new UnknownField("chaptertitle"))) {
                // This means we have an "incollection" entry.
                entryType = StandardEntryType.InCollection;
                // Move the "chaptertitle" to just "title":
                h.put(StandardField.TITLE, h.remove(new UnknownField("chaptertitle")));
            }
            BibEntry b = new BibEntry(entryType);
            b.setField(h);

            bibitems.add(b);
        }

        return new ParserResult(bibitems);
    }
    /**
     * Convert a string of author names into a BibTeX-compatible format.
     * <p>
     * NOTE(review): the single-space separators in the second branch look
     * suspicious — name groups are usually separated by a wider gap than the
     * space inside "First Last". Confirm the separator literals were not
     * collapsed during a copy/paste.
     *
     * @param content The name string.
     * @return The formatted names.
     */
    private static String fixNames(String content) {
        String names;
        if (content.indexOf(';') > 0) { // LN FN; [LN FN;]*
            // strip stray characters, then turn ";" into BibTeX's "and"
            names = content.replaceAll("[^\\.A-Za-z,;\\- ]", "").replace(";", " and");
        } else if (content.indexOf(" ") > 0) {
            String[] sNames = content.split(" ");
            StringBuilder sb = new StringBuilder();
            for (int i = 0; i < sNames.length; i++) {
                if (i > 0) {
                    sb.append(" and ");
                }
                // "Last First" -> "Last, First"
                sb.append(sNames[i].replaceFirst(" ", ", "));
            }
            names = sb.toString();
        } else {
            names = content;
        }
        return AuthorList.fixAuthorLastNameFirst(names);
    }
}
| 11,213 | 45.338843 | 158 | java |
null | jabref-main/src/main/java/org/jabref/logic/importer/fileformat/PdfContentImporter.java | package org.jabref.logic.importer.fileformat;
import java.io.BufferedReader;
import java.io.IOException;
import java.io.StringWriter;
import java.nio.file.Path;
import java.util.ArrayList;
import java.util.Locale;
import java.util.Objects;
import java.util.Optional;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import org.jabref.logic.importer.ImportFormatPreferences;
import org.jabref.logic.importer.Importer;
import org.jabref.logic.importer.ParserResult;
import org.jabref.logic.l10n.Localization;
import org.jabref.logic.util.OS;
import org.jabref.logic.util.StandardFileType;
import org.jabref.logic.xmp.EncryptedPdfsNotSupportedException;
import org.jabref.logic.xmp.XmpUtilReader;
import org.jabref.model.entry.BibEntry;
import org.jabref.model.entry.LinkedFile;
import org.jabref.model.entry.field.StandardField;
import org.jabref.model.entry.types.EntryType;
import org.jabref.model.entry.types.StandardEntryType;
import org.jabref.model.strings.StringUtil;
import com.google.common.base.Strings;
import org.apache.pdfbox.pdmodel.PDDocument;
import org.apache.pdfbox.text.PDFTextStripper;
/**
* PdfContentImporter parses data of the first page of the PDF and creates a BibTeX entry.
* <p>
* Currently, Springer and IEEE formats are supported.
* <p>
*/
public class PdfContentImporter extends Importer {
    // first four-digit sequence is taken as the publication year
    private static final Pattern YEAR_EXTRACT_PATTERN = Pattern.compile("\\d{4}");

    private final ImportFormatPreferences importFormatPreferences;

    // the first page's text, split into individual lines
    private String[] lines;
    // current index into lines (shared cursor for the parsing helpers)
    private int lineIndex;
    // working buffer: (mostly) the text block currently being analyzed
    private String curString;
    // publication year, once found by extractYear()
    private String year;

    public PdfContentImporter(ImportFormatPreferences importFormatPreferences) {
        this.importFormatPreferences = importFormatPreferences;
    }
/**
* Removes all non-letter characters at the end
* <p>
* EXCEPTION: a closing bracket is NOT removed
* </p>
* <p>
* TODO: Additionally replace multiple subsequent spaces by one space, which will cause a rename of this method
* </p>
*/
private String removeNonLettersAtEnd(String input) {
String result = input.trim();
if (result.isEmpty()) {
return result;
}
char lastC = result.charAt(result.length() - 1);
while (!Character.isLetter(lastC) && (lastC != ')')) {
// if there is an asterix, a dot or something else at the end: remove it
result = result.substring(0, result.length() - 1);
if (result.isEmpty()) {
break;
} else {
lastC = result.charAt(result.length() - 1);
}
}
return result;
}
    /**
     * Normalizes an author line from the first PDF page into BibTeX's
     * "First Last and First Last" form. Handles comma-separated lists and
     * lists separated only by spaces; "et al." becomes the keyword "others".
     */
    private String streamlineNames(String names) {
        // TODO: replace with NormalizeNamesFormatter?!
        String res;
        // supported formats:
        // Matthias Schrepfer1, Johannes Wolf1, Jan Mendling1, and Hajo A. Reijers2
        if (names.contains(",")) {
            String[] splitNames = names.split(",");
            res = "";
            boolean isFirst = true;
            for (String splitName : splitNames) {
                String curName = removeNonLettersAtEnd(splitName);
                if (curName.indexOf("and") == 0) {
                    // skip possible ands between names
                    curName = curName.substring(3).trim();
                } else {
                    int posAnd = curName.indexOf(" and ");
                    if (posAnd >= 0) {
                        String nameBefore = curName.substring(0, posAnd);
                        // cannot be first name as "," is contained in the string
                        res = res.concat(" and ").concat(removeNonLettersAtEnd(nameBefore));
                        curName = curName.substring(posAnd + 5);
                    }
                }
                if (!"".equals(curName)) {
                    if ("et al.".equalsIgnoreCase(curName)) {
                        curName = "others";
                    }
                    if (isFirst) {
                        isFirst = false;
                    } else {
                        res = res.concat(" and ");
                    }
                    res = res.concat(curName);
                }
            }
        } else {
            // assumption: names separated by space
            String[] splitNames = names.split(" ");
            if (splitNames.length == 0) {
                // empty names... something was really wrong...
                return "";
            }
            boolean workedOnFirstOrMiddle = false;
            boolean isFirst = true;
            int i = 0;
            res = "";
            do {
                if (workedOnFirstOrMiddle) {
                    // last item was a first or a middle name
                    // we have to check whether we are on a middle name
                    // if not, just add the item as last name and add an "and"
                    if (splitNames[i].contains(".")) {
                        // we found a middle name
                        res = res.concat(splitNames[i]).concat(" ");
                    } else {
                        // last name found
                        res = res.concat(removeNonLettersAtEnd(splitNames[i]));
                        if (!splitNames[i].isEmpty() && Character.isLowerCase(splitNames[i].charAt(0))) {
                            // it is probably be "van", "vom", ...
                            // we just rely on the fact that these things are written in lower case letters
                            // do NOT finish name
                            res = res.concat(" ");
                        } else {
                            // finish this name
                            workedOnFirstOrMiddle = false;
                        }
                    }
                } else {
                    if ("and".equalsIgnoreCase(splitNames[i])) {
                        // do nothing, just increment i at the end of this iteration
                    } else {
                        if (isFirst) {
                            isFirst = false;
                        } else {
                            res = res.concat(" and ");
                        }
                        if ("et".equalsIgnoreCase(splitNames[i]) && (splitNames.length > (i + 1))
                                && "al.".equalsIgnoreCase(splitNames[i + 1])) {
                            res = res.concat("others");
                            break;
                        } else {
                            res = res.concat(splitNames[i]).concat(" ");
                            workedOnFirstOrMiddle = true;
                        }
                    }
                }
                i++;
            } while (i < splitNames.length);
        }
        return res;
    }
    /**
     * Normalizes a title; currently this only strips trailing non-letter
     * characters via {@link #removeNonLettersAtEnd(String)}.
     */
    private String streamlineTitle(String title) {
        return removeNonLettersAtEnd(title);
    }
@Override
public boolean isRecognizedFormat(BufferedReader input) throws IOException {
return input.readLine().startsWith("%PDF");
}
    // PDFs are binary, so the text-based import entry points are unsupported
    // by design; only importDatabase(Path) works for this importer.
    @Override
    public ParserResult importDatabase(BufferedReader reader) throws IOException {
        Objects.requireNonNull(reader);
        throw new UnsupportedOperationException("PdfContentImporter does not support importDatabase(BufferedReader reader)."
                + "Instead use importDatabase(Path filePath, Charset defaultEncoding).");
    }

    @Override
    public ParserResult importDatabase(String data) throws IOException {
        Objects.requireNonNull(data);
        throw new UnsupportedOperationException("PdfContentImporter does not support importDatabase(String data)."
                + "Instead use importDatabase(Path filePath, Charset defaultEncoding).");
    }
    /**
     * Parses the first page of the given PDF into at most one entry and
     * attaches the PDF itself as a linked file. Encrypted or unreadable
     * PDFs yield an error result instead of throwing.
     */
    @Override
    public ParserResult importDatabase(Path filePath) {
        final ArrayList<BibEntry> result = new ArrayList<>(1);
        try (PDDocument document = new XmpUtilReader().loadWithAutomaticDecryption(filePath)) {
            String firstPageContents = getFirstPageContents(document);
            Optional<BibEntry> entry = getEntryFromPDFContent(firstPageContents, OS.NEWLINE);
            entry.ifPresent(result::add);
        } catch (EncryptedPdfsNotSupportedException e) {
            return ParserResult.fromErrorMessage(Localization.lang("Decryption not supported."));
        } catch (IOException exception) {
            return ParserResult.fromError(exception);
        }
        // link the source PDF to the freshly created entry
        result.forEach(entry -> entry.addFile(new LinkedFile("", filePath.toAbsolutePath(), "PDF")));
        return new ParserResult(result);
    }
    // make this method package visible so we can test it
    Optional<BibEntry> getEntryFromPDFContent(String firstpageContents, String lineSeparator) {
        // idea: split[] contains the different lines
        // blocks are separated by empty lines
        // treat each block
        // or do special treatment at authors (which are not broken)
        // therefore, we do a line-based and not a block-based splitting
        // lineIndex points to the current line
        // curString (mostly) contains the current block
        // the different lines are joined into one and thereby separated by " "
        String firstpageContentsUnifiedLineBreaks = StringUtil.unifyLineBreaks(firstpageContents, lineSeparator);
        lines = firstpageContentsUnifiedLineBreaks.split(lineSeparator);
        lineIndex = 0; // to prevent array index out of bounds exception on second run we need to reset i to zero
        proceedToNextNonEmptyLine();
        if (lineIndex >= lines.length) {
            // PDF could not be parsed or is empty
            // return empty list
            return Optional.empty();
        }
        // we start at the current line
        curString = lines[lineIndex];
        // lineIndex might get incremented later and curString modified, too
        lineIndex = lineIndex + 1;
        String author;
        String editor = null;
        String abstractT = null;
        String keywords = null;
        String title;
        String conference = null;
        String DOI = null;
        String series = null;
        String volume = null;
        String number = null;
        String pages = null;
        // year is a class variable as the method extractYear() uses it;
        String publisher = null;
        EntryType type = StandardEntryType.InProceedings;
        if (curString.length() > 4) {
            // special case: possibly conference as first line on the page
            extractYear();
            if (curString.contains("Conference")) {
                fillCurStringWithNonEmptyLines();
                conference = curString;
                curString = "";
            } else {
                // e.g. Copyright (c) 1998 by the Genetics Society of America
                // future work: get year using RegEx
                String lower = curString.toLowerCase(Locale.ROOT);
                if (lower.contains("copyright")) {
                    fillCurStringWithNonEmptyLines();
                    publisher = curString;
                    curString = "";
                }
            }
        }
        // start: title
        fillCurStringWithNonEmptyLines();
        title = streamlineTitle(curString);
        curString = "";
        // lineIndex points to the next non-empty line
        // after title: authors
        author = null;
        while ((lineIndex < lines.length) && !"".equals(lines[lineIndex])) {
            // author names are unlikely to be lines among different lines
            // treat them line by line
            curString = streamlineNames(lines[lineIndex]);
            if (author == null) {
                author = curString;
            } else {
                if ("".equals(curString)) {
                    // if lines[lineIndex] is "and" then "" is returned by streamlineNames -> do nothing
                } else {
                    author = author.concat(" and ").concat(curString);
                }
            }
            lineIndex++;
        }
        curString = "";
        lineIndex++;
        // then, abstract and keywords follow
        while (lineIndex < lines.length) {
            curString = lines[lineIndex];
            if ((curString.length() >= "Abstract".length()) && "Abstract".equalsIgnoreCase(curString.substring(0, "Abstract".length()))) {
                if (curString.length() == "Abstract".length()) {
                    // only word "abstract" found -- skip line
                    curString = "";
                } else {
                    curString = curString.substring("Abstract".length() + 1).trim().concat(System.lineSeparator());
                }
                lineIndex++;
                // fillCurStringWithNonEmptyLines() cannot be used as that uses " " as line separator
                // whereas we need linebreak as separator
                while ((lineIndex < lines.length) && !"".equals(lines[lineIndex])) {
                    curString = curString.concat(lines[lineIndex]).concat(System.lineSeparator());
                    lineIndex++;
                }
                abstractT = curString.trim();
                lineIndex++;
            } else if ((curString.length() >= "Keywords".length()) && "Keywords".equalsIgnoreCase(curString.substring(0, "Keywords".length()))) {
                if (curString.length() == "Keywords".length()) {
                    // only word "Keywords" found -- skip line
                    curString = "";
                } else {
                    curString = curString.substring("Keywords".length() + 1).trim();
                }
                lineIndex++;
                fillCurStringWithNonEmptyLines();
                keywords = removeNonLettersAtEnd(curString);
            } else {
                String lower = curString.toLowerCase(Locale.ROOT);
                // "technical report"-style lines switch the entry type and may carry a report number
                int pos = lower.indexOf("technical");
                if (pos >= 0) {
                    type = StandardEntryType.TechReport;
                    pos = curString.trim().lastIndexOf(' ');
                    if (pos >= 0) {
                        // assumption: last character of curString is NOT ' '
                        // otherwise pos+1 leads to an out-of-bounds exception
                        number = curString.substring(pos + 1);
                    }
                }
                lineIndex++;
                proceedToNextNonEmptyLine();
            }
        }
        // jump to the end of the page text and work backwards block by block
        lineIndex = lines.length - 1;
        // last block: DOI, detailed information
        // sometimes, this information is in the third last block etc...
        // therefore, read until the beginning of the file
        while (lineIndex >= 0) {
            readLastBlock();
            // lineIndex now points to the block before or is -1
            // curString contains the last block, separated by " "
            extractYear();
            int pos = curString.indexOf("(Eds.)");
            if ((pos >= 0) && (publisher == null)) {
                // looks like a Springer last line
                // e.g: A. Persson and J. Stirna (Eds.): PoEM 2009, LNBIP 39, pp. 161-175, 2009.
                publisher = "Springer";
                editor = streamlineNames(curString.substring(0, pos - 1));
                int edslength = "(Eds.)".length();
                int posWithEditor = pos + edslength + 2; // +2 because of ":" after (Eds.) and the subsequent space
                if (posWithEditor > curString.length()) {
                    curString = curString.substring(posWithEditor - 2); // we don't have any spaces after Eds so we substract the 2
                } else {
                    curString = curString.substring(posWithEditor);
                }
                String[] springerSplit = curString.split(", ");
                if (springerSplit.length >= 4) {
                    conference = springerSplit[0];
                    String seriesData = springerSplit[1];
                    int lastSpace = seriesData.lastIndexOf(' ');
                    series = seriesData.substring(0, lastSpace);
                    volume = seriesData.substring(lastSpace + 1);
                    pages = springerSplit[2].substring(4);
                    if (springerSplit[3].length() >= 4) {
                        year = springerSplit[3].substring(0, 4);
                    }
                }
            } else {
                if (DOI == null) {
                    pos = curString.indexOf("DOI");
                    if (pos < 0) {
                        pos = curString.indexOf(StandardField.DOI.getName());
                    }
                    if (pos >= 0) {
                        pos += 3;
                        if (curString.length() > pos) {
                            char delimiter = curString.charAt(pos);
                            if ((delimiter == ':') || (delimiter == ' ')) {
                                pos++;
                            }
                            int nextSpace = curString.indexOf(' ', pos);
                            if (nextSpace > 0) {
                                DOI = curString.substring(pos, nextSpace);
                            } else {
                                DOI = curString.substring(pos);
                            }
                        }
                    }
                }
                if ((publisher == null) && curString.contains("IEEE")) {
                    // IEEE has the conference things at the end
                    publisher = "IEEE";
                    // year is extracted by extractYear
                    // otherwise, we could it determine as follows:
                    // String yearStr = curString.substring(curString.length()-4);
                    // if (isYear(yearStr)) {
                    // year = yearStr;
                    // }
                    if (conference == null) {
                        pos = curString.indexOf('$');
                        if (pos > 0) {
                            // we found the price
                            // before the price, the ISSN is stated
                            // skip that
                            pos -= 2;
                            while ((pos >= 0) && (curString.charAt(pos) != ' ')) {
                                pos--;
                            }
                            if (pos > 0) {
                                conference = curString.substring(0, pos);
                            }
                        }
                    }
                }
            }
        }
        BibEntry entry = new BibEntry();
        entry.setType(type);
        // TODO: institution parsing missing
        if (author != null) {
            entry.setField(StandardField.AUTHOR, author);
        }
        if (editor != null) {
            entry.setField(StandardField.EDITOR, editor);
        }
        if (abstractT != null) {
            entry.setField(StandardField.ABSTRACT, abstractT);
        }
        if (!Strings.isNullOrEmpty(keywords)) {
            entry.setField(StandardField.KEYWORDS, keywords);
        }
        if (title != null) {
            entry.setField(StandardField.TITLE, title);
        }
        if (conference != null) {
            entry.setField(StandardField.BOOKTITLE, conference);
        }
        if (DOI != null) {
            entry.setField(StandardField.DOI, DOI);
        }
        if (series != null) {
            entry.setField(StandardField.SERIES, series);
        }
        if (volume != null) {
            entry.setField(StandardField.VOLUME, volume);
        }
        if (number != null) {
            entry.setField(StandardField.NUMBER, number);
        }
        if (pages != null) {
            entry.setField(StandardField.PAGES, pages);
        }
        if (year != null) {
            entry.setField(StandardField.YEAR, year);
        }
        if (publisher != null) {
            entry.setField(StandardField.PUBLISHER, publisher);
        }
        return Optional.of(entry);
    }
private String getFirstPageContents(PDDocument document) throws IOException {
PDFTextStripper stripper = new PDFTextStripper();
stripper.setStartPage(1);
stripper.setEndPage(1);
stripper.setSortByPosition(true);
stripper.setParagraphEnd(System.lineSeparator());
StringWriter writer = new StringWriter();
stripper.writeText(document, writer);
return writer.toString();
}
/**
* Extract the year out of curString (if it is not yet defined)
*/
private void extractYear() {
if (year != null) {
return;
}
Matcher m = YEAR_EXTRACT_PATTERN.matcher(curString);
if (m.find()) {
year = curString.substring(m.start(), m.end());
}
}
/**
* PDFTextStripper normally does NOT produce multiple empty lines
* (besides at strange PDFs). These strange PDFs are handled here:
* proceed to next non-empty line
*/
private void proceedToNextNonEmptyLine() {
while ((lineIndex < lines.length) && "".equals(lines[lineIndex].trim())) {
lineIndex++;
}
}
/**
* Fill curString with lines until "" is found
* No trailing space is added
* i is advanced to the next non-empty line (ignoring white space)
* <p>
* Lines containing only white spaces are ignored,
* but NOT considered as ""
* <p>
* Uses GLOBAL variables lines, curLine, i
*/
private void fillCurStringWithNonEmptyLines() {
// ensure that curString does not end with " "
curString = curString.trim();
while ((lineIndex < lines.length) && !"".equals(lines[lineIndex])) {
String curLine = lines[lineIndex].trim();
if (!"".equals(curLine)) {
if (!curString.isEmpty()) {
// insert separating space if necessary
curString = curString.concat(" ");
}
curString = curString.concat(lines[lineIndex]);
}
lineIndex++;
}
proceedToNextNonEmptyLine();
}
    /**
     * resets curString
     * curString now contains the last block (until "" reached),
     * with the block's lines trimmed and joined by single spaces
     * (no trailing space is added)
     * <p>
     * invariant before/after: lineIndex points to the line before the last handled block
     */
    private void readLastBlock() {
        // skip trailing blank lines
        while ((lineIndex >= 0) && "".equals(lines[lineIndex].trim())) {
            lineIndex--;
        }
        // lineIndex is now at the end of a block
        int end = lineIndex;
        // find beginning
        while ((lineIndex >= 0) && !"".equals(lines[lineIndex])) {
            lineIndex--;
        }
        // lineIndex is now the line before the beginning of the block
        // this fulfills the invariant
        curString = "";
        for (int j = lineIndex + 1; j <= end; j++) {
            curString = curString.concat(lines[j].trim());
            if (j != end) {
                curString = curString.concat(" ");
            }
        }
    }
    @Override
    public String getName() {
        return "PDFcontent";
    }

    @Override
    public StandardFileType getFileType() {
        return StandardFileType.PDF;
    }

    @Override
    public String getDescription() {
        return "PdfContentImporter parses data of the first page of the PDF and creates a BibTeX entry. Currently, Springer and IEEE formats are supported.";
    }
}
| 23,549 | 37.733553 | 157 | java |
null | jabref-main/src/main/java/org/jabref/logic/importer/fileformat/PdfEmbeddedBibFileImporter.java | package org.jabref.logic.importer.fileformat;
import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStream;
import java.nio.file.Path;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import org.jabref.logic.importer.ImportFormatPreferences;
import org.jabref.logic.importer.Importer;
import org.jabref.logic.importer.ParseException;
import org.jabref.logic.importer.ParserResult;
import org.jabref.logic.l10n.Localization;
import org.jabref.logic.util.StandardFileType;
import org.jabref.logic.util.io.FileUtil;
import org.jabref.logic.xmp.EncryptedPdfsNotSupportedException;
import org.jabref.logic.xmp.XmpUtilReader;
import org.jabref.model.entry.BibEntry;
import org.apache.pdfbox.pdmodel.PDDocument;
import org.apache.pdfbox.pdmodel.PDDocumentNameDictionary;
import org.apache.pdfbox.pdmodel.PDEmbeddedFilesNameTreeNode;
import org.apache.pdfbox.pdmodel.PDPage;
import org.apache.pdfbox.pdmodel.common.PDNameTreeNode;
import org.apache.pdfbox.pdmodel.common.filespecification.PDComplexFileSpecification;
import org.apache.pdfbox.pdmodel.common.filespecification.PDEmbeddedFile;
import org.apache.pdfbox.pdmodel.interactive.annotation.PDAnnotation;
import org.apache.pdfbox.pdmodel.interactive.annotation.PDAnnotationFileAttachment;
/**
* PdfEmbeddedBibFileImporter imports an embedded Bib-File from the PDF.
*/
public class PdfEmbeddedBibFileImporter extends Importer {
    private final ImportFormatPreferences importFormatPreferences;
    // parser reused for every embedded .bib file found in a document
    private final BibtexParser bibtexParser;

    public PdfEmbeddedBibFileImporter(ImportFormatPreferences importFormatPreferences) {
        this.importFormatPreferences = importFormatPreferences;
        bibtexParser = new BibtexParser(importFormatPreferences);
    }
@Override
public boolean isRecognizedFormat(BufferedReader input) throws IOException {
return input.readLine().startsWith("%PDF");
}
    // PDFs are binary, so the text-based import entry points are unsupported
    // by design; only importDatabase(Path) works for this importer.
    @Override
    public ParserResult importDatabase(BufferedReader reader) throws IOException {
        Objects.requireNonNull(reader);
        throw new UnsupportedOperationException("PdfEmbeddedBibFileImporter does not support importDatabase(BufferedReader reader)."
                + "Instead use importDatabase(Path filePath, Charset defaultEncoding).");
    }

    @Override
    public ParserResult importDatabase(String data) throws IOException {
        Objects.requireNonNull(data);
        throw new UnsupportedOperationException("PdfEmbeddedBibFileImporter does not support importDatabase(String data)."
                + "Instead use importDatabase(Path filePath, Charset defaultEncoding).");
    }
    /**
     * Imports all entries found in .bib files embedded in the given PDF.
     * Encrypted or unreadable PDFs yield an error result instead of throwing.
     */
    @Override
    public ParserResult importDatabase(Path filePath) {
        try (PDDocument document = new XmpUtilReader().loadWithAutomaticDecryption(filePath)) {
            return new ParserResult(getEmbeddedBibFileEntries(document));
        } catch (EncryptedPdfsNotSupportedException e) {
            return ParserResult.fromErrorMessage(Localization.lang("Decryption not supported."));
        } catch (IOException | ParseException e) {
            return ParserResult.fromError(e);
        }
    }
    /**
     * Collects BibTeX entries from files embedded in the PDF, looking both at
     * the document catalog's embedded-files name tree and at file-attachment
     * annotations on every page.
     * <p>
     * Extraction of embedded files in pdfs adapted from:
     * https://svn.apache.org/repos/asf/pdfbox/trunk/examples/src/main/java/org/apache/pdfbox/examples/pdmodel/ExtractEmbeddedFiles.java
     */
    private List<BibEntry> getEmbeddedBibFileEntries(PDDocument document) throws IOException, ParseException {
        List<BibEntry> allParsedEntries = new ArrayList<>();
        PDDocumentNameDictionary nameDictionary = document.getDocumentCatalog().getNames();
        if (nameDictionary != null) {
            PDEmbeddedFilesNameTreeNode efTree = nameDictionary.getEmbeddedFiles();
            if (efTree != null) {
                Map<String, PDComplexFileSpecification> names = efTree.getNames();
                if (names != null) {
                    allParsedEntries.addAll(extractAndParseFiles(names));
                } else {
                    // the name tree may store its entries in child nodes instead
                    List<PDNameTreeNode<PDComplexFileSpecification>> kids = efTree.getKids();
                    if (kids != null) {
                        for (PDNameTreeNode<PDComplexFileSpecification> node : kids) {
                            names = node.getNames();
                            allParsedEntries.addAll(extractAndParseFiles(names));
                        }
                    }
                }
            }
        }
        // extract files from annotations
        for (PDPage page : document.getPages()) {
            for (PDAnnotation annotation : page.getAnnotations()) {
                if (annotation instanceof PDAnnotationFileAttachment) {
                    PDAnnotationFileAttachment annotationFileAttachment = (PDAnnotationFileAttachment) annotation;
                    PDComplexFileSpecification fileSpec = (PDComplexFileSpecification) annotationFileAttachment.getFile();
                    allParsedEntries.addAll(extractAndParseFile(getEmbeddedFile(fileSpec)));
                }
            }
        }
        return allParsedEntries;
    }
private List<BibEntry> extractAndParseFiles(Map<String, PDComplexFileSpecification> names) throws IOException, ParseException {
List<BibEntry> allParsedEntries = new ArrayList<>();
for (Map.Entry<String, PDComplexFileSpecification> entry : names.entrySet()) {
String filename = entry.getKey();
FileUtil.getFileExtension(filename);
if (FileUtil.isBibFile(Path.of(filename))) {
PDComplexFileSpecification fileSpec = entry.getValue();
allParsedEntries.addAll(extractAndParseFile(getEmbeddedFile(fileSpec)));
}
}
return allParsedEntries;
}
private List<BibEntry> extractAndParseFile(PDEmbeddedFile embeddedFile) throws IOException, ParseException {
return bibtexParser.parseEntries(embeddedFile.createInputStream());
}
private static PDEmbeddedFile getEmbeddedFile(PDComplexFileSpecification fileSpec) {
// search for the first available alternative of the embedded file
PDEmbeddedFile embeddedFile = null;
if (fileSpec != null) {
embeddedFile = fileSpec.getEmbeddedFileUnicode();
if (embeddedFile == null) {
embeddedFile = fileSpec.getEmbeddedFileDos();
}
if (embeddedFile == null) {
embeddedFile = fileSpec.getEmbeddedFileMac();
}
if (embeddedFile == null) {
embeddedFile = fileSpec.getEmbeddedFileUnix();
}
if (embeddedFile == null) {
embeddedFile = fileSpec.getEmbeddedFile();
}
}
return embeddedFile;
}
@Override
public String getName() {
return "PDFembeddedbibfile";
}
@Override
public StandardFileType getFileType() {
return StandardFileType.PDF;
}
@Override
public String getDescription() {
return "PdfEmbeddedBibFileImporter imports an embedded Bib-File from the PDF.";
}
}
| 7,099 | 41.771084 | 150 | java |
null | jabref-main/src/main/java/org/jabref/logic/importer/fileformat/PdfGrobidImporter.java | package org.jabref.logic.importer.fileformat;
import java.io.BufferedReader;
import java.io.IOException;
import java.nio.file.Path;
import java.util.List;
import java.util.Objects;
import java.util.Optional;
import org.jabref.logic.importer.ImportFormatPreferences;
import org.jabref.logic.importer.Importer;
import org.jabref.logic.importer.ParserResult;
import org.jabref.logic.importer.util.GrobidService;
import org.jabref.logic.util.StandardFileType;
import org.jabref.logic.util.io.FileUtil;
import org.jabref.model.entry.BibEntry;
import org.jabref.model.entry.LinkedFile;
/**
* Wraps the GrobidService function to be used as an Importer.
*/
/**
 * Wraps the GrobidService function to be used as an Importer.
 */
public class PdfGrobidImporter extends Importer {

    private final GrobidService grobidService;
    private final ImportFormatPreferences importFormatPreferences;

    public PdfGrobidImporter(ImportFormatPreferences importFormatPreferences) {
        this.grobidService = new GrobidService(importFormatPreferences.grobidPreferences());
        this.importFormatPreferences = importFormatPreferences;
    }

    @Override
    public String getName() {
        return "Grobid";
    }

    @Override
    public String getId() {
        return "grobidPdf";
    }

    @Override
    public String getDescription() {
        return "Wraps the GrobidService function to be used as an Importer.";
    }

    @Override
    public StandardFileType getFileType() {
        return StandardFileType.PDF;
    }

    @Override
    public ParserResult importDatabase(BufferedReader reader) throws IOException {
        Objects.requireNonNull(reader);
        throw new UnsupportedOperationException(
                "PdfGrobidImporter does not support importDatabase(BufferedReader reader)."
                        + "Instead use importDatabase(Path filePath, Charset defaultEncoding).");
    }

    @Override
    public ParserResult importDatabase(String data) throws IOException {
        Objects.requireNonNull(data);
        throw new UnsupportedOperationException(
                "PdfGrobidImporter does not support importDatabase(String data)."
                        + "Instead use importDatabase(Path filePath, Charset defaultEncoding).");
    }

    @Override
    public ParserResult importDatabase(Path filePath) {
        Objects.requireNonNull(filePath);
        try {
            List<BibEntry> parsedEntries = grobidService.processPDF(filePath, importFormatPreferences);
            // Link the source PDF to every entry Grobid produced
            parsedEntries.forEach(entry -> entry.addFile(new LinkedFile("", filePath.toAbsolutePath(), "PDF")));
            return new ParserResult(parsedEntries);
        } catch (Exception exception) {
            return ParserResult.fromError(exception);
        }
    }

    @Override
    public boolean isRecognizedFormat(BufferedReader reader) throws IOException {
        // Recognition is decided purely by file extension, see isRecognizedFormat(Path)
        Objects.requireNonNull(reader);
        return false;
    }

    /**
     * Returns whether the given stream contains data that is a.) a pdf and b.)
     * contains at least one BibEntry.
     */
    @Override
    public boolean isRecognizedFormat(Path filePath) throws IOException {
        Objects.requireNonNull(filePath);
        return FileUtil.getFileExtension(filePath)
                       .map(extension -> getFileType().getExtensions().contains(extension))
                       .orElse(false);
    }
}
| 3,339 | 32.4 | 105 | java |
null | jabref-main/src/main/java/org/jabref/logic/importer/fileformat/PdfMergeMetadataImporter.java | package org.jabref.logic.importer.fileformat;
import java.io.BufferedReader;
import java.io.IOException;
import java.nio.file.Path;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.Optional;
import java.util.Set;
import org.jabref.logic.importer.EntryBasedFetcher;
import org.jabref.logic.importer.FetcherException;
import org.jabref.logic.importer.ImportFormatPreferences;
import org.jabref.logic.importer.Importer;
import org.jabref.logic.importer.ParserResult;
import org.jabref.logic.importer.fetcher.DoiFetcher;
import org.jabref.logic.importer.fetcher.isbntobibtex.EbookDeIsbnFetcher;
import org.jabref.logic.importer.fetcher.isbntobibtex.IsbnFetcher;
import org.jabref.logic.importer.util.FileFieldParser;
import org.jabref.logic.util.StandardFileType;
import org.jabref.model.database.BibDatabaseContext;
import org.jabref.model.entry.BibEntry;
import org.jabref.model.entry.LinkedFile;
import org.jabref.model.entry.field.Field;
import org.jabref.model.entry.field.StandardField;
import org.jabref.preferences.FilePreferences;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* PdfEmbeddedBibFileImporter imports an embedded Bib-File from the PDF.
*/
/**
 * Imports metadata from a PDF by running several extraction strategies and merging their
 * results into a single {@link BibEntry}. When a candidate exposes a DOI or ISBN, entries
 * fetched from those identifiers are merged in with the highest priority.
 */
public class PdfMergeMetadataImporter extends Importer {

    private static final Logger LOGGER = LoggerFactory.getLogger(PdfMergeMetadataImporter.class);

    private final List<Importer> metadataImporters;
    private final ImportFormatPreferences importFormatPreferences;

    public PdfMergeMetadataImporter(ImportFormatPreferences importFormatPreferences) {
        this.importFormatPreferences = importFormatPreferences;
        this.metadataImporters = new ArrayList<>();
        // Order matters: earlier importers are considered more trustworthy,
        // and their fields win when candidates disagree (see importDatabase(Path))
        this.metadataImporters.add(new PdfVerbatimBibTextImporter(importFormatPreferences));
        this.metadataImporters.add(new PdfEmbeddedBibFileImporter(importFormatPreferences));
        if (importFormatPreferences.grobidPreferences().isGrobidEnabled()) {
            this.metadataImporters.add(new PdfGrobidImporter(importFormatPreferences));
        }
        this.metadataImporters.add(new PdfXmpImporter(importFormatPreferences.xmpPreferences()));
        this.metadataImporters.add(new PdfContentImporter(importFormatPreferences));
    }

    @Override
    public boolean isRecognizedFormat(BufferedReader input) throws IOException {
        // readLine() returns null for an empty stream; the previous unguarded call threw an NPE there
        String firstLine = input.readLine();
        return (firstLine != null) && firstLine.startsWith("%PDF");
    }

    @Override
    public ParserResult importDatabase(BufferedReader reader) throws IOException {
        Objects.requireNonNull(reader);
        throw new UnsupportedOperationException("PdfMergeMetadataImporter does not support importDatabase(BufferedReader reader)."
                + "Instead use importDatabase(Path filePath, Charset defaultEncoding).");
    }

    @Override
    public ParserResult importDatabase(String data) throws IOException {
        Objects.requireNonNull(data);
        throw new UnsupportedOperationException("PdfMergeMetadataImporter does not support importDatabase(String data)."
                + "Instead use importDatabase(Path filePath, Charset defaultEncoding).");
    }

    @Override
    public ParserResult importDatabase(Path filePath) throws IOException {
        List<BibEntry> candidates = new ArrayList<>();

        // Collect the first entry produced by each strategy
        for (Importer metadataImporter : metadataImporters) {
            List<BibEntry> extractedEntries = metadataImporter.importDatabase(filePath).getDatabase().getEntries();
            if (extractedEntries.isEmpty()) {
                continue;
            }
            candidates.add(extractedEntries.get(0));
        }
        if (candidates.isEmpty()) {
            return new ParserResult();
        }

        // Fetch additional candidates by DOI/ISBN; fetched data is considered the most reliable
        List<BibEntry> fetchedCandidates = new ArrayList<>();
        for (BibEntry candidate : candidates) {
            Optional<String> doi = candidate.getField(StandardField.DOI);
            if (doi.isPresent()) {
                try {
                    new DoiFetcher(importFormatPreferences).performSearchById(doi.get()).ifPresent(fetchedCandidates::add);
                } catch (FetcherException e) {
                    LOGGER.error("Fetching failed for DOI \"{}\".", doi.get(), e);
                }
            }
            Optional<String> isbn = candidate.getField(StandardField.ISBN);
            if (isbn.isPresent()) {
                try {
                    new IsbnFetcher(importFormatPreferences)
                            .addRetryFetcher(new EbookDeIsbnFetcher(importFormatPreferences))
                            // .addRetryFetcher(new DoiToBibtexConverterComIsbnFetcher(importFormatPreferences))
                            .performSearchById(isbn.get()).ifPresent(fetchedCandidates::add);
                } catch (FetcherException e) {
                    LOGGER.error("Fetching failed for ISBN \"{}\".", isbn.get(), e);
                }
            }
        }
        // Prepend fetched entries so that their fields take precedence in the merge below
        candidates.addAll(0, fetchedCandidates);

        BibEntry entry = new BibEntry();
        for (BibEntry candidate : candidates) {
            if (BibEntry.DEFAULT_TYPE.equals(entry.getType())) {
                entry.setType(candidate.getType());
            }
            Set<Field> presentFields = entry.getFields();
            for (Map.Entry<Field, String> fieldEntry : candidate.getFieldMap().entrySet()) {
                // Don't merge FILE fields that point to a stored file as we set that to filePath anyway.
                // Nevertheless, retain online links.
                if (StandardField.FILE == fieldEntry.getKey() &&
                        FileFieldParser.parse(fieldEntry.getValue()).stream().noneMatch(LinkedFile::isOnlineLink)) {
                    continue;
                }
                // Only overwrite non-present fields
                if (!presentFields.contains(fieldEntry.getKey())) {
                    entry.setField(fieldEntry.getKey(), fieldEntry.getValue());
                }
            }
        }

        entry.addFile(new LinkedFile("", filePath, StandardFileType.PDF.getName()));
        return new ParserResult(List.of(entry));
    }

    @Override
    public String getName() {
        return "PDFmergemetadata";
    }

    @Override
    public StandardFileType getFileType() {
        return StandardFileType.PDF;
    }

    @Override
    public String getDescription() {
        return "PdfMergeMetadataImporter imports metadata from a PDF using multiple strategies and merging the result.";
    }

    /**
     * Adapts this importer to the {@link EntryBasedFetcher} interface: resolves the linked
     * files of an existing entry and imports metadata from the first file that yields a result.
     */
    public static class EntryBasedFetcherWrapper extends PdfMergeMetadataImporter implements EntryBasedFetcher {

        private static final Logger LOGGER = LoggerFactory.getLogger(EntryBasedFetcherWrapper.class);
        private final FilePreferences filePreferences;
        private final BibDatabaseContext databaseContext;

        public EntryBasedFetcherWrapper(ImportFormatPreferences importFormatPreferences, FilePreferences filePreferences, BibDatabaseContext context) {
            super(importFormatPreferences);
            this.filePreferences = filePreferences;
            this.databaseContext = context;
        }

        @Override
        public List<BibEntry> performSearch(BibEntry entry) throws FetcherException {
            for (LinkedFile file : entry.getFiles()) {
                Optional<Path> filePath = file.findIn(databaseContext, filePreferences);
                if (filePath.isPresent()) {
                    try {
                        ParserResult result = importDatabase(filePath.get());
                        if (!result.isEmpty()) {
                            return result.getDatabase().getEntries();
                        }
                    } catch (IOException e) {
                        LOGGER.error("Cannot read {}", filePath.get(), e);
                    }
                }
            }
            return List.of();
        }
    }
}
| 7,883 | 43.044693 | 157 | java |
null | jabref-main/src/main/java/org/jabref/logic/importer/fileformat/PdfVerbatimBibTextImporter.java | package org.jabref.logic.importer.fileformat;
import java.io.BufferedReader;
import java.io.IOException;
import java.io.StringWriter;
import java.nio.file.Path;
import java.util.ArrayList;
import java.util.List;
import java.util.Objects;
import org.jabref.logic.importer.ImportFormatPreferences;
import org.jabref.logic.importer.Importer;
import org.jabref.logic.importer.ParseException;
import org.jabref.logic.importer.ParserResult;
import org.jabref.logic.l10n.Localization;
import org.jabref.logic.util.StandardFileType;
import org.jabref.logic.xmp.EncryptedPdfsNotSupportedException;
import org.jabref.logic.xmp.XmpUtilReader;
import org.jabref.model.entry.BibEntry;
import org.jabref.model.entry.LinkedFile;
import org.apache.pdfbox.pdmodel.PDDocument;
import org.apache.pdfbox.text.PDFTextStripper;
/**
* This importer imports a verbatim BibTeX entry from the first page of the PDF.
*/
/**
 * This importer imports a verbatim BibTeX entry from the first page of the PDF.
 */
public class PdfVerbatimBibTextImporter extends Importer {

    private final ImportFormatPreferences importFormatPreferences;

    public PdfVerbatimBibTextImporter(ImportFormatPreferences importFormatPreferences) {
        this.importFormatPreferences = importFormatPreferences;
    }

    @Override
    public boolean isRecognizedFormat(BufferedReader input) throws IOException {
        // readLine() returns null for an empty stream; the previous unguarded call threw an NPE there
        String firstLine = input.readLine();
        return (firstLine != null) && firstLine.startsWith("%PDF");
    }

    @Override
    public ParserResult importDatabase(BufferedReader reader) throws IOException {
        Objects.requireNonNull(reader);
        throw new UnsupportedOperationException("PdfVerbatimBibTextImporter does not support importDatabase(BufferedReader reader)."
                + "Instead use importDatabase(Path filePath, Charset defaultEncoding).");
    }

    @Override
    public ParserResult importDatabase(String data) throws IOException {
        Objects.requireNonNull(data);
        throw new UnsupportedOperationException("PdfVerbatimBibTextImporter does not support importDatabase(String data)."
                + "Instead use importDatabase(Path filePath, Charset defaultEncoding).");
    }

    @Override
    public ParserResult importDatabase(Path filePath) {
        List<BibEntry> result;
        try (PDDocument document = new XmpUtilReader().loadWithAutomaticDecryption(filePath)) {
            String firstPageContents = getFirstPageContents(document);
            BibtexParser parser = new BibtexParser(importFormatPreferences);
            result = parser.parseEntries(firstPageContents);
        } catch (EncryptedPdfsNotSupportedException e) {
            return ParserResult.fromErrorMessage(Localization.lang("Decryption not supported."));
        } catch (IOException | ParseException e) {
            return ParserResult.fromError(e);
        }
        // Link the source PDF and drop any free-floating page text collected before the first entry
        result.forEach(entry -> entry.addFile(new LinkedFile("", filePath.toAbsolutePath(), "PDF")));
        result.forEach(entry -> entry.setCommentsBeforeEntry(""));
        return new ParserResult(result);
    }

    /**
     * Extracts the plain text of the first page, sorted by position and with
     * paragraph breaks rendered as line separators.
     */
    private String getFirstPageContents(PDDocument document) throws IOException {
        PDFTextStripper stripper = new PDFTextStripper();
        stripper.setStartPage(1);
        stripper.setEndPage(1);
        stripper.setSortByPosition(true);
        stripper.setParagraphEnd(System.lineSeparator());
        StringWriter writer = new StringWriter();
        stripper.writeText(document, writer);
        return writer.toString();
    }

    @Override
    public String getName() {
        return "PdfVerbatimBibText";
    }

    @Override
    public StandardFileType getFileType() {
        return StandardFileType.PDF;
    }

    @Override
    public String getDescription() {
        return "PdfVerbatimBibTextImporter imports a verbatim BibTeX entry from the first page of the PDF.";
    }
}
| 3,763 | 36.267327 | 132 | java |
null | jabref-main/src/main/java/org/jabref/logic/importer/fileformat/PdfXmpImporter.java | package org.jabref.logic.importer.fileformat;
import java.io.BufferedReader;
import java.io.IOException;
import java.nio.file.Path;
import java.util.Objects;
import org.jabref.logic.importer.Importer;
import org.jabref.logic.importer.ParserResult;
import org.jabref.logic.l10n.Localization;
import org.jabref.logic.util.StandardFileType;
import org.jabref.logic.xmp.XmpPreferences;
import org.jabref.logic.xmp.XmpUtilReader;
import org.jabref.logic.xmp.XmpUtilShared;
/**
* Wraps the XMPUtility function to be used as an Importer.
*/
/**
 * Wraps the XMPUtility function to be used as an Importer.
 */
public class PdfXmpImporter extends Importer {

    private final XmpPreferences xmpPreferences;

    public PdfXmpImporter(XmpPreferences xmpPreferences) {
        this.xmpPreferences = xmpPreferences;
    }

    @Override
    public String getName() {
        return Localization.lang("XMP-annotated PDF");
    }

    @Override
    public String getId() {
        return "xmp";
    }

    @Override
    public String getDescription() {
        return "Wraps the XMPUtility function to be used as an Importer.";
    }

    @Override
    public StandardFileType getFileType() {
        return StandardFileType.PDF;
    }

    @Override
    public ParserResult importDatabase(BufferedReader reader) throws IOException {
        Objects.requireNonNull(reader);
        throw new UnsupportedOperationException(
                "PdfXmpImporter does not support importDatabase(BufferedReader reader)."
                        + "Instead use importDatabase(Path filePath, Charset defaultEncoding).");
    }

    @Override
    public ParserResult importDatabase(String data) throws IOException {
        Objects.requireNonNull(data);
        throw new UnsupportedOperationException(
                "PdfXmpImporter does not support importDatabase(String data)."
                        + "Instead use importDatabase(Path filePath, Charset defaultEncoding).");
    }

    @Override
    public ParserResult importDatabase(Path filePath) {
        Objects.requireNonNull(filePath);
        try {
            // Delegate the actual XMP extraction to the shared reader utility
            return new ParserResult(new XmpUtilReader().readXmp(filePath, xmpPreferences));
        } catch (IOException ioException) {
            return ParserResult.fromError(ioException);
        }
    }

    @Override
    public boolean isRecognizedFormat(BufferedReader reader) throws IOException {
        // Recognition needs the file itself, see isRecognizedFormat(Path)
        Objects.requireNonNull(reader);
        return false;
    }

    /**
     * Returns whether the given stream contains data that is a.) a pdf and b.)
     * contains at least one BibEntry.
     */
    @Override
    public boolean isRecognizedFormat(Path filePath) throws IOException {
        Objects.requireNonNull(filePath);
        return XmpUtilShared.hasMetadata(filePath, xmpPreferences);
    }
}
| 2,736 | 29.752809 | 97 | java |
null | jabref-main/src/main/java/org/jabref/logic/importer/fileformat/PicaXmlParser.java | package org.jabref.logic.importer.fileformat;
import java.io.IOException;
import java.io.InputStream;
import java.util.LinkedList;
import java.util.List;
import javax.xml.parsers.DocumentBuilder;
import javax.xml.parsers.DocumentBuilderFactory;
import javax.xml.parsers.ParserConfigurationException;
import org.jabref.logic.importer.ParseException;
import org.jabref.logic.importer.Parser;
import org.jabref.model.entry.BibEntry;
import org.jabref.model.entry.field.StandardField;
import org.jabref.model.entry.field.UnknownField;
import org.jabref.model.entry.types.EntryType;
import org.jabref.model.entry.types.StandardEntryType;
import com.google.common.base.Strings;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.w3c.dom.Document;
import org.w3c.dom.Element;
import org.w3c.dom.Node;
import org.w3c.dom.NodeList;
import org.xml.sax.SAXException;
public class PicaXmlParser implements Parser {
private static final Logger LOGGER = LoggerFactory.getLogger(PicaXmlParser.class);
@Override
public List<BibEntry> parseEntries(InputStream inputStream) throws ParseException {
try {
DocumentBuilder dbuild = DocumentBuilderFactory.newInstance().newDocumentBuilder();
Document content = dbuild.parse(inputStream);
return this.parseEntries(content);
} catch (ParserConfigurationException | SAXException | IOException exception) {
throw new ParseException(exception);
}
}
private List<BibEntry> parseEntries(Document content) {
List<BibEntry> result = new LinkedList<>();
// used for creating test cases
// XMLUtil.printDocument(content);
// Namespace srwNamespace = Namespace.getNamespace("srw","http://www.loc.gov/zing/srw/");
// Schleife ueber alle Teilergebnisse
// Element root = content.getDocumentElement();
Element root = (Element) content.getElementsByTagName("zs:searchRetrieveResponse").item(0);
Element srwrecords = getChild("zs:records", root);
if (srwrecords == null) {
// no records found -> return empty list
return result;
}
List<Element> records = getChildren("zs:record", srwrecords);
for (Element gvkRecord : records) {
Element e = getChild("zs:recordData", gvkRecord);
if (e != null) {
e = getChild("record", e);
if (e != null) {
BibEntry bibEntry = parseEntry(e);
// TODO: Add filtering on years (based on org.jabref.logic.importer.fetcher.transformers.YearRangeByFilteringQueryTransformer.getStartYear)
result.add(bibEntry);
}
}
}
return result;
}
private BibEntry parseEntry(Element e) {
String author = null;
String editor = null;
String title = null;
String publisher = null;
String year = null;
String address = null;
String series = null;
String edition = null;
String isbn = null;
String issn = null;
String number = null;
String pagetotal = null;
String volume = null;
String pages = null;
String journal = null;
String ppn = null;
String booktitle = null;
String url = null;
String note = null;
String quelle = "";
String mak = "";
String subtitle = "";
EntryType entryType = StandardEntryType.Book; // Default
// Alle relevanten Informationen einsammeln
List<Element> datafields = getChildren("datafield", e);
for (Element datafield : datafields) {
String tag = datafield.getAttribute("tag");
LOGGER.debug("tag: " + tag);
// mak
if ("002@".equals(tag)) {
mak = getSubfield("0", datafield);
if (mak == null) {
mak = "";
}
}
// ppn
if ("003@".equals(tag)) {
ppn = getSubfield("0", datafield);
}
// author
if ("028A".equals(tag)) {
String vorname = getSubfield("d", datafield);
String nachname = getSubfield("a", datafield);
if (author == null) {
author = "";
} else {
author = author.concat(" and ");
}
author = author.concat(vorname + " " + nachname);
}
// author (weiterer)
if ("028B".equals(tag)) {
String vorname = getSubfield("d", datafield);
String nachname = getSubfield("a", datafield);
if (author == null) {
author = "";
} else {
author = author.concat(" and ");
}
author = author.concat(vorname + " " + nachname);
}
// editor
if ("028C".equals(tag)) {
String vorname = getSubfield("d", datafield);
String nachname = getSubfield("a", datafield);
if (editor == null) {
editor = "";
} else {
editor = editor.concat(" and ");
}
editor = editor.concat(vorname + " " + nachname);
}
// title and subtitle
if ("021A".equals(tag)) {
title = getSubfield("a", datafield);
subtitle = getSubfield("d", datafield);
}
// publisher and address
if ("033A".equals(tag)) {
publisher = getSubfield("n", datafield);
address = getSubfield("p", datafield);
}
// year
if ("011@".equals(tag)) {
year = getSubfield("a", datafield);
}
// year, volume, number, pages (year bei Zeitschriften (evtl. redundant mit 011@))
if ("031A".equals(tag)) {
year = getSubfield("j", datafield);
volume = getSubfield("e", datafield);
number = getSubfield("a", datafield);
pages = getSubfield("h", datafield);
}
// 036D seems to contain more information than the other fields
// overwrite information using that field
// 036D also contains information normally found in 036E
if ("036D".equals(tag)) {
// 021 might have been present
if (title != null) {
// convert old title (contained in "a" of 021A) to volume
if (title.startsWith("@")) {
// "@" indicates a number
title = title.substring(1);
}
number = title;
}
// title and subtitle
title = getSubfield("a", datafield);
subtitle = getSubfield("d", datafield);
volume = getSubfield("l", datafield);
}
// series and number
if ("036E".equals(tag)) {
series = getSubfield("a", datafield);
number = getSubfield("l", datafield);
String kor = getSubfield("b", datafield);
if (kor != null) {
series = series + " / " + kor;
}
}
// note
if ("037A".equals(tag)) {
note = getSubfield("a", datafield);
}
// edition
if ("032@".equals(tag)) {
edition = getSubfield("a", datafield);
}
// isbn
if ("004A".equals(tag)) {
final String isbn10 = getSubfield("0", datafield);
final String isbn13 = getSubfield("A", datafield);
if (isbn10 != null) {
isbn = isbn10;
}
if (isbn13 != null) {
isbn = isbn13;
}
}
// Hochschulschriftenvermerk
// Bei einer Verlagsdissertation ist der Ort schon eingetragen
if ("037C".equals(tag)) {
if (address == null) {
address = getSubfield("b", datafield);
if (address != null) {
address = removeSortCharacters(address);
}
}
String st = getSubfield("a", datafield);
if ((st != null) && st.contains("Diss")) {
entryType = StandardEntryType.PhdThesis;
}
}
// journal oder booktitle
/* Problematiken hier: Sowohl für Artikel in
* Zeitschriften als für Beiträge in Büchern
* wird 027D verwendet. Der Titel muß je nach
* Fall booktitle oder journal zugeordnet
* werden. Auch bei Zeitschriften werden hier
* ggf. Verlag und Ort angegeben (sind dann
* eigentlich überflüssig), während bei
* Buchbeiträgen Verlag und Ort wichtig sind
* (sonst in Kategorie 033A).
*/
if ("027D".equals(tag)) {
journal = getSubfield("a", datafield);
booktitle = getSubfield("a", datafield);
address = getSubfield("p", datafield);
publisher = getSubfield("n", datafield);
}
// pagetotal
if ("034D".equals(tag)) {
pagetotal = getSubfield("a", datafield);
if (pagetotal != null) {
// S, S. etc. entfernen
pagetotal = pagetotal.replaceAll(" S\\.?$", "");
}
}
// Behandlung von Konferenzen
if ("030F".equals(tag)) {
address = getSubfield("k", datafield);
if (!"proceedings".equals(entryType)) {
subtitle = getSubfield("a", datafield);
}
entryType = StandardEntryType.Proceedings;
}
// Wenn eine Verlagsdiss vorliegt
if (entryType.equals(StandardEntryType.PhdThesis) && (isbn != null)) {
entryType = StandardEntryType.Book;
}
// Hilfskategorien zur Entscheidung @article
// oder @incollection; hier könnte man auch die
// ISBN herausparsen als Erleichterung für das
// Auffinden der Quelle, die über die
// SRU-Schnittstelle gelieferten Daten zur
// Quelle unvollständig sind (z.B. nicht Serie
// und Nummer angegeben werden)
if ("039B".equals(tag)) {
quelle = getSubfield("8", datafield);
}
if ("046R".equals(tag) && ((quelle == null) || quelle.isEmpty())) {
quelle = getSubfield("a", datafield);
}
// URLs behandeln
if ("009P".equals(tag) && ("03".equals(datafield.getAttribute("occurrence"))
|| "05".equals(datafield.getAttribute("occurrence"))) && (url == null)) {
url = getSubfield("a", datafield);
}
}
// Abfangen von Nulleintraegen
if (quelle == null) {
quelle = "";
}
// Nichtsortierzeichen entfernen
if (author != null) {
author = removeSortCharacters(author);
}
if (editor != null) {
editor = removeSortCharacters(editor);
}
if (title != null) {
title = removeSortCharacters(title);
}
if (subtitle != null) {
subtitle = removeSortCharacters(subtitle);
}
// Dokumenttyp bestimmen und Eintrag anlegen
if (mak.startsWith("As")) {
entryType = BibEntry.DEFAULT_TYPE;
if (quelle.contains("ISBN")) {
entryType = StandardEntryType.InCollection;
}
if (quelle.contains("ZDB-ID")) {
entryType = StandardEntryType.Article;
}
} else if (mak.isEmpty()) {
entryType = BibEntry.DEFAULT_TYPE;
} else if (mak.startsWith("O")) {
entryType = BibEntry.DEFAULT_TYPE;
// FIXME: online only available in Biblatex
// entryType = "online";
}
/*
* Wahrscheinlichkeit, dass ZDB-ID
* vorhanden ist, ist größer als ISBN bei
* Buchbeiträgen. Daher bei As?-Sätzen am besten immer
* dann @incollection annehmen, wenn weder ISBN noch
* ZDB-ID vorhanden sind.
*/
BibEntry result = new BibEntry(entryType);
// Zuordnung der Felder in Abhängigkeit vom Dokumenttyp
if (author != null) {
result.setField(StandardField.AUTHOR, author);
}
if (editor != null) {
result.setField(StandardField.EDITOR, editor);
}
if (title != null) {
result.setField(StandardField.TITLE, title);
}
if (!Strings.isNullOrEmpty(subtitle)) {
// ensure that first letter is an upper case letter
// there could be the edge case that the string is only one character long, therefore, this special treatment
// this is Apache commons lang StringUtils.capitalize (https://commons.apache.org/proper/commons-lang/javadocs/api-release/org/apache/commons/lang3/StringUtils.html#capitalize%28java.lang.String%29), but we don't want to add an additional dependency ('org.apache.commons:commons-lang3:3.4')
StringBuilder newSubtitle = new StringBuilder(
Character.toString(Character.toUpperCase(subtitle.charAt(0))));
if (subtitle.length() > 1) {
newSubtitle.append(subtitle.substring(1));
}
result.setField(StandardField.SUBTITLE, newSubtitle.toString());
}
if (publisher != null) {
result.setField(StandardField.PUBLISHER, publisher);
}
if (year != null) {
result.setField(StandardField.YEAR, year);
}
if (address != null) {
result.setField(StandardField.ADDRESS, address);
}
if (series != null) {
result.setField(StandardField.SERIES, series);
}
if (edition != null) {
result.setField(StandardField.EDITION, edition);
}
if (isbn != null) {
result.setField(StandardField.ISBN, isbn);
}
if (issn != null) {
result.setField(StandardField.ISSN, issn);
}
if (number != null) {
result.setField(StandardField.NUMBER, number);
}
if (pagetotal != null) {
result.setField(StandardField.PAGETOTAL, pagetotal);
}
if (pages != null) {
result.setField(StandardField.PAGES, pages);
}
if (volume != null) {
result.setField(StandardField.VOLUME, volume);
}
if (journal != null) {
result.setField(StandardField.JOURNAL, journal);
}
if (ppn != null) {
result.setField(new UnknownField("ppn_GVK"), ppn);
}
if (url != null) {
result.setField(StandardField.URL, url);
}
if (note != null) {
result.setField(StandardField.NOTE, note);
}
if ("article".equals(entryType) && (journal != null)) {
result.setField(StandardField.JOURNAL, journal);
} else if ("incollection".equals(entryType) && (booktitle != null)) {
result.setField(StandardField.BOOKTITLE, booktitle);
}
return result;
}
private String getSubfield(String a, Element datafield) {
List<Element> liste = getChildren("subfield", datafield);
for (Element subfield : liste) {
if (subfield.getAttribute("code").equals(a)) {
return subfield.getTextContent();
}
}
return null;
}
private Element getChild(String name, Element e) {
if (e == null) {
return null;
}
NodeList children = e.getChildNodes();
int j = children.getLength();
for (int i = 0; i < j; i++) {
Node test = children.item(i);
if (test.getNodeType() == Node.ELEMENT_NODE) {
Element entry = (Element) test;
if (entry.getTagName().equals(name)) {
return entry;
}
}
}
return null;
}
private List<Element> getChildren(String name, Element e) {
List<Element> result = new LinkedList<>();
NodeList children = e.getChildNodes();
int j = children.getLength();
for (int i = 0; i < j; i++) {
Node test = children.item(i);
if (test.getNodeType() == Node.ELEMENT_NODE) {
Element entry = (Element) test;
if (entry.getTagName().equals(name)) {
result.add(entry);
}
}
}
return result;
}
private String removeSortCharacters(String input) {
return input.replaceAll("\\@", "");
}
}
| 17,435 | 34.153226 | 303 | java |
null | jabref-main/src/main/java/org/jabref/logic/importer/fileformat/RepecNepImporter.java | package org.jabref.logic.importer.fileformat;
import java.io.BufferedReader;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.List;
import java.util.Objects;
import org.jabref.logic.importer.ImportFormatPreferences;
import org.jabref.logic.importer.Importer;
import org.jabref.logic.importer.ParserResult;
import org.jabref.logic.util.StandardFileType;
import org.jabref.model.entry.BibEntry;
import org.jabref.model.entry.Date;
import org.jabref.model.entry.field.StandardField;
import org.jabref.model.entry.field.UnknownField;
import org.jabref.model.entry.types.StandardEntryType;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* Imports a New Economics Papers-Message from the REPEC-NEP Service.
* <p>
* <p><a href="http://www.repec.org">RePEc (Research Papers in Economics)</a>
* is a collaborative effort of over 100 volunteers in 49 countries
* to enhance the dissemination of research in economics. The heart of
* the project is a decentralized database of working papers, journal
* articles and software components. All RePEc material is freely available.</p>
* At the time of writing RePEc holds over 300.000 items.</p>
* <p>
* <p><a href="http://nep.repec.org">NEP (New Economic Papers)</a> is an announcement
* service which filters information on new additions to RePEc into edited
* reports. The goal is to provide subscribers with up-to-date information
* to the research literature.</p>
* <p>
* <p>This importer is capable of importing NEP messages into JabRef.</p>
* <p>
* <p>There is no officially defined message format for NEP. NEP messages are assumed to have
* (and almost always have) the form given by the following semi-formal grammar:
* <pre>
* NEPMessage:
* MessageSection NEPMessage
* MessageSection
*
* MessageSection:
* OverviewMessageSection
* OtherMessageSection
*
* # we skip the overview
* OverviewMessageSection:
* 'In this issue we have: ' SectionSeparator OtherStuff
*
* OtherMessageSection:
* SectionSeparator OtherMessageSectionContent
*
* # we skip other stuff and read only full working paper references
* OtherMessageSectionContent:
* WorkingPaper EmptyLine OtherMessageSectionContent
* OtherStuff EmptyLine OtherMessageSectionContent
* ''
*
* OtherStuff:
* NonEmptyLine OtherStuff
* NonEmptyLine
*
* NonEmptyLine:
* a non-empty String that does not start with a number followed by a '.'
*
* # working papers are recognized by a number followed by a '.'
* # in a non-overview section
* WorkingPaper:
* Number'.' WhiteSpace TitleString EmptyLine Authors EmptyLine Abstract AdditionalFields
* Number'.' WhiteSpace TitleString AdditionalFields Abstract AdditionalFields
*
* TitleString:
* a String that may span several lines and should be joined
*
* # there must be at least one author
* Authors:
* Author '\n' Authors
* Author '\n'
*
* # optionally, an institution is given for an author
* Author:
* AuthorName
* AuthorName '(' Institution ')'
*
* # there are no rules about the name, it may be firstname lastname or lastname, firstname or anything else
* AuthorName:
* a non-empty String without '(' or ')' characters, not spanning more that one line
*
* Institution:
* a non-empty String that may span several lines
*
* Abstract:
* a (possibly empty) String that may span several lines
*
* AdditionalFields:
* AdditionalField '\n' AdditionalFields
* EmptyLine AdditionalFields
* ''
*
* AdditionalField:
* 'Keywords:' KeywordList
* 'URL:' non-empty String
* 'Date:' DateString
* 'JEL:' JelClassificationList
* 'By': Authors
*
* KeywordList:
* Keyword ',' KeywordList
* Keyword ';' KeywordList
* Keyword
*
* Keyword:
* non-empty String that does not contain ',' (may contain whitespace)
*
* # if no date is given, the current year as given by the system clock is assumed
* DateString:
* 'yyyy-MM-dd'
* 'yyyy-MM'
* 'yyyy'
*
* JelClassificationList:
* JelClassification JelClassificationList
* JelClassification
*
* # the JEL Classifications are set into a new BIBTEX-field 'jel'
* # they will appear if you add it as a field to one of the BIBTex Entry sections
* JelClassification:
* one of the allowed classes, see http://ideas.repec.org/j/
*
* SectionSeparator:
* '\n-----------------------------'
* </pre>
* </p>
*/
public class RepecNepImporter extends Importer {
    private static final Logger LOGGER = LoggerFactory.getLogger(RepecNepImporter.class);
    // Keywords that may introduce an additional field after a working-paper title
    // (see the grammar in the class javadoc); any other line is treated as free text.
    private static final Collection<String> RECOGNIZED_FIELDS = Arrays.asList("Keywords", "JEL", "Date", "URL", "By");
    private final ImportFormatPreferences importFormatPreferences;
    // Number of lines consumed so far; used only for error reporting in importDatabase().
    private int line;
    // Two-line sliding window over the input: the line just read ...
    private String lastLine = "";
    // ... and the one before it (needed to recognize separators and paper starts).
    private String preLine = "";
    // True while inside the "In this issue we have:" overview section, which is skipped.
    private boolean inOverviewSection;
    public RepecNepImporter(ImportFormatPreferences importFormatPreferences) {
        this.importFormatPreferences = importFormatPreferences;
    }
    @Override
    public String getName() {
        return "REPEC New Economic Papers (NEP)";
    }
    @Override
    public String getId() {
        return "repecnep";
    }
    @Override
    public StandardFileType getFileType() {
        return StandardFileType.TXT;
    }
    @Override
    public String getDescription() {
        return "Imports a New Economics Papers-Message from the REPEC-NEP Service.";
    }
    @Override
    public boolean isRecognizedFormat(BufferedReader reader) throws IOException {
        // read the first couple of lines
        // NEP message usually contain the String 'NEP: New Economics Papers'
        // or, they are from nep.repec.org
        StringBuilder startOfMessage = new StringBuilder();
        String tmpLine = reader.readLine();
        for (int i = 0; (i < 25) && (tmpLine != null); i++) {
            startOfMessage.append(tmpLine);
            tmpLine = reader.readLine();
        }
        return startOfMessage.toString().contains("NEP: New Economics Papers") || startOfMessage.toString().contains(
                "nep.repec.org");
    }
    /**
     * Checks whether the current line starts with one of the given keywords directly
     * followed by a colon, e.g. "Keywords:". A colon at position 0 does not count,
     * hence the {@code >= 1} check.
     */
    private boolean startsWithKeyword(Collection<String> keywords) {
        boolean result = this.lastLine.indexOf(':') >= 1;
        if (result) {
            String possibleKeyword = this.lastLine.substring(0, this.lastLine.indexOf(':'));
            result = keywords.contains(possibleKeyword);
        }
        return result;
    }
    /**
     * Advances the sliding window by one line: the current line becomes {@link #preLine}
     * and the next input line (or null at EOF) becomes {@link #lastLine}.
     */
    private void readLine(BufferedReader in) throws IOException {
        this.line++;
        this.preLine = this.lastLine;
        this.lastLine = in.readLine();
    }
    /**
     * Read multiple lines.
     * <p>
     * <p>Reads multiple lines until either
     * <ul>
     * <li>an empty line</li>
     * <li>the end of file</li>
     * <li>the next working paper or</li>
     * <li>a keyword</li>
     * </ul>
     * is found. Whitespace at start or end of lines is trimmed except for one blank character.</p>
     *
     * @return result
     */
    private String readMultipleLines(BufferedReader in) throws IOException {
        StringBuilder result = new StringBuilder(this.lastLine.trim());
        readLine(in);
        while ((this.lastLine != null) && !"".equals(this.lastLine.trim()) && !startsWithKeyword(RepecNepImporter.RECOGNIZED_FIELDS) && !isStartOfWorkingPaper()) {
            result.append(this.lastLine.isEmpty() ? this.lastLine.trim() : " " + this.lastLine.trim());
            readLine(in);
        }
        return result.toString();
    }
    /**
     * Implements grammar rule "TitleString".
     *
     * @throws IOException
     */
    private void parseTitleString(BibEntry be, BufferedReader in) throws IOException {
        // skip article number
        this.lastLine = this.lastLine.substring(this.lastLine.indexOf('.') + 1);
        be.setField(StandardField.TITLE, readMultipleLines(in));
    }
    /**
     * Implements grammar rule "Authors"
     *
     * @throws IOException
     */
    private void parseAuthors(BibEntry be, BufferedReader in) throws IOException {
        // read authors and institutions
        List<String> authors = new ArrayList<>();
        StringBuilder institutions = new StringBuilder();
        while ((this.lastLine != null) && !"".equals(this.lastLine) && !startsWithKeyword(RepecNepImporter.RECOGNIZED_FIELDS)) {
            // read single author
            String author;
            StringBuilder institution = new StringBuilder();
            boolean institutionDone;
            if (this.lastLine.indexOf('(') >= 0) {
                // "AuthorName (Institution)": the institution may continue on following lines
                // until the closing parenthesis appears.
                author = this.lastLine.substring(0, this.lastLine.indexOf('(')).trim();
                institutionDone = this.lastLine.indexOf(')') >= 1;
                institution
                        .append(this.lastLine.substring(this.lastLine.indexOf('(') + 1,
                                institutionDone && (this.lastLine
                                        .indexOf(')') > (this.lastLine.indexOf('(') + 1)) ? this.lastLine
                                        .indexOf(')') : this.lastLine.length())
                                .trim());
            } else {
                author = this.lastLine.trim();
                institutionDone = true;
            }
            readLine(in);
            // consume continuation lines of a multi-line institution
            while (!institutionDone && (this.lastLine != null)) {
                institutionDone = this.lastLine.indexOf(')') >= 1;
                institution.append(this.lastLine
                        .substring(0, institutionDone ? this.lastLine.indexOf(')') : this.lastLine.length()).trim());
                readLine(in);
            }
            authors.add(author);
            if (institution.length() > 0) {
                institutions.append(
                        (institutions.length() == 0) ? institution.toString() : " and " + institution.toString());
            }
        }
        if (!authors.isEmpty()) {
            be.setField(StandardField.AUTHOR, String.join(" and ", authors));
        }
        if (institutions.length() > 0) {
            be.setField(StandardField.INSTITUTION, institutions.toString());
        }
    }
    /**
     * Implements grammar rule "Abstract".
     *
     * @throws IOException
     */
    private void parseAbstract(BibEntry be, BufferedReader in) throws IOException {
        String theabstract = readMultipleLines(in);
        if (!"".equals(theabstract)) {
            be.setField(StandardField.ABSTRACT, theabstract);
        }
    }
    /**
     * Implements grammar rule "AdditionalFields".
     *
     * @param multilineUrlFieldAllowed whether a URL value may span several lines
     *                                 (false right after the title, true at the end of an entry)
     * @throws IOException
     */
    private void parseAdditionalFields(BibEntry be, boolean multilineUrlFieldAllowed, BufferedReader in)
            throws IOException {
        // one empty line is possible before fields start
        if ((this.lastLine != null) && "".equals(this.lastLine.trim())) {
            readLine(in);
        }
        // read other fields
        while ((this.lastLine != null) && !isStartOfWorkingPaper() && (startsWithKeyword(RepecNepImporter.RECOGNIZED_FIELDS) || "".equals(this.lastLine))) {
            // if multiple lines for a field are allowed and field consists of multiple lines, join them
            String keyword = "".equals(this.lastLine) ? "" : this.lastLine.substring(0, this.lastLine.indexOf(':')).trim();
            // skip keyword
            this.lastLine = "".equals(this.lastLine) ? "" : this.lastLine.substring(this.lastLine.indexOf(':') + 1).trim();
            if ("Keywords".equals(keyword)) {
                // parse keywords field
                String content = readMultipleLines(in);
                String[] keywords = content.split("[,;]");
                be.addKeywords(Arrays.asList(keywords),
                        importFormatPreferences.bibEntryPreferences().getKeywordSeparator());
            } else if ("JEL".equals(keyword)) {
                // parse JEL field
                be.setField(new UnknownField("jel"), readMultipleLines(in));
            } else if (keyword.startsWith("Date")) {
                // parse date field
                String content = readMultipleLines(in);
                Date.parse(content).ifPresent(be::setDate);
            } else if (keyword.startsWith("URL")) {
                // parse URL field
                String content;
                if (multilineUrlFieldAllowed) {
                    content = readMultipleLines(in);
                } else {
                    content = this.lastLine;
                    readLine(in);
                }
                be.setField(StandardField.URL, content);
            } else if (keyword.startsWith("By")) {
                // parse authors field
                parseAuthors(be, in);
            } else {
                readLine(in);
            }
        }
    }
    /**
     * if line starts with a string of the form 'x. ' and we are not in the overview
     * section, we have a working paper entry we are interested in
     */
    private boolean isStartOfWorkingPaper() {
        return this.lastLine.matches("\\d+\\.\\s.*") && !this.inOverviewSection && "".equals(this.preLine.trim());
    }
    @Override
    public ParserResult importDatabase(BufferedReader reader) throws IOException {
        Objects.requireNonNull(reader);
        List<BibEntry> bibitems = new ArrayList<>();
        // number of the paper currently being parsed; kept only for error messages
        String paperNoStr = null;
        this.line = 0;
        try {
            readLine(reader); // skip header and editor information
            while (this.lastLine != null) {
                // a dashed separator toggles whether we are in the (skipped) overview section,
                // depending on the line preceding it
                if (this.lastLine.startsWith("-----------------------------")) {
                    this.inOverviewSection = this.preLine.startsWith("In this issue we have");
                }
                if (isStartOfWorkingPaper()) {
                    BibEntry be = new BibEntry(StandardEntryType.TechReport);
                    paperNoStr = this.lastLine.substring(0, this.lastLine.indexOf('.'));
                    parseTitleString(be, reader);
                    if (startsWithKeyword(RepecNepImporter.RECOGNIZED_FIELDS)) {
                        parseAdditionalFields(be, false, reader);
                    } else {
                        readLine(reader); // skip empty line
                        parseAuthors(be, reader);
                        readLine(reader); // skip empty line
                    }
                    if (!startsWithKeyword(RepecNepImporter.RECOGNIZED_FIELDS)) {
                        parseAbstract(be, reader);
                    }
                    parseAdditionalFields(be, true, reader);
                    bibitems.add(be);
                    paperNoStr = null;
                } else {
                    this.preLine = this.lastLine;
                    readLine(reader);
                }
            }
        } catch (Exception e) {
            // deliberate broad catch: a malformed message should produce an error result,
            // not abort the whole import
            String message = "Error in REPEC-NEP import on line " + this.line;
            if (paperNoStr != null) {
                message += ", paper no. " + paperNoStr + ": ";
            }
            message += e.getLocalizedMessage();
            LOGGER.error(message, e);
            return ParserResult.fromErrorMessage(message);
        }
        return new ParserResult(bibitems);
    }
}
| 15,418 | 36.154217 | 163 | java |
null | jabref-main/src/main/java/org/jabref/logic/importer/fileformat/RisImporter.java | package org.jabref.logic.importer.fileformat;
import java.io.BufferedReader;
import java.io.IOException;
import java.time.Year;
import java.time.format.DateTimeFormatter;
import java.time.format.DateTimeParseException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.regex.Pattern;
import org.jabref.logic.importer.Importer;
import org.jabref.logic.importer.ParserResult;
import org.jabref.logic.util.OS;
import org.jabref.logic.util.StandardFileType;
import org.jabref.model.entry.AuthorList;
import org.jabref.model.entry.BibEntry;
import org.jabref.model.entry.Month;
import org.jabref.model.entry.field.Field;
import org.jabref.model.entry.field.StandardField;
import org.jabref.model.entry.field.UnknownField;
import org.jabref.model.entry.identifier.DOI;
import org.jabref.model.entry.types.EntryType;
import org.jabref.model.entry.types.IEEETranEntryType;
import org.jabref.model.entry.types.StandardEntryType;
public class RisImporter extends Importer {
    private static final Pattern RECOGNIZED_FORMAT_PATTERN = Pattern.compile("TY - .*");
    // NOTE(review): DateTimeFormatter is immutable and thread-safe; this field could be final.
    private static DateTimeFormatter formatter = DateTimeFormatter.ofPattern("yyyy");
    @Override
    public String getName() {
        return "RIS";
    }
    @Override
    public StandardFileType getFileType() {
        return StandardFileType.RIS;
    }
    @Override
    public String getDescription() {
        return "Imports a Biblioscape Tag File.";
    }
    @Override
    public boolean isRecognizedFormat(BufferedReader reader) throws IOException {
        // Our strategy is to look for the "TY - *" line.
        return reader.lines().anyMatch(line -> RECOGNIZED_FORMAT_PATTERN.matcher(line).find());
    }
    /**
     * Splits the input on the RIS end-of-record tag and maps each record's two-letter
     * tags onto BibTeX fields. Unknown but meaningful tags are stored as UnknownFields.
     */
    @Override
    public ParserResult importDatabase(BufferedReader reader) throws IOException {
        List<BibEntry> bibitems = new ArrayList<>();
        // use optional here, so that no exception will be thrown if the file is empty
        String linesAsString = reader.lines().reduce((line, nextline) -> line + "\n" + nextline).orElse("");
        // normalize Unicode en/em/horizontal-bar dashes before splitting on the end-of-record tag
        String[] entries = linesAsString.replace("\u2013", "-").replace("\u2014", "--").replace("\u2015", "--")
                .split("ER -.*(\\n)*");
        // stores all the date tags from highest to lowest priority
        List<String> dateTags = Arrays.asList("Y1", "PY", "DA", "Y2");
        for (String entry1 : entries) {
            String dateTag = "";
            String dateValue = "";
            int datePriority = dateTags.size();
            int tagPriority;
            EntryType type = StandardEntryType.Misc;
            String author = "";
            String editor = "";
            String startPage = "";
            String endPage = "";
            String comment = "";
            Optional<Month> month = Optional.empty();
            Map<Field, String> fields = new HashMap<>();
            String[] lines = entry1.split("\n");
            for (int j = 0; j < lines.length; j++) {
                StringBuilder current = new StringBuilder(lines[j]);
                boolean done = false;
                // join continuation lines: a following line that does not look like a
                // "XX - " tag belongs to the current tag's value
                while (!done && (j < (lines.length - 1))) {
                    if ((lines[j + 1].length() >= 6) && !" - ".equals(lines[j + 1].substring(2, 6))) {
                        if ((current.length() > 0) && !Character.isWhitespace(current.charAt(current.length() - 1))
                                && !Character.isWhitespace(lines[j + 1].charAt(0))) {
                            current.append(' ');
                        }
                        current.append(lines[j + 1]);
                        j++;
                    } else {
                        done = true;
                    }
                }
                String entry = current.toString();
                if (entry.length() < 6) {
                    continue;
                } else {
                    String tag = entry.substring(0, 2);
                    String value = entry.substring(6).trim();
                    if ("TY".equals(tag)) {
                        if ("BOOK".equals(value)) {
                            type = StandardEntryType.Book;
                        } else if ("JOUR".equals(value) || "MGZN".equals(value)) {
                            type = StandardEntryType.Article;
                        } else if ("THES".equals(value)) {
                            type = StandardEntryType.PhdThesis;
                        } else if ("UNPB".equals(value)) {
                            type = StandardEntryType.Unpublished;
                        } else if ("RPRT".equals(value)) {
                            type = StandardEntryType.TechReport;
                        } else if ("CONF".equals(value)) {
                            type = StandardEntryType.InProceedings;
                        } else if ("CHAP".equals(value)) {
                            type = StandardEntryType.InCollection;
                        } else if ("PAT".equals(value)) {
                            type = IEEETranEntryType.Patent;
                        } else {
                            type = StandardEntryType.Misc;
                        }
                    } else if ("T1".equals(tag) || "TI".equals(tag)) {
                        // repeated title tags are joined; a ":" is inserted unless the
                        // previous part already ends in punctuation
                        String oldVal = fields.get(StandardField.TITLE);
                        if (oldVal == null) {
                            fields.put(StandardField.TITLE, value);
                        } else {
                            if (oldVal.endsWith(":") || oldVal.endsWith(".") || oldVal.endsWith("?")) {
                                fields.put(StandardField.TITLE, oldVal + " " + value);
                            } else {
                                fields.put(StandardField.TITLE, oldVal + ": " + value);
                            }
                        }
                        fields.put(StandardField.TITLE, fields.get(StandardField.TITLE).replaceAll("\\s+", " ")); // Normalize whitespaces
                    } else if ("BT".equals(tag)) {
                        fields.put(StandardField.BOOKTITLE, value);
                    } else if (("T2".equals(tag) || "J2".equals(tag) || "JA".equals(tag)) && ((fields.get(StandardField.JOURNAL) == null) || "".equals(fields.get(StandardField.JOURNAL)))) {
                        // if there is no journal title, then put second title as journal title
                        fields.put(StandardField.JOURNAL, value);
                    } else if ("JO".equals(tag) || "J1".equals(tag) || "JF".equals(tag)) {
                        // if this field appears then this should be the journal title
                        fields.put(StandardField.JOURNAL, value);
                    } else if ("T3".equals(tag)) {
                        fields.put(StandardField.SERIES, value);
                    } else if ("AU".equals(tag) || "A1".equals(tag) || "A2".equals(tag) || "A3".equals(tag) || "A4".equals(tag)) {
                        if ("".equals(author)) {
                            author = value;
                        } else {
                            author += " and " + value;
                        }
                    } else if ("ED".equals(tag)) {
                        if (editor.isEmpty()) {
                            editor = value;
                        } else {
                            editor += " and " + value;
                        }
                    // NOTE(review): "JF" can never reach this branch (it is consumed
                    // unconditionally by the JO/J1/JF case above); "JA" reaches it only
                    // when a journal title has already been stored.
                    } else if ("JA".equals(tag) || "JF".equals(tag)) {
                        if (type.equals(StandardEntryType.InProceedings)) {
                            fields.put(StandardField.BOOKTITLE, value);
                        } else {
                            fields.put(StandardField.JOURNAL, value);
                        }
                    } else if ("LA".equals(tag)) {
                        fields.put(StandardField.LANGUAGE, value);
                    } else if ("CA".equals(tag)) {
                        fields.put(new UnknownField("caption"), value);
                    } else if ("DB".equals(tag)) {
                        fields.put(new UnknownField("database"), value);
                    } else if ("IS".equals(tag) || "AN".equals(tag) || "C7".equals(tag) || "M1".equals(tag)) {
                        fields.put(StandardField.NUMBER, value);
                    } else if ("SP".equals(tag)) {
                        startPage = value;
                    } else if ("PB".equals(tag)) {
                        if (type.equals(StandardEntryType.PhdThesis)) {
                            fields.put(StandardField.SCHOOL, value);
                        } else {
                            fields.put(StandardField.PUBLISHER, value);
                        }
                    } else if ("AD".equals(tag) || "CY".equals(tag) || "PP".equals(tag)) {
                        fields.put(StandardField.ADDRESS, value);
                    } else if ("EP".equals(tag)) {
                        endPage = value;
                        if (!endPage.isEmpty()) {
                            // prefix the page-range dashes here so that startPage + endPage
                            // concatenates directly below
                            endPage = "--" + endPage;
                        }
                    } else if ("ET".equals(tag)) {
                        fields.put(StandardField.EDITION, value);
                    } else if ("SN".equals(tag)) {
                        fields.put(StandardField.ISSN, value);
                    } else if ("VL".equals(tag)) {
                        fields.put(StandardField.VOLUME, value);
                    } else if ("N2".equals(tag) || "AB".equals(tag)) {
                        String oldAb = fields.get(StandardField.ABSTRACT);
                        if (oldAb == null) {
                            fields.put(StandardField.ABSTRACT, value);
                        } else if (!oldAb.equals(value) && !value.isEmpty()) {
                            fields.put(StandardField.ABSTRACT, oldAb + OS.NEWLINE + value);
                        }
                    } else if ("UR".equals(tag) || "L2".equals(tag) || "LK".equals(tag)) {
                        fields.put(StandardField.URL, value);
                    } else if (((tagPriority = dateTags.indexOf(tag)) != -1) && (value.length() >= 4)) {
                        // a date tag: keep the one with the highest priority whose year parses
                        if (tagPriority < datePriority) {
                            String year = value.substring(0, 4);
                            try {
                                Year.parse(year, formatter);
                                // if the year is parsebale we have found a higher priority date
                                dateTag = tag;
                                dateValue = value;
                                datePriority = tagPriority;
                            } catch (DateTimeParseException ex) {
                                // We can't parse the year, we ignore it
                            }
                        }
                    } else if ("KW".equals(tag)) {
                        if (fields.containsKey(StandardField.KEYWORDS)) {
                            String kw = fields.get(StandardField.KEYWORDS);
                            fields.put(StandardField.KEYWORDS, kw + ", " + value);
                        } else {
                            fields.put(StandardField.KEYWORDS, value);
                        }
                    } else if ("U1".equals(tag) || "U2".equals(tag) || "N1".equals(tag)) {
                        if (!comment.isEmpty()) {
                            comment = comment + OS.NEWLINE;
                        }
                        comment = comment + value;
                    } else if ("M3".equals(tag) || "DO".equals(tag)) {
                        addDoi(fields, value);
                    } else if ("C3".equals(tag)) {
                        fields.put(StandardField.EVENTTITLE, value);
                    // NOTE(review): "N1" is unreachable here -- it is already consumed by
                    // the U1/U2/N1 comment branch above; only "RN" takes effect.
                    } else if ("N1".equals(tag) || "RN".equals(tag)) {
                        fields.put(StandardField.NOTE, value);
                    } else if ("ST".equals(tag)) {
                        fields.put(StandardField.SHORTTITLE, value);
                    } else if ("C2".equals(tag)) {
                        fields.put(StandardField.EPRINT, value);
                        fields.put(StandardField.EPRINTTYPE, "pubmed");
                    } else if ("TA".equals(tag)) {
                        fields.put(StandardField.TRANSLATOR, value);
                        // fields for which there is no direct mapping in the bibtext standard
                    } else if ("AV".equals(tag)) {
                        fields.put(new UnknownField("archive_location"), value);
                    } else if ("CN".equals(tag) || "VO".equals(tag)) {
                        fields.put(new UnknownField("call-number"), value);
                    // NOTE(review): unreachable -- "DB" is already consumed by the
                    // earlier "database" branch above.
                    } else if ("DB".equals(tag)) {
                        fields.put(new UnknownField("archive"), value);
                    } else if ("NV".equals(tag)) {
                        fields.put(new UnknownField("number-of-volumes"), value);
                    } else if ("OP".equals(tag)) {
                        fields.put(new UnknownField("original-title"), value);
                    } else if ("RI".equals(tag)) {
                        fields.put(new UnknownField("reviewed-title"), value);
                    } else if ("RP".equals(tag)) {
                        fields.put(new UnknownField("status"), value);
                    } else if ("SE".equals(tag)) {
                        fields.put(new UnknownField("section"), value);
                    } else if ("ID".equals(tag)) {
                        fields.put(new UnknownField("refid"), value);
                    }
                }
                // fix authors
                // NOTE(review): this fix-up sits inside the line loop, so it re-runs after
                // every processed line; the repeated normalization appears idempotent, but
                // consider moving it after the loop -- verify against the importer tests.
                if (!author.isEmpty()) {
                    author = AuthorList.fixAuthorLastNameFirst(author);
                    fields.put(StandardField.AUTHOR, author);
                }
                if (!editor.isEmpty()) {
                    editor = AuthorList.fixAuthorLastNameFirst(editor);
                    fields.put(StandardField.EDITOR, editor);
                }
                if (!comment.isEmpty()) {
                    fields.put(StandardField.COMMENT, comment);
                }
                // endPage already carries its "--" prefix (see the "EP" branch)
                fields.put(StandardField.PAGES, startPage + endPage);
            }
            // if we found a date
            if (dateTag.length() > 0) {
                fields.put(StandardField.YEAR, dateValue.substring(0, 4));
                String[] parts = dateValue.split("/");
                if ((parts.length > 1) && !parts[1].isEmpty()) {
                    try {
                        int monthNumber = Integer.parseInt(parts[1]);
                        month = Month.getMonthByNumber(monthNumber);
                    } catch (NumberFormatException ex) {
                        // The month part is unparseable, so we ignore it.
                    }
                }
            }
            // Remove empty fields:
            fields.entrySet().removeIf(key -> (key.getValue() == null) || key.getValue().trim().isEmpty());
            // create one here
            // type is set in the loop above
            BibEntry entry = new BibEntry(type);
            entry.setField(fields);
            // month has a special treatment as we use the separate method "setMonth" of BibEntry instead of directly setting the value
            month.ifPresent(entry::setMonth);
            bibitems.add(entry);
        }
        return new ParserResult(bibitems);
    }
    /**
     * Parses the given value as a DOI and, if valid, stores its normalized form
     * in the DOI field.
     */
    private void addDoi(Map<Field, String> hm, String val) {
        Optional<DOI> parsedDoi = DOI.parse(val);
        parsedDoi.ifPresent(doi -> hm.put(StandardField.DOI, doi.getDOI()));
    }
}
| 15,707 | 48.55205 | 189 | java |
null | jabref-main/src/main/java/org/jabref/logic/importer/fileformat/SilverPlatterImporter.java | package org.jabref.logic.importer.fileformat;
import java.io.BufferedReader;
import java.io.IOException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Locale;
import java.util.Map;
import java.util.regex.Pattern;
import org.jabref.logic.importer.Importer;
import org.jabref.logic.importer.ParserResult;
import org.jabref.logic.util.StandardFileType;
import org.jabref.model.entry.AuthorList;
import org.jabref.model.entry.BibEntry;
import org.jabref.model.entry.field.Field;
import org.jabref.model.entry.field.StandardField;
import org.jabref.model.entry.types.EntryType;
import org.jabref.model.entry.types.EntryTypeFactory;
import org.jabref.model.entry.types.StandardEntryType;
/**
* Imports a SilverPlatter exported file. This is a poor format to parse, so it currently doesn't handle everything correctly.
*/
public class SilverPlatterImporter extends Importer {

    private static final Pattern START_PATTERN = Pattern.compile("Record.*INSPEC.*");

    @Override
    public String getName() {
        return "SilverPlatter";
    }

    @Override
    public StandardFileType getFileType() {
        return StandardFileType.SILVER_PLATTER;
    }

    @Override
    public String getDescription() {
        return "Imports a SilverPlatter exported file.";
    }

    @Override
    public boolean isRecognizedFormat(BufferedReader reader) throws IOException {
        // This format is very similar to Inspec, so we have a two-fold strategy:
        // If we see the flag signaling that it is an Inspec file, return false.
        // This flag should appear above the first entry and prevent us from
        // accepting the Inspec format. Then we look for the title entry.
        String str;
        while ((str = reader.readLine()) != null) {
            if (START_PATTERN.matcher(str).find()) {
                return false; // This is an Inspec file, so return false.
            }
            // NOTE(review): the literal is 4 characters but is compared against a
            // 5-character substring; if the literal is accurate this can never match.
            // Verify against the original file (the title marker may contain two spaces).
            if ((str.length() >= 5) && "TI: ".equals(str.substring(0, 5))) {
                return true;
            }
        }
        return false;
    }

    /**
     * Reads all records from the stream. Lines shorter than two characters act as
     * record separators, all other lines start a new field; records are then split
     * into fields and mapped onto BibTeX entries.
     */
    @Override
    public ParserResult importDatabase(BufferedReader reader) throws IOException {
        List<BibEntry> bibitems = new ArrayList<>();
        String str;
        StringBuilder sb = new StringBuilder();
        // Collapse the input into one string where "__::__" marks a record boundary
        // and "__NEWFIELD__" marks the start of a field line.
        while ((str = reader.readLine()) != null) {
            if (str.length() < 2) {
                sb.append("__::__").append(str);
            } else {
                sb.append("__NEWFIELD__").append(str);
            }
        }
        String[] entries = sb.toString().split("__::__");
        Map<Field, String> h = new HashMap<>();
        for (String entry : entries) {
            if (entry.trim().length() < 6) {
                continue;
            }
            // Reset per-entry state. Previously 'type' and 'isChapter' were declared
            // once outside this loop, so the values derived from one record leaked
            // into every following record that lacked its own "DT" field.
            EntryType type = StandardEntryType.Misc;
            boolean isChapter = false;
            h.clear();
            String[] fields = entry.split("__NEWFIELD__");
            for (String field : fields) {
                if (field.length() < 6) {
                    continue;
                }
                String f3 = field.substring(0, 2);
                String frest = field.substring(5);
                if ("TI".equals(f3)) {
                    h.put(StandardField.TITLE, frest);
                } else if ("AU".equals(f3)) {
                    // a trailing "(ed)" turns the name list into editors
                    if (frest.trim().endsWith("(ed)")) {
                        String ed = frest.trim();
                        ed = ed.substring(0, ed.length() - 4);
                        h.put(StandardField.EDITOR,
                                AuthorList.fixAuthorLastNameFirst(ed.replace(",-", ", ").replace(";", " and ")));
                    } else {
                        h.put(StandardField.AUTHOR,
                                AuthorList.fixAuthorLastNameFirst(frest.replace(",-", ", ").replace(";", " and ")));
                    }
                } else if ("AB".equals(f3)) {
                    h.put(StandardField.ABSTRACT, frest);
                } else if ("DE".equals(f3)) {
                    String kw = frest.replace("-;", ",").toLowerCase(Locale.ROOT);
                    h.put(StandardField.KEYWORDS, kw.substring(0, kw.length() - 1));
                } else if ("SO".equals(f3)) {
                    // "Journal-Name. ... yyyy; vol(issue): pages" -- picked apart by index
                    int m = frest.indexOf('.');
                    if (m >= 0) {
                        String jr = frest.substring(0, m);
                        h.put(StandardField.JOURNAL, jr.replace("-", " "));
                        frest = frest.substring(m);
                        m = frest.indexOf(';');
                        if (m >= 5) {
                            String yr = frest.substring(m - 5, m).trim();
                            h.put(StandardField.YEAR, yr);
                            frest = frest.substring(m);
                            m = frest.indexOf(':');
                            int issueIndex = frest.indexOf('(');
                            int endIssueIndex = frest.indexOf(')');
                            if (m >= 0) {
                                String pg = frest.substring(m + 1).trim();
                                h.put(StandardField.PAGES, pg);
                                // Guard against records without a "(issue)" part:
                                // substring(1, -1) used to throw StringIndexOutOfBoundsException.
                                if ((issueIndex >= 0) && (endIssueIndex > issueIndex)) {
                                    h.put(StandardField.VOLUME, frest.substring(1, issueIndex).trim());
                                    h.put(StandardField.ISSUE, frest.substring(issueIndex + 1, endIssueIndex).trim());
                                }
                            }
                        }
                    }
                } else if ("PB".equals(f3)) {
                    // "Publisher: City, year"
                    int m = frest.indexOf(':');
                    if (m >= 0) {
                        String jr = frest.substring(0, m);
                        h.put(StandardField.PUBLISHER, jr.replace("-", " ").trim());
                        frest = frest.substring(m);
                        m = frest.indexOf(", ");
                        if ((m + 2) < frest.length()) {
                            String yr = frest.substring(m + 2).trim();
                            try {
                                Integer.parseInt(yr);
                                h.put(StandardField.YEAR, yr);
                            } catch (NumberFormatException ex) {
                                // Let's assume that this wasn't a number, since it
                                // couldn't be parsed as an integer.
                            }
                        }
                    }
                } else if ("AF".equals(f3)) {
                    h.put(StandardField.SCHOOL, frest.trim());
                } else if ("DT".equals(f3)) {
                    frest = frest.trim();
                    if ("Monograph".equals(frest)) {
                        type = StandardEntryType.Book;
                    } else if (frest.startsWith("Dissertation")) {
                        type = StandardEntryType.PhdThesis;
                    } else if (frest.toLowerCase(Locale.ROOT).contains(StandardField.JOURNAL.getName())) {
                        type = StandardEntryType.Article;
                    } else if ("Contribution".equals(frest) || "Chapter".equals(frest)) {
                        type = StandardEntryType.InCollection;
                        // This entry type contains page numbers and booktitle in the
                        // title field.
                        isChapter = true;
                    } else {
                        type = EntryTypeFactory.parse(frest.replace(" ", ""));
                    }
                }
            }
            if (isChapter) {
                // Chapter titles look like «"Chapter title" in Book title»; keep only
                // the part before the « in » marker.
                String titleO = h.get(StandardField.TITLE);
                if (titleO != null) {
                    String title = titleO.trim();
                    int inPos = title.indexOf("\" in ");
                    if (inPos > 1) {
                        h.put(StandardField.TITLE, title.substring(0, inPos));
                    }
                }
            }
            BibEntry b = new BibEntry(type);
            // create one here
            b.setField(h);
            bibitems.add(b);
        }
        return new ParserResult(bibitems);
    }
}
| 8,081 | 41.314136 | 126 | java |
null | jabref-main/src/main/java/org/jabref/logic/importer/fileformat/medline/ArticleId.java | package org.jabref.logic.importer.fileformat.medline;
/**
 * One identifier attached to a Medline/PubMed article (e.g. a DOI, PMID or PMC id).
 *
 * @param idType  the identifier scheme, taken from the {@code IdType} attribute
 * @param content the identifier value itself
 */
public record ArticleId(String idType, String content) {
}
| 132 | 15.625 | 53 | java |
null | jabref-main/src/main/java/org/jabref/logic/importer/fileformat/medline/Investigator.java | package org.jabref.logic.importer.fileformat.medline;
import java.util.List;
/**
 * An investigator listed on a Medline/PubMed record.
 *
 * @param lastName        the investigator's family name
 * @param foreName        the investigator's given name
 * @param affiliationList the affiliations attached to this investigator
 */
public record Investigator(String lastName, String foreName, List<String> affiliationList) {
}
| 200 | 17.272727 | 53 | java |
null | jabref-main/src/main/java/org/jabref/logic/importer/fileformat/medline/MeshHeading.java | package org.jabref.logic.importer.fileformat.medline;
import java.util.List;
/**
 * A single MeSH (Medical Subject Headings) heading of a Medline record.
 *
 * @param descriptorName the MeSH descriptor
 * @param qualifierNames the qualifiers attached to the descriptor
 */
public record MeshHeading(String descriptorName, List<String> qualifierNames) {
}
| 179 | 17 | 53 | java |
null | jabref-main/src/main/java/org/jabref/logic/importer/fileformat/medline/OtherId.java | package org.jabref.logic.importer.fileformat.medline;
/**
 * An alternative identifier of a Medline record.
 *
 * @param source  the issuing source of the identifier
 * @param content the identifier value
 */
public record OtherId(String source, String content) {
}
| 130 | 15.375 | 53 | java |
null | jabref-main/src/main/java/org/jabref/logic/importer/fileformat/medline/PersonalNameSubject.java | package org.jabref.logic.importer.fileformat.medline;
/**
 * A person a Medline record is about (the personal-name subject of the article).
 *
 * @param lastName the subject's family name
 * @param foreName the subject's given name
 */
public record PersonalNameSubject(String lastName, String foreName) {
}
| 145 | 17.25 | 53 | java |
null | jabref-main/src/main/java/org/jabref/logic/importer/fileformat/mods/Identifier.java | package org.jabref.logic.importer.fileformat.mods;
/**
 * A typed identifier of a MODS record (e.g. a DOI or ISBN).
 *
 * @param type  the identifier scheme
 * @param value the identifier value
 */
public record Identifier(String type, String value) {
}
| 125 | 17 | 50 | java |
null | jabref-main/src/main/java/org/jabref/logic/importer/fileformat/mods/Name.java | package org.jabref.logic.importer.fileformat.mods;
/**
 * A name element of a MODS record.
 *
 * @param value the name as written in the document
 * @param type  the name type attribute (e.g. personal or corporate)
 */
public record Name(String value, String type) {
}
| 119 | 16.142857 | 50 | java |
null | jabref-main/src/main/java/org/jabref/logic/importer/fileformat/mods/RecordInfo.java | package org.jabref.logic.importer.fileformat.mods;
import java.util.ArrayList;
import java.util.List;
import java.util.Set;
/**
 * Holds the {@code <recordInfo>} data of a MODS document.
 *
 * @param recordContents the collected text contents of the recognized child elements
 * @param languages      the languages stated for the record
 */
public record RecordInfo(
        List<String> recordContents,
        List<String> languages) {
    /**
     * Names of the MODS child elements whose text content is collected into
     * {@link #recordContents}. Declared {@code final} so the shared set cannot be
     * reassigned; {@link Set#of} already makes its content immutable.
     */
    public static final Set<String> elementNameSet = Set.of(
            "recordContentSource",
            "recordCreationDate",
            "recordChangeDate",
            "recordIdentifier",
            "recordOrigin",
            "descriptionStandard",
            "recordInfoNote"
    );
    /**
     * Creates an empty record info whose lists are mutable, so parsers can
     * accumulate values into it.
     */
    public RecordInfo() {
        this(new ArrayList<>(), new ArrayList<>());
    }
}
| 598 | 22.96 | 54 | java |
null | jabref-main/src/main/java/org/jabref/logic/importer/fileformat/mods/package-info.java | //
// This file was generated by the JavaTM Architecture for XML Binding (JAXB) Reference Implementation, v2.2.4
// See <a href="http://java.sun.com/xml/jaxb">http://java.sun.com/xml/jaxb</a>
// Any modifications to this file will be lost upon recompilation of the source schema.
// Generated on: 2016.09.28 at 06:51:34 PM CEST
//
// This file must live in src/main/java to ensure that the MODS namespace is mapped
// to the prefix "mods"; this cannot be done by a Gradle task at the moment.
@jakarta.xml.bind.annotation.XmlSchema(namespace = "http://www.loc.gov/mods/v3", xmlns = {
        @jakarta.xml.bind.annotation.XmlNs(prefix = "mods", namespaceURI = "http://www.loc.gov/mods/v3")}, elementFormDefault = jakarta.xml.bind.annotation.XmlNsForm.QUALIFIED)
package org.jabref.logic.importer.fileformat.mods;
null | jabref-main/src/main/java/org/jabref/logic/importer/util/FileFieldParser.java | package org.jabref.logic.importer.util;
import java.net.MalformedURLException;
import java.net.URL;
import java.nio.file.InvalidPathException;
import java.nio.file.Path;
import java.util.ArrayList;
import java.util.List;
import org.jabref.model.entry.LinkedFile;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
public class FileFieldParser {
    private static final Logger LOGGER = LoggerFactory.getLogger(FileFieldParser.class);

    // Raw serialized "file" field value; null is allowed and yields an empty result.
    private final String value;

    // Parser state: characters collected so far for the element currently being read.
    private StringBuilder charactersOfCurrentElement;
    // Parser state: true while the current element looks like a Windows path (e.g. ":c:\..."),
    // in which case backslashes are taken literally instead of acting as escape characters.
    private boolean windowsPath;

    /**
     * @param value the serialized file field; the LaTeX-style escape "$\backslash$" is
     *              normalized to a plain backslash before parsing
     */
    public FileFieldParser(String value) {
        if (value == null) {
            this.value = null;
        } else {
            this.value = value.replace("$\\backslash$", "\\");
        }
    }

    /**
     * Converts the string representation of LinkedFileData to a List of LinkedFile
     *
     * The syntax of one element is description:path:type
     * Multiple elements are concatenated with ;
     *
     * The main challenges of the implementation are:
     *
     * <ul>
     *     <li>that XML characters might be included (thus one cannot simply split on ";")</li>
     *     <li>some characters might be escaped</li>
     *     <li>Windows absolute paths might be included without escaping</li>
     * </ul>
     */
    public static List<LinkedFile> parse(String value) {
        // We need state to have a more clean code. Thus, we instantiate the class and then return the result
        FileFieldParser fileFieldParser = new FileFieldParser(value);
        return fileFieldParser.parse();
    }

    /**
     * Runs the character-by-character state machine over {@link #value}.
     *
     * @return the parsed files; empty (mutable) list for null/blank input
     */
    public List<LinkedFile> parse() {
        List<LinkedFile> files = new ArrayList<>();
        if ((value == null) || value.trim().isEmpty()) {
            return files;
        }
        if (LinkedFile.isOnlineLink(value.trim())) {
            // Whole value is a single URL without description/type decoration.
            // NOTE(review): comment previously said "needs to be modifiable", but List.of(...)
            // is immutable — confirm callers do not mutate the returned list in this branch.
            try {
                return List.of(new LinkedFile(new URL(value), ""));
            } catch (MalformedURLException e) {
                LOGGER.error("invalid url", e);
                return files;
            }
        }
        // data of each LinkedFile as split string
        List<String> linkedFileData = new ArrayList<>();
        resetDataStructuresForNextElement();
        boolean inXmlChar = false;   // true while inside an XML entity such as "&#44;"
        boolean escaped = false;     // true when the previous character was an escaping backslash
        for (int i = 0; i < value.length(); i++) {
            char c = value.charAt(i);
            if (!escaped && (c == '\\')) {
                if (windowsPath) {
                    // Inside a Windows path, backslashes are path separators, not escapes.
                    charactersOfCurrentElement.append(c);
                    continue;
                } else {
                    // Escape character: the next character is taken literally.
                    escaped = true;
                    continue;
                }
            } else if (!escaped && (c == '&') && !inXmlChar) {
                // Check if we are entering an XML special character construct such
                // as ",", because we need to know in order to ignore the semicolon.
                charactersOfCurrentElement.append(c);
                if ((value.length() > (i + 1)) && (value.charAt(i + 1) == '#')) {
                    inXmlChar = true;
                }
            } else if (!escaped && inXmlChar && (c == ';')) {
                // Check if we are exiting an XML special character construct:
                charactersOfCurrentElement.append(c);
                inXmlChar = false;
            } else if (!escaped && (c == ':')) {
                if ((linkedFileData.size() == 1) && // we already parsed the description
                        (charactersOfCurrentElement.length() == 1)) { // we parsed one character
                    // special case of Windows paths
                    // Example: ":c:\test.pdf:PDF"
                    // We are at the second : (position 3 in the example) and "just" add it to the current element
                    charactersOfCurrentElement.append(c);
                    windowsPath = true;
                } else {
                    // We are in the next LinkedFile data element
                    linkedFileData.add(charactersOfCurrentElement.toString());
                    resetDataStructuresForNextElement();
                }
            } else if (!escaped && (c == ';') && !inXmlChar) {
                // Unescaped ';' outside an XML entity terminates the current LinkedFile.
                linkedFileData.add(charactersOfCurrentElement.toString());
                files.add(convert(linkedFileData));
                // next iteration
                resetDataStructuresForNextElement();
            } else {
                charactersOfCurrentElement.append(c);
            }
            escaped = false;
        }
        // Flush the trailing element (input usually does not end with ';').
        if (charactersOfCurrentElement.length() > 0) {
            linkedFileData.add(charactersOfCurrentElement.toString());
        }
        if (!linkedFileData.isEmpty()) {
            files.add(convert(linkedFileData));
        }
        return files;
    }

    /** Clears per-element parser state before reading the next element. */
    private void resetDataStructuresForNextElement() {
        charactersOfCurrentElement = new StringBuilder();
        windowsPath = false;
    }

    /**
     * Converts the given textual representation of a LinkedFile object
     *
     * SIDE EFFECT: The given entry list is cleared upon completion
     *
     * @param entry the list of elements in the linked file textual representation
     *              (description, link, file type — missing trailing parts are padded with "")
     * @return a LinkedFile object
     */
    static LinkedFile convert(List<String> entry) {
        // ensure list has at least 3 fields
        while (entry.size() < 3) {
            entry.add("");
        }
        LinkedFile field = null;
        if (LinkedFile.isOnlineLink(entry.get(1))) {
            try {
                field = new LinkedFile(entry.get(0), new URL(entry.get(1)), entry.get(2));
            } catch (MalformedURLException e) {
                // in case the URL is malformed, store it nevertheless
                field = new LinkedFile(entry.get(0), entry.get(1), entry.get(2));
            }
        }
        if (field == null) {
            String pathStr = entry.get(1);
            if (pathStr.contains("//")) {
                // In case the path contains //, we assume it is a malformed URL, not a malformed path.
                // On linux, the double slash would be converted to a single slash.
                field = new LinkedFile(entry.get(0), pathStr, entry.get(2));
            } else {
                try {
                    // there is no Path.isValidPath(String) method
                    Path path = Path.of(pathStr);
                    field = new LinkedFile(entry.get(0), path, entry.get(2));
                } catch (InvalidPathException e) {
                    // Ignored
                    LOGGER.debug("Invalid path object, continueing with string", e);
                    field = new LinkedFile(entry.get(0), pathStr, entry.get(2));
                }
            }
        }
        // link is the only mandatory field
        // If only one component was given, interpret it as the link regardless of its slot:
        if (field.getDescription().isEmpty() && field.getLink().isEmpty() && !field.getFileType().isEmpty()) {
            field = new LinkedFile("", Path.of(field.getFileType()), "");
        } else if (!field.getDescription().isEmpty() && field.getLink().isEmpty() && field.getFileType().isEmpty()) {
            field = new LinkedFile("", Path.of(field.getDescription()), "");
        }
        entry.clear();
        return field;
    }
}
| 7,233 | 37.478723 | 117 | java |
null | jabref-main/src/main/java/org/jabref/logic/importer/util/GrobidService.java | package org.jabref.logic.importer.util;
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.List;
import java.util.Optional;
import org.jabref.logic.importer.ImportFormatPreferences;
import org.jabref.logic.importer.ParseException;
import org.jabref.logic.importer.fetcher.GrobidPreferences;
import org.jabref.logic.importer.fileformat.BibtexParser;
import org.jabref.model.entry.BibEntry;
import org.jsoup.Connection;
import org.jsoup.Jsoup;
/**
* Implements an API to a GROBID server, as described at
* https://grobid.readthedocs.io/en/latest/Grobid-service/#grobid-web-services
* <p>
* Note: Currently a custom GROBID server is used...
* https://github.com/NikodemKch/grobid
* <p>
* The methods are structured to match the GROBID server api.
* Each method corresponds to a GROBID service request. Only the ones already used are already implemented.
*/
public class GrobidService {

    /**
     * Consolidation mode passed to the GROBID "consolidateCitations" parameter.
     * The numeric codes are defined by the GROBID service API.
     */
    public enum ConsolidateCitations {
        NO(0), WITH_METADATA(1), WITH_DOI_ONLY(2);
        private final int code;
        ConsolidateCitations(int code) {
            this.code = code;
        }
        /** @return the numeric code expected by the GROBID API */
        public int getCode() {
            return this.code;
        }
    }

    private final GrobidPreferences grobidPreferences;

    /**
     * @param grobidPreferences preferences holding the GROBID URL and enablement flag
     * @throws UnsupportedOperationException if GROBID is disabled in the preferences
     */
    public GrobidService(GrobidPreferences grobidPreferences) {
        this.grobidPreferences = grobidPreferences;
        if (!grobidPreferences.isGrobidEnabled()) {
            throw new UnsupportedOperationException("Grobid was used but not enabled.");
        }
    }

    /**
     * Calls the Grobid server for converting the citation into a BibEntry
     *
     * @return A BibEntry for the String
     * @throws IOException if an I/O exception during the call occurred or no BibTeX entry could be determined
     */
    public Optional<BibEntry> processCitation(String rawCitation, ImportFormatPreferences importFormatPreferences, ConsolidateCitations consolidateCitations) throws IOException, ParseException {
        Connection.Response response = Jsoup.connect(grobidPreferences.getGrobidURL() + "/api/processCitation")
                                            .header("Accept", MediaTypes.APPLICATION_BIBTEX)
                                            .data("citations", rawCitation)
                                            .data("consolidateCitations", String.valueOf(consolidateCitations.getCode()))
                                            .method(Connection.Method.POST)
                                            .ignoreContentType(true)
                                            .timeout(20000)
                                            .execute();
        String httpResponse = response.body();
        // GROBID answers with a placeholder "@misc" stub when it could not parse anything;
        // both the empty stub and the echoed-input stub are treated as failures.
        if (httpResponse == null || "@misc{-1,\n author = {}\n}\n".equals(httpResponse) || httpResponse.equals("@misc{-1,\n author = {" + rawCitation + "}\n}\n")) { // This filters empty BibTeX entries
            throw new IOException("The GROBID server response does not contain anything.");
        }
        return BibtexParser.singleFromString(httpResponse, importFormatPreferences);
    }

    /**
     * Extracts bibliographic data from the header of the given PDF via the
     * GROBID "processHeaderDocument" endpoint.
     *
     * @param filePath path of the PDF file to upload
     * @return parsed entries with their citation keys cleared (keys are regenerated by JabRef)
     * @throws IOException if the call failed or the server returned an empty stub
     */
    public List<BibEntry> processPDF(Path filePath, ImportFormatPreferences importFormatPreferences) throws IOException, ParseException {
        Connection.Response response = Jsoup.connect(grobidPreferences.getGrobidURL() + "/api/processHeaderDocument")
                                            .header("Accept", MediaTypes.APPLICATION_BIBTEX)
                                            .data("input", filePath.toString(), Files.newInputStream(filePath))
                                            .method(Connection.Method.POST)
                                            .ignoreContentType(true)
                                            .timeout(20000)
                                            .execute();
        String httpResponse = response.body();
        if (httpResponse == null || "@misc{-1,\n author = {}\n}\n".equals(httpResponse)) { // This filters empty BibTeX entries
            throw new IOException("The GROBID server response does not contain anything.");
        }
        BibtexParser parser = new BibtexParser(importFormatPreferences);
        List<BibEntry> result = parser.parseEntries(httpResponse);
        // Citation keys from GROBID are synthetic; clear them so JabRef can generate proper ones.
        result.forEach(entry -> entry.setCitationKey(""));
        return result;
    }
}
| 3,984 | 40.082474 | 203 | java |
null | jabref-main/src/main/java/org/jabref/logic/importer/util/GroupsParser.java | package org.jabref.logic.importer.util;
import java.io.IOException;
import java.nio.file.InvalidPathException;
import java.nio.file.Path;
import java.util.EnumSet;
import java.util.List;
import org.jabref.logic.auxparser.DefaultAuxParser;
import org.jabref.logic.groups.DefaultGroupsFactory;
import org.jabref.logic.importer.ParseException;
import org.jabref.logic.l10n.Localization;
import org.jabref.logic.util.MetadataSerializationConfiguration;
import org.jabref.logic.util.strings.QuotedStringTokenizer;
import org.jabref.model.database.BibDatabase;
import org.jabref.model.entry.field.Field;
import org.jabref.model.entry.field.FieldFactory;
import org.jabref.model.groups.AbstractGroup;
import org.jabref.model.groups.AutomaticKeywordGroup;
import org.jabref.model.groups.AutomaticPersonsGroup;
import org.jabref.model.groups.ExplicitGroup;
import org.jabref.model.groups.GroupHierarchyType;
import org.jabref.model.groups.GroupTreeNode;
import org.jabref.model.groups.KeywordGroup;
import org.jabref.model.groups.RegexKeywordGroup;
import org.jabref.model.groups.SearchGroup;
import org.jabref.model.groups.TexGroup;
import org.jabref.model.groups.WordKeywordGroup;
import org.jabref.model.metadata.MetaData;
import org.jabref.model.search.rules.SearchRules;
import org.jabref.model.search.rules.SearchRules.SearchFlags;
import org.jabref.model.strings.StringUtil;
import org.jabref.model.util.FileUpdateMonitor;
import org.slf4j.LoggerFactory;
/**
* Converts string representation of groups to a parsed {@link GroupTreeNode}.
*/
public class GroupsParser {
    private static final org.slf4j.Logger LOGGER = LoggerFactory.getLogger(GroupsParser.class);

    // Utility class: no instances.
    private GroupsParser() {
    }

    /**
     * Rebuilds the group tree from its flat serialized form.
     * Each line starts with an integer level followed by a space and the serialized group;
     * levels drive where a node is attached relative to the previous one.
     *
     * @throws ParseException if any line cannot be parsed; the original cause is wrapped
     *                        in a localized message
     */
    public static GroupTreeNode importGroups(List<String> orderedData, Character keywordSeparator, FileUpdateMonitor fileMonitor, MetaData metaData)
            throws ParseException {
        try {
            GroupTreeNode cursor = null;
            GroupTreeNode root = null;
            for (String string : orderedData) {
                // This allows to read databases that have been modified by, e.g., BibDesk
                string = string.trim();
                if (string.isEmpty()) {
                    continue;
                }
                int spaceIndex = string.indexOf(' ');
                if (spaceIndex <= 0) {
                    throw new ParseException("Expected \"" + string + "\" to contain whitespace");
                }
                int level = Integer.parseInt(string.substring(0, spaceIndex));
                AbstractGroup group = GroupsParser.fromString(string.substring(spaceIndex + 1), keywordSeparator, fileMonitor, metaData);
                GroupTreeNode newNode = GroupTreeNode.fromGroup(group);
                if (cursor == null) {
                    // create new root
                    cursor = newNode;
                    root = cursor;
                } else {
                    // insert at desired location
                    // Walk up until the cursor is shallower than the new node's level
                    // (or we hit the root), then attach the new node there.
                    while ((level <= cursor.getLevel()) && (cursor.getParent().isPresent())) {
                        cursor = cursor.getParent().get();
                    }
                    cursor.addChild(newNode);
                    cursor = newNode;
                }
            }
            return root;
        } catch (ParseException e) {
            throw new ParseException(Localization
                    .lang("Group tree could not be parsed. If you save the BibTeX library, all groups will be lost."),
                    e);
        }
    }

    /**
     * Re-create a group instance from a textual representation.
     *
     * The serialized form starts with a type-specific prefix (see
     * {@link MetadataSerializationConfiguration}) which selects the concrete parser.
     *
     * @param s The result from the group's toString() method.
     * @return New instance of the encoded group.
     * @throws ParseException If an error occurred and a group could not be created, e.g. due to a malformed regular expression.
     */
    public static AbstractGroup fromString(String s, Character keywordSeparator, FileUpdateMonitor fileMonitor, MetaData metaData)
            throws ParseException {
        if (s.startsWith(MetadataSerializationConfiguration.KEYWORD_GROUP_ID)) {
            return keywordGroupFromString(s, keywordSeparator);
        }
        if (s.startsWith(MetadataSerializationConfiguration.ALL_ENTRIES_GROUP_ID)) {
            return allEntriesGroupFromString(s);
        }
        if (s.startsWith(MetadataSerializationConfiguration.SEARCH_GROUP_ID)) {
            return searchGroupFromString(s);
        }
        if (s.startsWith(MetadataSerializationConfiguration.EXPLICIT_GROUP_ID)) {
            return explicitGroupFromString(s, keywordSeparator);
        }
        if (s.startsWith(MetadataSerializationConfiguration.LEGACY_EXPLICIT_GROUP_ID)) {
            return legacyExplicitGroupFromString(s, keywordSeparator);
        }
        if (s.startsWith(MetadataSerializationConfiguration.AUTOMATIC_PERSONS_GROUP_ID)) {
            return automaticPersonsGroupFromString(s);
        }
        if (s.startsWith(MetadataSerializationConfiguration.AUTOMATIC_KEYWORD_GROUP_ID)) {
            return automaticKeywordGroupFromString(s);
        }
        if (s.startsWith(MetadataSerializationConfiguration.TEX_GROUP_ID)) {
            return texGroupFromString(s, fileMonitor, metaData);
        }
        throw new ParseException("Unknown group: " + s);
    }

    /**
     * Parses a serialized {@link TexGroup}: name, hierarchy context, aux file path,
     * followed by the common group details (expanded/color/icon/description).
     */
    private static AbstractGroup texGroupFromString(String string, FileUpdateMonitor fileMonitor, MetaData metaData) throws ParseException {
        QuotedStringTokenizer tok = new QuotedStringTokenizer(string.substring(MetadataSerializationConfiguration.TEX_GROUP_ID
                .length()), MetadataSerializationConfiguration.GROUP_UNIT_SEPARATOR, MetadataSerializationConfiguration.GROUP_QUOTE_CHAR);
        String name = StringUtil.unquote(tok.nextToken(), MetadataSerializationConfiguration.GROUP_QUOTE_CHAR);
        GroupHierarchyType context = GroupHierarchyType.getByNumberOrDefault(Integer.parseInt(tok.nextToken()));
        try {
            Path path = Path.of(tok.nextToken());
            try {
                TexGroup newGroup = TexGroup.create(name, context, path, new DefaultAuxParser(new BibDatabase()), fileMonitor, metaData);
                addGroupDetails(tok, newGroup);
                return newGroup;
            } catch (IOException ex) {
                // Problem accessing file -> create without file monitoring
                LOGGER.warn("Could not access file {}. The group {} will not reflect changes to the aux file.", path, name, ex);
                TexGroup newGroup = TexGroup.create(name, context, path, new DefaultAuxParser(new BibDatabase()), metaData);
                addGroupDetails(tok, newGroup);
                return newGroup;
            }
        } catch (InvalidPathException | IOException ex) {
            throw new ParseException(ex);
        }
    }

    /** Parses a serialized {@link AutomaticPersonsGroup}: name, context, person field. */
    private static AbstractGroup automaticPersonsGroupFromString(String string) {
        if (!string.startsWith(MetadataSerializationConfiguration.AUTOMATIC_PERSONS_GROUP_ID)) {
            throw new IllegalArgumentException("KeywordGroup cannot be created from \"" + string + "\".");
        }
        QuotedStringTokenizer tok = new QuotedStringTokenizer(string.substring(MetadataSerializationConfiguration.AUTOMATIC_PERSONS_GROUP_ID
                .length()), MetadataSerializationConfiguration.GROUP_UNIT_SEPARATOR, MetadataSerializationConfiguration.GROUP_QUOTE_CHAR);
        String name = StringUtil.unquote(tok.nextToken(), MetadataSerializationConfiguration.GROUP_QUOTE_CHAR);
        GroupHierarchyType context = GroupHierarchyType.getByNumberOrDefault(Integer.parseInt(tok.nextToken()));
        Field field = FieldFactory.parseField(StringUtil.unquote(tok.nextToken(), MetadataSerializationConfiguration.GROUP_QUOTE_CHAR));
        AutomaticPersonsGroup newGroup = new AutomaticPersonsGroup(name, context, field);
        addGroupDetails(tok, newGroup);
        return newGroup;
    }

    /**
     * Parses a serialized {@link AutomaticKeywordGroup}: name, context, field,
     * keyword delimiter, hierarchical delimiter.
     */
    private static AbstractGroup automaticKeywordGroupFromString(String string) {
        if (!string.startsWith(MetadataSerializationConfiguration.AUTOMATIC_KEYWORD_GROUP_ID)) {
            throw new IllegalArgumentException("KeywordGroup cannot be created from \"" + string + "\".");
        }
        QuotedStringTokenizer tok = new QuotedStringTokenizer(string.substring(MetadataSerializationConfiguration.AUTOMATIC_KEYWORD_GROUP_ID
                .length()), MetadataSerializationConfiguration.GROUP_UNIT_SEPARATOR, MetadataSerializationConfiguration.GROUP_QUOTE_CHAR);
        String name = StringUtil.unquote(tok.nextToken(), MetadataSerializationConfiguration.GROUP_QUOTE_CHAR);
        GroupHierarchyType context = GroupHierarchyType.getByNumberOrDefault(Integer.parseInt(tok.nextToken()));
        Field field = FieldFactory.parseField(StringUtil.unquote(tok.nextToken(), MetadataSerializationConfiguration.GROUP_QUOTE_CHAR));
        Character delimiter = StringUtil.unquote(tok.nextToken(), MetadataSerializationConfiguration.GROUP_QUOTE_CHAR).charAt(0);
        Character hierarchicalDelimiter = StringUtil.unquote(tok.nextToken(), MetadataSerializationConfiguration.GROUP_QUOTE_CHAR).charAt(0);
        AutomaticKeywordGroup newGroup = new AutomaticKeywordGroup(name, context, field, delimiter, hierarchicalDelimiter);
        addGroupDetails(tok, newGroup);
        return newGroup;
    }

    /**
     * Parses s and recreates the KeywordGroup from it.
     *
     * Serialized order: name, context, field, expression, case-sensitive flag, regex flag.
     * The regex flag decides between {@link RegexKeywordGroup} and {@link WordKeywordGroup}.
     *
     * @param s The String representation obtained from KeywordGroup.toString()
     */
    private static KeywordGroup keywordGroupFromString(String s, Character keywordSeparator) throws ParseException {
        if (!s.startsWith(MetadataSerializationConfiguration.KEYWORD_GROUP_ID)) {
            throw new IllegalArgumentException("KeywordGroup cannot be created from \"" + s + "\".");
        }
        QuotedStringTokenizer tok = new QuotedStringTokenizer(s.substring(MetadataSerializationConfiguration.KEYWORD_GROUP_ID
                .length()), MetadataSerializationConfiguration.GROUP_UNIT_SEPARATOR, MetadataSerializationConfiguration.GROUP_QUOTE_CHAR);
        String name = StringUtil.unquote(tok.nextToken(), MetadataSerializationConfiguration.GROUP_QUOTE_CHAR);
        GroupHierarchyType context = GroupHierarchyType.getByNumberOrDefault(Integer.parseInt(tok.nextToken()));
        Field field = FieldFactory.parseField(StringUtil.unquote(tok.nextToken(), MetadataSerializationConfiguration.GROUP_QUOTE_CHAR));
        String expression = StringUtil.unquote(tok.nextToken(), MetadataSerializationConfiguration.GROUP_QUOTE_CHAR);
        boolean caseSensitive = Integer.parseInt(tok.nextToken()) == 1;
        boolean regExp = Integer.parseInt(tok.nextToken()) == 1;
        KeywordGroup newGroup;
        if (regExp) {
            newGroup = new RegexKeywordGroup(name, context, field, expression, caseSensitive);
        } else {
            newGroup = new WordKeywordGroup(name, context, field, expression, caseSensitive, keywordSeparator, false);
        }
        addGroupDetails(tok, newGroup);
        return newGroup;
    }

    /** Parses a serialized {@link ExplicitGroup} (current format): name, context, details. */
    private static ExplicitGroup explicitGroupFromString(String input, Character keywordSeparator) throws ParseException {
        if (!input.startsWith(MetadataSerializationConfiguration.EXPLICIT_GROUP_ID)) {
            throw new IllegalArgumentException("ExplicitGroup cannot be created from \"" + input + "\".");
        }
        QuotedStringTokenizer tok = new QuotedStringTokenizer(input.substring(MetadataSerializationConfiguration.EXPLICIT_GROUP_ID.length()),
                MetadataSerializationConfiguration.GROUP_UNIT_SEPARATOR, MetadataSerializationConfiguration.GROUP_QUOTE_CHAR);
        String name = StringUtil.unquote(tok.nextToken(), MetadataSerializationConfiguration.GROUP_QUOTE_CHAR);
        try {
            int context = Integer.parseInt(tok.nextToken());
            ExplicitGroup newGroup = new ExplicitGroup(name, GroupHierarchyType.getByNumberOrDefault(context), keywordSeparator);
            addGroupDetails(tok, newGroup);
            return newGroup;
        } catch (NumberFormatException exception) {
            throw new ParseException("Could not parse context in " + input);
        }
    }

    /**
     * Parses a serialized {@link ExplicitGroup} in the legacy format, where the
     * member entry keys were stored inline (see {@link #addLegacyEntryKeys}).
     */
    private static ExplicitGroup legacyExplicitGroupFromString(String input, Character keywordSeparator) throws ParseException {
        if (!input.startsWith(MetadataSerializationConfiguration.LEGACY_EXPLICIT_GROUP_ID)) {
            throw new IllegalArgumentException("ExplicitGroup cannot be created from \"" + input + "\".");
        }
        QuotedStringTokenizer tok = new QuotedStringTokenizer(input.substring(MetadataSerializationConfiguration.LEGACY_EXPLICIT_GROUP_ID.length()),
                MetadataSerializationConfiguration.GROUP_UNIT_SEPARATOR, MetadataSerializationConfiguration.GROUP_QUOTE_CHAR);
        String name = StringUtil.unquote(tok.nextToken(), MetadataSerializationConfiguration.GROUP_QUOTE_CHAR);
        try {
            int context = Integer.parseInt(tok.nextToken());
            ExplicitGroup newGroup = new ExplicitGroup(name, GroupHierarchyType.getByNumberOrDefault(context), keywordSeparator);
            GroupsParser.addLegacyEntryKeys(tok, newGroup);
            return newGroup;
        } catch (NumberFormatException exception) {
            throw new ParseException("Could not parse context in " + input);
        }
    }

    /**
     * Called only when created fromString.
     * JabRef used to store the entries of an explicit group in the serialization, e.g.
     * ExplicitGroup:GroupName\;0\;Key1\;Key2\;;
     * This method exists for backwards compatibility.
     */
    private static void addLegacyEntryKeys(QuotedStringTokenizer tok, ExplicitGroup group) {
        while (tok.hasMoreTokens()) {
            String key = StringUtil.unquote(tok.nextToken(), MetadataSerializationConfiguration.GROUP_QUOTE_CHAR);
            group.addLegacyEntryKey(key);
        }
    }

    /** Recreates the singleton "all entries" group; the payload carries no extra data. */
    private static AbstractGroup allEntriesGroupFromString(String s) {
        if (!s.startsWith(MetadataSerializationConfiguration.ALL_ENTRIES_GROUP_ID)) {
            throw new IllegalArgumentException("AllEntriesGroup cannot be created from \"" + s + "\".");
        }
        return DefaultGroupsFactory.getAllEntriesGroup();
    }

    /**
     * Parses s and recreates the SearchGroup from it.
     *
     * Serialized order: name, context, search expression, case-sensitive flag, regex flag.
     *
     * @param s The String representation obtained from SearchGroup.toString(), or null if incompatible
     */
    private static AbstractGroup searchGroupFromString(String s) {
        if (!s.startsWith(MetadataSerializationConfiguration.SEARCH_GROUP_ID)) {
            throw new IllegalArgumentException("SearchGroup cannot be created from \"" + s + "\".");
        }
        QuotedStringTokenizer tok = new QuotedStringTokenizer(s.substring(MetadataSerializationConfiguration.SEARCH_GROUP_ID.length()),
                MetadataSerializationConfiguration.GROUP_UNIT_SEPARATOR, MetadataSerializationConfiguration.GROUP_QUOTE_CHAR);
        String name = StringUtil.unquote(tok.nextToken(), MetadataSerializationConfiguration.GROUP_QUOTE_CHAR);
        int context = Integer.parseInt(tok.nextToken());
        String expression = StringUtil.unquote(tok.nextToken(), MetadataSerializationConfiguration.GROUP_QUOTE_CHAR);
        EnumSet<SearchFlags> searchFlags = EnumSet.noneOf(SearchFlags.class);
        if (Integer.parseInt(tok.nextToken()) == 1) {
            searchFlags.add(SearchRules.SearchFlags.CASE_SENSITIVE);
        }
        if (Integer.parseInt(tok.nextToken()) == 1) {
            searchFlags.add(SearchRules.SearchFlags.REGULAR_EXPRESSION);
        }
        // version 0 contained 4 additional booleans to specify search
        // fields; these are ignored now, all fields are always searched
        SearchGroup searchGroup = new SearchGroup(name,
                GroupHierarchyType.getByNumberOrDefault(context), expression, searchFlags
        );
        addGroupDetails(tok, searchGroup);
        return searchGroup;
    }

    /**
     * Reads the optional trailing tokens shared by all group types:
     * expanded flag, color, icon name, description. Older serializations
     * omit them entirely, hence the hasMoreTokens() guard.
     */
    private static void addGroupDetails(QuotedStringTokenizer tokenizer, AbstractGroup group) {
        if (tokenizer.hasMoreTokens()) {
            group.setExpanded(Integer.parseInt(tokenizer.nextToken()) == 1);
            group.setColor(tokenizer.nextToken());
            group.setIconName(tokenizer.nextToken());
            group.setDescription(tokenizer.nextToken());
        }
    }
}
| 16,381 | 52.711475 | 148 | java |
null | jabref-main/src/main/java/org/jabref/logic/importer/util/INSPIREBibtexFilterReader.java | package org.jabref.logic.importer.util;
import java.io.BufferedReader;
import java.io.FilterReader;
import java.io.IOException;
import java.io.Reader;
import java.util.regex.Pattern;
/**
* Warning -- it is not a generic filter, only read is implemented!
* <p>
* Note: this is just a quick port of the original SPIRESBibtexFilterReader.
* <p>
* TODO: Fix grammar in bibtex entries -- it may return invalid bibkeys (with space)
*/
public class INSPIREBibtexFilterReader extends FilterReader {
private static final Pattern PATTERN = Pattern.compile("@Article\\{.*,");
private final BufferedReader inReader;
private String line;
private int pos;
private boolean pre;
public INSPIREBibtexFilterReader(final Reader initialReader) {
super(initialReader);
inReader = new BufferedReader(initialReader);
pos = -1;
pre = false;
}
private String readpreLine() throws IOException {
String l;
do {
l = inReader.readLine();
if (l == null) {
return null;
}
if (l.contains("<pre>")) {
pre = true;
l = inReader.readLine();
}
if (l == null) {
return null;
}
if (l.contains("</pre>")) {
pre = false;
}
} while (!pre);
return l;
}
private String fixBibkey(final String preliminaryLine) {
if (preliminaryLine == null) {
return null;
}
if (PATTERN.matcher(preliminaryLine).find()) {
return preliminaryLine.replace(' ', '_');
} else {
return preliminaryLine;
}
}
@Override
public int read() throws IOException {
if (pos < 0) {
line = fixBibkey(readpreLine());
pos = 0;
if (line == null) {
return -1;
}
}
if (pos >= line.length()) {
pos = -1;
return '\n';
}
return line.charAt(pos++);
}
}
| 2,091 | 24.82716 | 84 | java |
null | jabref-main/src/main/java/org/jabref/logic/importer/util/IdentifierParser.java | package org.jabref.logic.importer.util;
import java.util.Objects;
import java.util.Optional;
import org.jabref.model.entry.BibEntry;
import org.jabref.model.entry.field.Field;
import org.jabref.model.entry.field.StandardField;
import org.jabref.model.entry.identifier.ARK;
import org.jabref.model.entry.identifier.ArXivIdentifier;
import org.jabref.model.entry.identifier.DOI;
import org.jabref.model.entry.identifier.ISBN;
import org.jabref.model.entry.identifier.Identifier;
import org.jabref.model.entry.identifier.MathSciNetId;
import org.jabref.model.strings.StringUtil;
public class IdentifierParser {

    private final BibEntry entry;

    /**
     * Creates a parser bound to the given entry.
     *
     * @param entry the entry whose fields are inspected; must not be null
     */
    public IdentifierParser(BibEntry entry) {
        this.entry = Objects.requireNonNull(entry);
    }

    /**
     * Parses the value stored in the given field of the entry into a typed identifier.
     *
     * @param field the field to read
     * @return the parsed identifier, or an empty Optional if the field is blank
     *         or is not one of the supported identifier fields
     */
    public Optional<? extends Identifier> parse(Field field) {
        String fieldContent = entry.getField(field).orElse("");
        if (StringUtil.isBlank(fieldContent)) {
            return Optional.empty();
        }
        if (field == StandardField.DOI) {
            return DOI.parse(fieldContent);
        }
        if (field == StandardField.ISBN) {
            return ISBN.parse(fieldContent);
        }
        if (field == StandardField.EPRINT) {
            return parseEprint(fieldContent);
        }
        if (field == StandardField.MR_NUMBER) {
            return MathSciNetId.parse(fieldContent);
        }
        return Optional.empty();
    }

    /**
     * Resolves an eprint value based on the entry's eprinttype field
     * ("arxiv" or "ark", case-insensitively).
     */
    private Optional<? extends Identifier> parseEprint(String eprint) {
        Optional<String> eprintTypeOpt = entry.getField(StandardField.EPRINTTYPE);
        if (eprintTypeOpt.isEmpty()) {
            return Optional.empty();
        }
        String eprintType = eprintTypeOpt.get();
        if ("arxiv".equalsIgnoreCase(eprintType)) {
            return ArXivIdentifier.parse(eprint);
        }
        if ("ark".equalsIgnoreCase(eprintType)) {
            return ARK.parse(eprint);
        }
        return Optional.empty();
    }
}
| 1,943 | 32.517241 | 82 | java |
null | jabref-main/src/main/java/org/jabref/logic/importer/util/JsonReader.java | package org.jabref.logic.importer.util;
import java.io.IOException;
import java.io.InputStream;
import java.nio.charset.StandardCharsets;
import org.jabref.logic.importer.ParseException;
import kong.unirest.json.JSONArray;
import kong.unirest.json.JSONException;
import kong.unirest.json.JSONObject;
/**
* Converts an {@link InputStream} into a {@link JSONObject}.
*/
public class JsonReader {

    /**
     * Converts the given input stream into a {@link JSONObject}.
     *
     * @param inputStream the stream to read; consumed completely and decoded as UTF-8
     * @return A {@link JSONObject}. An empty JSON object is returned in the case an empty stream is passed.
     * @throws ParseException if the stream cannot be read or does not contain valid JSON
     */
    public static JSONObject toJsonObject(InputStream inputStream) throws ParseException {
        try {
            String content = readToString(inputStream);
            // Fallback: in case an empty stream was passed, return an empty JSON object
            if (content.isBlank()) {
                return new JSONObject();
            }
            return new JSONObject(content);
        } catch (IOException | JSONException e) {
            throw new ParseException(e);
        }
    }

    /**
     * Converts the given input stream into a {@link JSONArray}.
     *
     * @param inputStream the stream to read; consumed completely and decoded as UTF-8
     * @return A {@link JSONArray}. An empty JSON array is returned in the case an empty stream is passed.
     * @throws ParseException if the stream cannot be read or does not contain valid JSON
     */
    public static JSONArray toJsonArray(InputStream inputStream) throws ParseException {
        try {
            String content = readToString(inputStream);
            if (content.isBlank()) {
                return new JSONArray();
            }
            return new JSONArray(content);
        } catch (IOException | JSONException e) {
            throw new ParseException(e);
        }
    }

    /**
     * Reads the entire stream as UTF-8 text.
     * Extracted so both conversion methods share one decoding path.
     */
    private static String readToString(InputStream inputStream) throws IOException {
        return new String(inputStream.readAllBytes(), StandardCharsets.UTF_8);
    }
}
| 1,563 | 31.583333 | 108 | java |
null | jabref-main/src/main/java/org/jabref/logic/importer/util/MathMLParser.java | package org.jabref.logic.importer.util;
import java.io.IOException;
import java.io.StringReader;
import java.io.StringWriter;
import java.net.URISyntaxException;
import java.net.URL;
import java.util.Objects;
import javax.xml.stream.XMLStreamException;
import javax.xml.stream.XMLStreamReader;
import javax.xml.transform.Result;
import javax.xml.transform.Source;
import javax.xml.transform.Transformer;
import javax.xml.transform.TransformerException;
import javax.xml.transform.TransformerFactory;
import javax.xml.transform.stream.StreamResult;
import javax.xml.transform.stream.StreamSource;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
public class MathMLParser {
    private static final Logger LOGGER = LoggerFactory.getLogger(MathMLParser.class);
    private static final String XSLT_FILE_PATH = "/xslt/mathml_latex/mmltex.xsl";

    /**
     * Parses the MathML element into its corresponding LaTeX representation by
     * applying the bundled XSLT stylesheet.
     *
     * @param reader the stream reader positioned at the MathML element
     * @return the LaTeX representation, or the placeholder
     *         {@code "<Unsupported MathML expression>"} if any step fails
     */
    public static String parse(XMLStreamReader reader) {
        String latexResult = "<Unsupported MathML expression>";
        try {
            // Pull the raw XML of the current element out of the StAX stream.
            String xmlContent = StaxParser.getXMLContent(reader);
            Source mathmlSource = new StreamSource(new StringReader(xmlContent));

            // Load the MathML->LaTeX stylesheet from the classpath; the system id
            // (URI) is supplied so relative references inside the stylesheet resolve.
            URL xsltResource = MathMLParser.class.getResource(XSLT_FILE_PATH);
            Source xsltSource = new StreamSource(Objects.requireNonNull(xsltResource).openStream(), xsltResource.toURI().toASCIIString());

            Transformer transformer = TransformerFactory.newInstance().newTransformer(xsltSource);
            StringWriter latexWriter = new StringWriter();
            Result latexTarget = new StreamResult(latexWriter);
            transformer.transform(mathmlSource, latexTarget);
            latexResult = latexWriter.getBuffer().toString();
        } catch (XMLStreamException e) {
            LOGGER.debug("An exception occurred when getting XML content", e);
        } catch (IOException e) {
            LOGGER.debug("An I/O exception occurred", e);
        } catch (URISyntaxException e) {
            LOGGER.debug("XSLT Source URI invalid", e);
        } catch (TransformerException e) {
            LOGGER.debug("An exception occurred during transformation", e);
        }
        return latexResult;
    }
}
| 2,561 | 35.6 | 138 | java |
null | jabref-main/src/main/java/org/jabref/logic/importer/util/MediaTypes.java | package org.jabref.logic.importer.util;
/**
 * Stores MediaTypes required by JabRef (which are not available in used libraries)
 */
public class MediaTypes {
    /** MIME type requested from services that can answer with raw BibTeX. */
    public static final String APPLICATION_BIBTEX = "application/x-bibtex";
    /** MIME type for CSL-JSON (Citation Style Language) payloads. */
    public static final String CITATIONSTYLES_JSON = "application/vnd.citationstyles.csl+json";
}
| 332 | 32.3 | 95 | java |
null | jabref-main/src/main/java/org/jabref/logic/importer/util/MetaDataParser.java | package org.jabref.logic.importer.util;
import java.io.IOException;
import java.io.Reader;
import java.io.StringReader;
import java.nio.file.Path;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Comparator;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.regex.Pattern;
import org.jabref.logic.cleanup.FieldFormatterCleanups;
import org.jabref.logic.importer.ParseException;
import org.jabref.model.database.BibDatabaseMode;
import org.jabref.model.entry.BibEntryType;
import org.jabref.model.entry.BibEntryTypeBuilder;
import org.jabref.model.entry.field.FieldFactory;
import org.jabref.model.entry.types.EntryType;
import org.jabref.model.entry.types.EntryTypeFactory;
import org.jabref.model.metadata.ContentSelectors;
import org.jabref.model.metadata.MetaData;
import org.jabref.model.metadata.SaveOrder;
import org.jabref.model.strings.StringUtil;
import org.jabref.model.util.FileUpdateMonitor;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* Writing is done at {@link org.jabref.logic.exporter.MetaDataSerializer}.
*/
public class MetaDataParser {
    private static final Logger LOGGER = LoggerFactory.getLogger(MetaDataParser.class);
    // NOTE(review): a *static* field is (re)assigned from the instance constructor below —
    // the last-created parser wins. Confirm this is intentional before relying on it.
    private static FileUpdateMonitor fileMonitor;
    // Matches a single (non-doubled) backslash with non-backslash neighbours.
    private static final Pattern SINGLE_BACKSLASH = Pattern.compile("[^\\\\]\\\\[^\\\\]");
    public MetaDataParser(FileUpdateMonitor fileMonitor) {
        MetaDataParser.fileMonitor = fileMonitor;
    }
public static Optional<BibEntryType> parseCustomEntryType(String comment) {
String rest = comment.substring(MetaData.ENTRYTYPE_FLAG.length());
int indexEndOfName = rest.indexOf(':');
if (indexEndOfName < 0) {
return Optional.empty();
}
String fieldsDescription = rest.substring(indexEndOfName + 2);
int indexEndOfRequiredFields = fieldsDescription.indexOf(']');
int indexEndOfOptionalFields = fieldsDescription.indexOf(']', indexEndOfRequiredFields + 1);
if ((indexEndOfRequiredFields < 4) || (indexEndOfOptionalFields < (indexEndOfRequiredFields + 6))) {
return Optional.empty();
}
EntryType type = EntryTypeFactory.parse(rest.substring(0, indexEndOfName));
String reqFields = fieldsDescription.substring(4, indexEndOfRequiredFields);
String optFields = fieldsDescription.substring(indexEndOfRequiredFields + 6, indexEndOfOptionalFields);
BibEntryTypeBuilder entryTypeBuilder = new BibEntryTypeBuilder()
.withType(type)
.withRequiredFields(FieldFactory.parseOrFieldsList(reqFields))
// Important fields are optional fields, but displayed first. Thus, they do not need to be separated by "/".
// See org.jabref.model.entry.field.FieldPriority for details on important optional fields.
.withImportantFields(FieldFactory.parseFieldList(optFields));
return Optional.of(entryTypeBuilder.build());
}
/**
* Parses the given data map and returns a new resulting {@link MetaData} instance.
*/
public MetaData parse(Map<String, String> data, Character keywordSeparator) throws ParseException {
return parse(new MetaData(), data, keywordSeparator);
}
/**
* Parses the data map and changes the given {@link MetaData} instance respectively.
*/
public MetaData parse(MetaData metaData, Map<String, String> data, Character keywordSeparator) throws ParseException {
List<String> defaultCiteKeyPattern = new ArrayList<>();
Map<EntryType, List<String>> nonDefaultCiteKeyPatterns = new HashMap<>();
// process groups (GROUPSTREE and GROUPSTREE_LEGACY) at the very end (otherwise it can happen that not all dependent data are set)
List<Map.Entry<String, String>> entryList = new ArrayList<>(data.entrySet());
entryList.sort(groupsLast());
for (Map.Entry<String, String> entry : entryList) {
List<String> values = getAsList(entry.getValue());
if (entry.getKey().startsWith(MetaData.PREFIX_KEYPATTERN)) {
EntryType entryType = EntryTypeFactory.parse(entry.getKey().substring(MetaData.PREFIX_KEYPATTERN.length()));
nonDefaultCiteKeyPatterns.put(entryType, Collections.singletonList(getSingleItem(values)));
} else if (entry.getKey().startsWith(MetaData.SELECTOR_META_PREFIX)) {
// edge case, it might be one special field e.g. article from biblatex-apa, but we can't distinguish this from any other field and rather prefer to handle it as UnknownField
metaData.addContentSelector(ContentSelectors.parse(FieldFactory.parseField(entry.getKey().substring(MetaData.SELECTOR_META_PREFIX.length())), StringUtil.unquote(entry.getValue(), MetaData.ESCAPE_CHARACTER)));
} else if (entry.getKey().equals(MetaData.FILE_DIRECTORY)) {
metaData.setDefaultFileDirectory(parseDirectory(entry.getValue()));
} else if (entry.getKey().startsWith(MetaData.FILE_DIRECTORY + '-')) {
// The user name starts directly after FILE_DIRECTORY + '-'
String user = entry.getKey().substring(MetaData.FILE_DIRECTORY.length() + 1);
metaData.setUserFileDirectory(user, parseDirectory(entry.getValue()));
} else if (entry.getKey().startsWith(MetaData.FILE_DIRECTORY_LATEX)) {
// The user name starts directly after FILE_DIRECTORY_LATEX" + '-'
String user = entry.getKey().substring(MetaData.FILE_DIRECTORY_LATEX.length() + 1);
Path path = Path.of(parseDirectory(entry.getValue())).normalize();
metaData.setLatexFileDirectory(user, path);
} else if (entry.getKey().equals(MetaData.SAVE_ACTIONS)) {
metaData.setSaveActions(FieldFormatterCleanups.parse(values));
} else if (entry.getKey().equals(MetaData.DATABASE_TYPE)) {
metaData.setMode(BibDatabaseMode.parse(getSingleItem(values)));
} else if (entry.getKey().equals(MetaData.KEYPATTERNDEFAULT)) {
defaultCiteKeyPattern = Collections.singletonList(getSingleItem(values));
} else if (entry.getKey().equals(MetaData.PROTECTED_FLAG_META)) {
if (Boolean.parseBoolean(getSingleItem(values))) {
metaData.markAsProtected();
} else {
metaData.markAsNotProtected();
}
} else if (entry.getKey().equals(MetaData.SAVE_ORDER_CONFIG)) {
metaData.setSaveOrderConfig(SaveOrder.parse(values));
} else if (entry.getKey().equals(MetaData.GROUPSTREE) || entry.getKey().equals(MetaData.GROUPSTREE_LEGACY)) {
metaData.setGroups(GroupsParser.importGroups(values, keywordSeparator, fileMonitor, metaData));
} else if (entry.getKey().equals(MetaData.VERSION_DB_STRUCT)) {
metaData.setVersionDBStructure(getSingleItem(values));
} else {
// Keep meta data items that we do not know in the file
metaData.putUnknownMetaDataItem(entry.getKey(), values);
}
}
if (!defaultCiteKeyPattern.isEmpty() || !nonDefaultCiteKeyPatterns.isEmpty()) {
metaData.setCiteKeyPattern(defaultCiteKeyPattern, nonDefaultCiteKeyPatterns);
}
return metaData;
}
/**
* Parse the content of the value as provided by "raw" content.
*
* We do not use unescaped value (created by @link{#getAsList(java.lang.String)}),
* because this leads to difficulties with UNC names.
*
* No normalization is done - the general file directory could be passed as Mac OS X path, but the user could sit on Windows.
*
* @param value the raw value (as stored in the .bib file)
*/
static String parseDirectory(String value) {
value = StringUtil.removeStringAtTheEnd(value, MetaData.SEPARATOR_STRING);
if (value.contains("\\\\\\\\")) {
// This is an escaped Windows UNC path
return value.replace("\\\\", "\\");
} else if (value.contains("\\\\") && !SINGLE_BACKSLASH.matcher(value).find()) {
// All backslashes escaped
return value.replace("\\\\", "\\");
} else {
// No backslash escaping
return value;
}
}
private static Comparator<? super Map.Entry<String, String>> groupsLast() {
return (s1, s2) -> MetaData.GROUPSTREE.equals(s1.getKey()) || MetaData.GROUPSTREE_LEGACY.equals(s1.getKey()) ? 1 :
MetaData.GROUPSTREE.equals(s2.getKey()) || MetaData.GROUPSTREE_LEGACY.equals(s2.getKey()) ? -1 : 0;
}
/**
* Returns the first item in the list.
* If the specified list does not contain exactly one item, then a {@link ParseException} will be thrown.
*/
private static String getSingleItem(List<String> value) throws ParseException {
if (value.size() == 1) {
return value.get(0);
} else {
throw new ParseException("Expected a single item but received " + value);
}
}
private static List<String> getAsList(String value) throws ParseException {
StringReader valueReader = new StringReader(value);
List<String> orderedValue = new ArrayList<>();
// We must allow for ; and \ in escape sequences.
try {
Optional<String> unit;
while ((unit = getNextUnit(valueReader)).isPresent()) {
orderedValue.add(unit.get());
}
} catch (IOException ex) {
LOGGER.error("Weird error while parsing meta data.", ex);
throw new ParseException("Weird error while parsing meta data.", ex);
}
return orderedValue;
}
/**
* Reads the next unit. Units are delimited by ';' (MetaData.SEPARATOR_CHARACTER).
*/
private static Optional<String> getNextUnit(Reader reader) throws IOException {
int c;
boolean escape = false;
StringBuilder res = new StringBuilder();
while ((c = reader.read()) != -1) {
if (escape) {
// at org.jabref.logic.exporter.MetaDataSerializer.serializeMetaData, only MetaData.SEPARATOR_CHARACTER, MetaData.ESCAPE_CHARACTER are quoted
// That means ; and \\
char character = (char) c;
if (character != MetaData.SEPARATOR_CHARACTER && character != MetaData.ESCAPE_CHARACTER) {
// Keep the escape character
res.append("\\");
}
res.append(character);
escape = false;
} else if (c == MetaData.ESCAPE_CHARACTER) {
escape = true;
} else if (c == MetaData.SEPARATOR_CHARACTER) {
break;
} else {
res.append((char) c);
}
}
if (res.length() > 0) {
return Optional.of(res.toString());
}
return Optional.empty();
}
}
| 11,161 | 47.320346 | 224 | java |
null | jabref-main/src/main/java/org/jabref/logic/importer/util/ShortDOIService.java | package org.jabref.logic.importer.util;
import java.io.IOException;
import java.net.MalformedURLException;
import java.net.URI;
import java.net.URISyntaxException;
import java.net.URL;
import org.jabref.logic.importer.ParseException;
import org.jabref.logic.net.URLDownload;
import org.jabref.model.entry.identifier.DOI;
import kong.unirest.json.JSONException;
import kong.unirest.json.JSONObject;
import org.apache.http.client.utils.URIBuilder;
/**
* Class for obtaining shortened DOI names. See <a href="https://shortdoi.org">https://shortdoi.org</a>.
*/
public class ShortDOIService {

    private static final String BASIC_URL = "http://shortdoi.org/";

    /**
     * Obtains shortened DOI name for given DOI
     *
     * @param doi DOI
     * @return A shortened DOI name
     * @throws ShortDOIServiceException if the service cannot be reached or returns no result
     */
    public DOI getShortDOI(DOI doi) throws ShortDOIServiceException {
        JSONObject responseJSON = makeRequest(doi);
        String shortDoi = responseJSON.getString("ShortDOI");

        return new DOI(shortDoi);
    }

    /**
     * Queries shortdoi.org for the given DOI and returns the parsed JSON response.
     *
     * @throws ShortDOIServiceException if the request URL cannot be built, the download fails,
     *         the response is not valid JSON, or the response body is empty
     */
    private JSONObject makeRequest(DOI doi) throws ShortDOIServiceException {
        // No dead null-initializations: url is definitely assigned in the try block or
        // the method exits via the exception, and the builder stays scoped to the try.
        URL url;
        try {
            URIBuilder uriBuilder = new URIBuilder(BASIC_URL);
            uriBuilder.setPath(uriBuilder.getPath() + doi.getDOI());
            uriBuilder.addParameter("format", "json");
            url = uriBuilder.build().toURL();
        } catch (URISyntaxException | MalformedURLException e) {
            throw new ShortDOIServiceException("Cannot get short DOI", e);
        }

        URLDownload urlDownload = new URLDownload(url);

        try {
            JSONObject resultAsJSON = JsonReader.toJsonObject(urlDownload.asInputStream());
            if (resultAsJSON.isEmpty()) {
                throw new ShortDOIServiceException("Cannot get short DOI");
            }
            return resultAsJSON;
        } catch (ParseException | IOException | JSONException e) {
            throw new ShortDOIServiceException("Cannot get short DOI", e);
        }
    }
}
| 2,064 | 30.287879 | 104 | java |
null | jabref-main/src/main/java/org/jabref/logic/importer/util/ShortDOIServiceException.java | package org.jabref.logic.importer.util;
import org.jabref.logic.JabRefException;
public class ShortDOIServiceException extends JabRefException {

    /**
     * @param message technical description of what failed while contacting shortdoi.org
     */
    public ShortDOIServiceException(String message) {
        super(message);
    }

    /**
     * @param cause the underlying failure (e.g. network or JSON parsing error)
     */
    public ShortDOIServiceException(String message, Throwable cause) {
        super(message, cause);
    }

    /**
     * @param localizedMessage user-displayable, translated variant of the message
     */
    public ShortDOIServiceException(String message, String localizedMessage) {
        super(message, localizedMessage);
    }

    /**
     * @param localizedMessage user-displayable, translated variant of the message
     * @param cause            the underlying failure
     */
    public ShortDOIServiceException(String message, String localizedMessage, Throwable cause) {
        super(message, localizedMessage, cause);
    }

    /**
     * @param cause the underlying failure; its message is reused
     */
    public ShortDOIServiceException(Throwable cause) {
        super(cause);
    }
}
| 706 | 26.192308 | 95 | java |
null | jabref-main/src/main/java/org/jabref/logic/importer/util/StaxParser.java | package org.jabref.logic.importer.util;
import javax.xml.stream.XMLStreamConstants;
import javax.xml.stream.XMLStreamException;
import javax.xml.stream.XMLStreamReader;
public class StaxParser {
/**
* Extracts the XML content inside the first
* encountered parent tag, including tag elements,
* attributes, namespace, prefix and contained text
*
* @param reader the stream reader
* @return Returns the inner XML content
*/
public static String getXMLContent(XMLStreamReader reader) throws XMLStreamException {
// skip over START DOCUMENT event
while (reader.getEventType() == XMLStreamConstants.START_DOCUMENT && reader.hasNext()) {
reader.next();
}
StringBuilder content = new StringBuilder();
String parentTag = reader.getLocalName();
int depth = 1;
content.append(getXMLStartTag(reader, true));
while (reader.hasNext()) {
int event = reader.next();
if (event == XMLStreamConstants.START_ELEMENT) {
String tagName = reader.getLocalName();
if (tagName.equals(parentTag)) {
// nested tag of same type
depth++;
}
// append the start tag
content.append(getXMLStartTag(reader, false));
} else if (event == XMLStreamConstants.END_ELEMENT) {
String tagName = reader.getLocalName();
// append the end tag
content.append(getXMLEndTag(reader));
if (tagName.equals(parentTag)) {
depth--;
if (depth == 0) {
// reached the closing tag of the first parent tag
break;
}
}
} else if (event == XMLStreamConstants.CHARACTERS) {
content.append(getXMLText(reader));
} else if (event == XMLStreamConstants.CDATA) {
content.append(getXMLCData(reader));
} else if (event == XMLStreamConstants.COMMENT) {
content.append(getXMLComment(reader));
} else if (event == XMLStreamConstants.PROCESSING_INSTRUCTION) {
content.append(getXMLProcessingInstruction(reader));
} else if (event == XMLStreamConstants.SPACE || event == XMLStreamConstants.ENTITY_REFERENCE) {
content.append(getXMLText(reader));
}
}
return content.toString().trim();
}
private static String getXMLStartTag(XMLStreamReader reader, boolean addNamespaceURI) {
StringBuilder startTag = new StringBuilder();
String prefix = reader.getPrefix();
startTag.append("<")
.append(prefix != null && !prefix.isBlank() ? prefix + ":" : "")
.append(reader.getName().getLocalPart());
String namespaceURI = reader.getNamespaceURI();
if (addNamespaceURI && namespaceURI != null) {
startTag.append(" xmlns")
.append(prefix != null && !prefix.isBlank() ? ":" + prefix : "")
.append("=\"")
.append(namespaceURI)
.append("\"");
}
for (int i = 0; i < reader.getAttributeCount(); i++) {
startTag.append(" ").append(reader.getAttributeLocalName(i)).append("=\"").append(reader.getAttributeValue(i)).append("\"");
}
if (reader.isEndElement()) {
startTag.append("/");
}
startTag.append(">");
return startTag.toString();
}
private static String getXMLEndTag(XMLStreamReader reader) {
StringBuilder endTag = new StringBuilder();
String prefix = reader.getPrefix();
endTag.append("</")
.append(prefix != null && !prefix.isBlank() ? prefix + ":" : "")
.append(reader.getName().getLocalPart())
.append(">");
return endTag.toString();
}
private static String getXMLCData(XMLStreamReader reader) {
return "<![CDATA[" + reader.getText() + "]]>";
}
private static String getXMLComment(XMLStreamReader reader) {
return "<!--" + reader.getText() + "-->";
}
private static String getXMLProcessingInstruction(XMLStreamReader reader) {
return "<?" + reader.getPITarget() + " " + reader.getPIData() + "?>";
}
private static String getXMLText(XMLStreamReader reader) {
return reader.getText().trim();
}
}
| 4,563 | 34.379845 | 136 | java |
null | jabref-main/src/main/java/org/jabref/logic/integrity/ASCIICharacterChecker.java | package org.jabref.logic.integrity;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import org.jabref.logic.l10n.Localization;
import org.jabref.model.entry.BibEntry;
import org.jabref.model.entry.field.Field;
import com.google.common.base.CharMatcher;
public class ASCIICharacterChecker {
    /**
     * Flags every field of the entry whose value contains at least one character
     * outside the ASCII range (e.g., umlauts or other unicode characters).
     */
    @Override
    public List<IntegrityMessage> check(BibEntry entry) {
        List<IntegrityMessage> messages = new ArrayList<>();
        for (Map.Entry<Field, String> fieldEntry : entry.getFieldMap().entrySet()) {
            if (!CharMatcher.ascii().matchesAllOf(fieldEntry.getValue())) {
                messages.add(new IntegrityMessage(Localization.lang("Non-ASCII encoded character found"), entry,
                        fieldEntry.getKey()));
            }
        }
        return messages;
    }
}
| 983 | 30.741935 | 111 | java |
null | jabref-main/src/main/java/org/jabref/logic/integrity/AbbreviationChecker.java | package org.jabref.logic.integrity;
import java.util.ArrayList;
import java.util.List;
import java.util.Optional;
import java.util.Set;
import org.jabref.logic.journals.JournalAbbreviationRepository;
import org.jabref.logic.l10n.Localization;
import org.jabref.model.entry.BibEntry;
import org.jabref.model.entry.field.Field;
import org.jabref.model.entry.field.FieldFactory;
public class AbbreviationChecker implements EntryChecker {

    private final JournalAbbreviationRepository abbreviationRepository;
    // all fields that may hold a journal/book name and thus an abbreviation
    private final Set<Field> fields = FieldFactory.getBookNameFields();

    public AbbreviationChecker(JournalAbbreviationRepository abbreviationRepository) {
        this.abbreviationRepository = abbreviationRepository;
    }

    /**
     * Reports a message for every book-name field whose (LaTeX-free) value is a
     * known journal abbreviation.
     */
    @Override
    public List<IntegrityMessage> check(BibEntry entry) {
        List<IntegrityMessage> results = new ArrayList<>();
        for (Field field : fields) {
            Optional<String> latexFreeValue = entry.getLatexFreeField(field);
            if (latexFreeValue.isPresent() && abbreviationRepository.isAbbreviatedName(latexFreeValue.get())) {
                results.add(new IntegrityMessage(Localization.lang("abbreviation detected"), entry, field));
            }
        }
        return results;
    }
}
| 1,220 | 34.911765 | 129 | java |
null | jabref-main/src/main/java/org/jabref/logic/integrity/AmpersandChecker.java | package org.jabref.logic.integrity;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.regex.MatchResult;
import java.util.regex.Pattern;
import org.jabref.logic.l10n.Localization;
import org.jabref.model.entry.BibEntry;
import org.jabref.model.entry.field.Field;
import com.google.common.base.CharMatcher;
/**
* Checks if the BibEntry contains unescaped ampersands.
*/
public class AmpersandChecker implements EntryChecker {
    // matches an & together with the run of \ characters immediately before it
    private static final Pattern BACKSLASH_PRECEDED_AMPERSAND = Pattern.compile("\\\\*&");

    /**
     * Reports, per field, how many ampersands are unescaped. An ampersand counts
     * as unescaped when it is preceded by an even number of backslashes
     * (0, 2, ... - i.e. the backslashes escape each other, not the ampersand).
     */
    @Override
    public List<IntegrityMessage> check(BibEntry entry) {
        List<IntegrityMessage> messages = new ArrayList<>();
        for (Map.Entry<Field, String> fieldEntry : entry.getFieldMap().entrySet()) {
            long unescapedCount = BACKSLASH_PRECEDED_AMPERSAND.matcher(fieldEntry.getValue())
                                                              .results()
                                                              .filter(match -> CharMatcher.is('\\').countIn(match.group()) % 2 == 0)
                                                              .count();
            if (unescapedCount > 0) {
                messages.add(new IntegrityMessage(Localization.lang("Found %0 unescaped '&'", unescapedCount), entry, fieldEntry.getKey()));
                // note: when changing the message - also do so in tests
            }
        }
        return messages;
    }
}
| 1,486 | 34.404762 | 139 | java |
null | jabref-main/src/main/java/org/jabref/logic/integrity/BibStringChecker.java | package org.jabref.logic.integrity;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import org.jabref.logic.bibtex.FieldWriter;
import org.jabref.logic.l10n.Localization;
import org.jabref.model.entry.BibEntry;
import org.jabref.model.entry.field.Field;
import org.jabref.model.entry.field.FieldProperty;
/**
* Checks, if there is an even number of unescaped # (FieldWriter.BIBTEX_STRING_START_END_SYMBOL)
*/
public class BibStringChecker implements EntryChecker {

    // Detect FieldWriter.BIBTEX_STRING_START_END_SYMBOL (#) if it doesn't have a \ in front of it or if it starts the string
    private static final Pattern UNESCAPED_HASH = Pattern.compile("(?<!\\\\)" + FieldWriter.BIBTEX_STRING_START_END_SYMBOL + "|^" + FieldWriter.BIBTEX_STRING_START_END_SYMBOL);

    /**
     * Flags every non-verbatim field containing an odd number of unescaped '#'
     * characters (BibTeX string start/end symbols must come in pairs).
     */
    @Override
    public List<IntegrityMessage> check(BibEntry entry) {
        List<IntegrityMessage> results = new ArrayList<>();
        for (Map.Entry<Field, String> fieldEntry : entry.getFieldMap().entrySet()) {
            // verbatim fields (e.g. file paths, URLs) may legitimately contain '#'
            if (fieldEntry.getKey().getProperties().contains(FieldProperty.VERBATIM)) {
                continue;
            }
            Matcher hashMatcher = UNESCAPED_HASH.matcher(fieldEntry.getValue());
            int unescapedHashes = 0;
            while (hashMatcher.find()) {
                unescapedHashes++;
            }
            if (unescapedHashes % 2 == 1) {
                // # is FieldWriter.BIBTEX_STRING_START_END_SYMBOL
                results.add(new IntegrityMessage(Localization.lang("odd number of unescaped '#'"), entry,
                        fieldEntry.getKey()));
            }
        }
        return results;
    }
}
| 1,778 | 37.673913 | 176 | java |
null | jabref-main/src/main/java/org/jabref/logic/integrity/BibTeXEntryTypeChecker.java | package org.jabref.logic.integrity;
import java.util.Collections;
import java.util.List;
import org.jabref.logic.l10n.Localization;
import org.jabref.model.entry.BibEntry;
import org.jabref.model.entry.field.InternalField;
import org.jabref.model.entry.types.EntryTypeFactory;
/**
* BibTeX mode only checker
*/
public class BibTeXEntryTypeChecker implements EntryChecker {
    /**
     * Will check if the current library uses any entry types from another mode.
     * For example it will warn the user if he uses entry types defined for Biblatex inside a BibTeX library.
     */
    @Override
    public List<IntegrityMessage> check(BibEntry entry) {
        if (!EntryTypeFactory.isExclusiveBiblatex(entry.getType())) {
            return Collections.emptyList();
        }
        IntegrityMessage message = new IntegrityMessage(
                Localization.lang("Entry type %0 is only defined for Biblatex but not for BibTeX", entry.getType().getDisplayName()),
                entry,
                InternalField.KEY_FIELD);
        return Collections.singletonList(message);
    }
}
| 1,036 | 34.758621 | 190 | java |
null | jabref-main/src/main/java/org/jabref/logic/integrity/BooktitleChecker.java | package org.jabref.logic.integrity;
import java.util.Locale;
import java.util.Optional;
import org.jabref.logic.l10n.Localization;
import org.jabref.model.strings.StringUtil;
public class BooktitleChecker implements ValueChecker {

    /**
     * Warns when a non-blank booktitle ends with "conference on" (case-insensitive),
     * which usually indicates a truncated or reordered title.
     */
    @Override
    public Optional<String> checkValue(String value) {
        boolean endsSuspiciously = !StringUtil.isBlank(value)
                && value.toLowerCase(Locale.ENGLISH).endsWith("conference on");
        if (endsSuspiciously) {
            return Optional.of(Localization.lang("booktitle ends with 'conference on'"));
        }
        return Optional.empty();
    }
}
| 610 | 24.458333 | 89 | java |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.