repo
stringlengths
1
191
file
stringlengths
23
351
code
stringlengths
0
5.32M
file_length
int64
0
5.32M
avg_line_length
float64
0
2.9k
max_line_length
int64
0
288k
extension_type
stringclasses
1 value
null
jabref-main/src/main/java/org/jabref/logic/formatter/IdentityFormatter.java
package org.jabref.logic.formatter;

import java.util.Objects;

import org.jabref.logic.cleanup.Formatter;
import org.jabref.logic.l10n.Localization;

/**
 * Formatter that returns its input unchanged.
 * It may seem useless, but is needed as a fallback option.
 */
public class IdentityFormatter extends Formatter {

    @Override
    public String getName() {
        return Localization.lang("Identity");
    }

    @Override
    public String getKey() {
        return "identity";
    }

    @Override
    public String format(String value) {
        // Reject null early, then hand the value back untouched.
        return Objects.requireNonNull(value);
    }

    @Override
    public String getDescription() {
        return Localization.lang("Does nothing.");
    }

    @Override
    public String getExampleInput() {
        return "JabRef";
    }
}
761
18.538462
58
java
null
jabref-main/src/main/java/org/jabref/logic/formatter/bibtexfields/AddBracesFormatter.java
package org.jabref.logic.formatter.bibtexfields;

import java.util.Objects;

import org.jabref.logic.cleanup.Formatter;
import org.jabref.logic.l10n.Localization;

/**
 * Wraps the complete field content in curly braces, unless the content
 * already begins with '{' or already ends with '}'.
 */
public class AddBracesFormatter extends Formatter {

    @Override
    public String getName() {
        return Localization.lang("Add enclosing braces");
    }

    @Override
    public String getKey() {
        return "add_braces";
    }

    @Override
    public String format(String value) {
        Objects.requireNonNull(value);
        // Values shorter than two characters are never wrapped.
        if (value.length() < 2) {
            return value;
        }
        boolean startsWithBrace = value.charAt(0) == '{';
        boolean endsWithBrace = value.charAt(value.length() - 1) == '}';
        // Only wrap when the title neither starts with { nor ends with }.
        if (!startsWithBrace && !endsWithBrace) {
            return "{" + value + "}";
        }
        return value;
    }

    @Override
    public String getDescription() {
        return Localization.lang("Add braces encapsulating the complete field content.");
    }

    @Override
    public String getExampleInput() {
        return "In CDMA";
    }
}
1,045
23.904762
109
java
null
jabref-main/src/main/java/org/jabref/logic/formatter/bibtexfields/CleanupUrlFormatter.java
package org.jabref.logic.formatter.bibtexfields;

import java.net.URLDecoder;
import java.nio.charset.Charset;
import java.nio.charset.StandardCharsets;
import java.util.Objects;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

import org.jabref.logic.cleanup.Formatter;
import org.jabref.logic.l10n.Localization;

/**
 * Cleanup URL link.
 * <p>
 * Expose string representations URL links clean up logic.
 * </p>
 */
public class CleanupUrlFormatter extends Formatter {

    // This regexp find "url=" or "to=" parameter in full link and get text after them
    private static final Pattern PATTERN_URL = Pattern.compile("(?:url|to)=([^&]*)");

    @Override
    public String getName() {
        return Localization.lang("Cleanup URL link");
    }

    @Override
    public String getKey() {
        return "cleanup_url";
    }

    /**
     * Escape and decodes a String from the application/x-www-form-urlencoded MIME format.
     * <p>
     * Method will also try to find a URL placed after "url=" or "to=".
     * <p>
     * The conversion process is the same as executed by {@link URLDecoder} to try to
     * take guarantees against code injections.
     * <p>
     * The plus sign is replaced by its correspondent code (%2b) to avoid the character
     * to be replaced by a space during the decoding execution.
     *
     * @param url should not be null
     * @return the decoded URL as a String representation
     *
     * @see URLDecoder#decode(String, Charset)
     */
    @Override
    public String format(String url) {
        // replace() performs the same literal substitution as the former
        // replaceAll("\\+", ...) without compiling a regex on every call.
        String toDecode = Objects.requireNonNull(url, "Null url")
                                 .replace("+", "%2b");
        Matcher matcher = PATTERN_URL.matcher(toDecode);
        if (matcher.find()) {
            // A wrapped target URL was found after "url=" or "to=": decode only that part.
            return URLDecoder.decode(matcher.group(1), StandardCharsets.UTF_8);
        }
        return URLDecoder.decode(toDecode, StandardCharsets.UTF_8);
    }

    @Override
    public String getDescription() {
        return Localization.lang("Cleanup URL link by removing special symbols and extracting simple link");
    }

    @Override
    public String getExampleInput() {
        return "https://www.google.de/url?sa=t&rct=j&q=&esrc=s&source=web&cd=11&cad="
                + "rja&uact=8&ved=0ahUKEwjg3ZrB_ZPXAhVGuhoKHYdOBOg4ChAWCCYwAA&url="
                + "http%3A%2F%2Fwww.focus.de%2Fgesundheit%2Fratgeber%2Fherz%2Ftest%2"
                + "Flebenserwartung-werden-sie-100-jahre-alt_aid_363828.html"
                + "&usg=AOvVaw1G6m2jf-pTHYkXceii4hXU";
    }
}
2,545
32.5
108
java
null
jabref-main/src/main/java/org/jabref/logic/formatter/bibtexfields/ClearFormatter.java
package org.jabref.logic.formatter.bibtexfields;

import java.util.Objects;

import org.jabref.logic.cleanup.Formatter;
import org.jabref.logic.l10n.Localization;

/**
 * Discards the field content entirely, always producing the empty string.
 */
public class ClearFormatter extends Formatter {

    @Override
    public String getName() {
        return Localization.lang("Clear");
    }

    @Override
    public String getKey() {
        return "clear";
    }

    @Override
    public String format(String oldString) {
        // The input is only validated, never used: the result is always empty.
        Objects.requireNonNull(oldString);
        return "";
    }

    @Override
    public String getDescription() {
        return Localization.lang("Clears the field completely.");
    }

    @Override
    public String getExampleInput() {
        return "Obsolete text";
    }
}
725
19.166667
65
java
null
jabref-main/src/main/java/org/jabref/logic/formatter/bibtexfields/EscapeAmpersandsFormatter.java
package org.jabref.logic.formatter.bibtexfields;

import java.util.Objects;

import org.jabref.logic.cleanup.Formatter;
import org.jabref.logic.l10n.Localization;

/**
 * Escapes bare ampersands as {@code \&} while leaving ampersands inside
 * {@code \url{...}} commands and inside LaTeX environments untouched.
 */
public class EscapeAmpersandsFormatter extends Formatter {

    @Override
    public String getName() {
        return Localization.lang("Escape ampersands");
    }

    @Override
    public String getKey() {
        return "escapeAmpersands";
    }

    /**
     * Scans the value character by character, tracking a small amount of LaTeX
     * parsing state (command name, optional [..] argument, {..} body, and
     * begin/end environment nesting) so that only "free-standing" ampersands
     * get a backslash prepended.
     */
    @Override
    public String format(String value) {
        Objects.requireNonNull(value);
        StringBuilder result = new StringBuilder();
        // escape: previous character was a backslash (start of a command).
        boolean escape = false;
        // inCommandName: currently reading the letters of a command name.
        boolean inCommandName = false;
        // inCommand: inside the {...} body of a command.
        boolean inCommand = false;
        // inCommandOption: inside a [...] optional argument.
        boolean inCommandOption = false;
        // Depth of \begin{..}/\end{..} nesting; ampersands inside environments are kept as-is.
        int nestedEnvironments = 0;
        StringBuilder commandName = new StringBuilder();
        for (int i = 0; i < value.length(); i++) {
            char c = value.charAt(i);
            // Track whether we are in a LaTeX command of some sort.
            if (Character.isLetter(c) && (escape || inCommandName)) {
                inCommandName = true;
                if (!inCommandOption) {
                    commandName.append(c);
                }
            } else if (Character.isWhitespace(c) && (inCommand || inCommandOption)) {
                // Whitespace inside a command body/option: no state change.
            } else if (inCommandName) {
                // This means the command name is ended.
                // Perhaps the beginning of an argument:
                if (c == '[') {
                    inCommandOption = true;
                } else if (inCommandOption && (c == ']')) {
                    // Or the end of an argument:
                    inCommandOption = false;
                } else if (!inCommandOption && (c == '{')) {
                    inCommandName = false;
                    inCommand = true;
                } else {
                    // Or simply the end of this command altogether:
                    commandName.delete(0, commandName.length());
                    inCommandName = false;
                }
            }
            // If we are in a command body, see if it has ended:
            if (inCommand && (c == '}')) {
                if ("begin".equals(commandName.toString())) {
                    nestedEnvironments++;
                }
                if ((nestedEnvironments > 0) && "end".equals(commandName.toString())) {
                    nestedEnvironments--;
                }
                commandName.delete(0, commandName.length());
                inCommand = false;
            }
            // We add a backslash before any ampersand characters, with one exception: if
            // we are inside an \url{...} command, we should write it as it is. Maybe.
            if ((c == '&') && !escape
                    && !(inCommand && "url".equals(commandName.toString()))
                    && (nestedEnvironments == 0)) {
                result.append("\\&");
            } else {
                result.append(c);
            }
            // Remember whether the next character is backslash-escaped.
            escape = c == '\\';
        }
        return result.toString();
    }

    @Override
    public String getDescription() {
        return Localization.lang("Escape ampersands");
    }

    @Override
    public String getExampleInput() {
        return "Text & with &ampersands";
    }
}
3,268
33.052083
93
java
null
jabref-main/src/main/java/org/jabref/logic/formatter/bibtexfields/EscapeDollarSignFormatter.java
package org.jabref.logic.formatter.bibtexfields;

import java.util.Objects;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

import org.jabref.logic.cleanup.Formatter;
import org.jabref.logic.l10n.Localization;

/**
 * Escapes every dollar sign that is not already backslash-escaped as {@code \$}.
 */
public class EscapeDollarSignFormatter extends Formatter {

    // Matches a '$' not preceded by a backslash. Precompiled once instead of
    // being recompiled by String.replaceAll on every format() call, matching
    // the style of the sibling formatters in this package.
    private static final Pattern UNESCAPED_DOLLAR_SIGN = Pattern.compile("(?<!\\\\)\\$");

    @Override
    public String getName() {
        return Localization.lang("Escape dollar sign");
    }

    @Override
    public String getKey() {
        return "escapeDollarSign";
    }

    @Override
    public String format(String value) {
        Objects.requireNonNull(value);
        // quoteReplacement keeps the backslash in "\$" literal in the replacement.
        return UNESCAPED_DOLLAR_SIGN.matcher(value).replaceAll(Matcher.quoteReplacement("\\$"));
    }

    @Override
    public String getDescription() {
        return Localization.lang("Escape dollar sign");
    }

    @Override
    public String getExampleInput() {
        return "Text$with$dollar$sign";
    }
}
845
21.864865
81
java
null
jabref-main/src/main/java/org/jabref/logic/formatter/bibtexfields/EscapeUnderscoresFormatter.java
package org.jabref.logic.formatter.bibtexfields;

import java.util.Objects;
import java.util.regex.Pattern;

import org.jabref.logic.cleanup.Formatter;
import org.jabref.logic.l10n.Localization;

/**
 * Escapes every underscore in the field content as {@code \_}.
 */
public class EscapeUnderscoresFormatter extends Formatter {

    // Precompiled literal-underscore pattern, reused across calls.
    private static final Pattern UNDERSCORES = Pattern.compile("_");

    @Override
    public String getName() {
        return Localization.lang("Escape underscores");
    }

    @Override
    public String getKey() {
        return "escapeUnderscores";
    }

    @Override
    public String format(String value) {
        Objects.requireNonNull(value);
        // "\\\\_" in the replacement yields a literal backslash followed by '_'.
        return UNDERSCORES.matcher(value).replaceAll("\\\\_");
    }

    @Override
    public String getDescription() {
        return Localization.lang("Escape underscores");
    }

    @Override
    public String getExampleInput() {
        return "Text_with_underscores";
    }
}
899
21.5
68
java
null
jabref-main/src/main/java/org/jabref/logic/formatter/bibtexfields/HtmlToLatexFormatter.java
package org.jabref.logic.formatter.bibtexfields;

import java.util.Objects;
import java.util.Set;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

import org.jabref.logic.cleanup.Formatter;
import org.jabref.logic.l10n.Localization;
import org.jabref.logic.layout.LayoutFormatter;
import org.jabref.logic.util.strings.HTMLUnicodeConversionMaps;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/**
 * Converts HTML markup and HTML entities (named and numerical) into their
 * LaTeX equivalents, stripping tags that have no LaTeX counterpart.
 */
public class HtmlToLatexFormatter extends Formatter implements LayoutFormatter {

    private static final Logger LOGGER = LoggerFactory.getLogger(HtmlToLatexFormatter.class);

    // Tags longer than this are assumed not to be real tags and are kept verbatim.
    private static final int MAX_TAG_LENGTH = 100;

    private static final Pattern ESCAPED_PATTERN = Pattern.compile("&#([x]*)([0]*)(\\p{XDigit}+);");
    private static final Pattern ESCAPED_PATTERN2 = Pattern.compile("(.)&#([x]*)([0]*)(\\p{XDigit}+);");
    private static final Pattern ESCAPED_PATTERN3 = Pattern.compile("&#([x]*)([0]*)(\\p{XDigit}+);");
    private static final Pattern ESCAPED_PATTERN4 = Pattern.compile("&(\\w+);");

    @Override
    public String format(String text) {
        String result = Objects.requireNonNull(text);
        if (result.isEmpty()) {
            return result;
        }
        StringBuilder sb = new StringBuilder();

        // Deal with the form <sup>k</sup> and <sub>k</sub>
        result = result.replaceAll("<[ ]?sup>([^<]+)</sup>", "\\\\textsuperscript\\{$1\\}");
        result = result.replaceAll("<[ ]?sub>([^<]+)</sub>", "\\\\textsubscript\\{$1\\}");

        // TODO: maybe rewrite this based on regular expressions instead
        // Note that (at least) the IEEE Xplore fetcher must be fixed as it relies on the current way to
        // remove tags for its image alt-tag to equation converter
        for (int i = 0; i < result.length(); i++) {
            int c = result.charAt(i);
            if (c == '<') {
                int oldI = i;
                i = readTag(result, i);
                if (oldI == i) {
                    // just a single <, which needs to be kept
                    sb.append('<');
                }
            } else {
                sb.append((char) c);
            }
        }
        result = sb.toString();

        // Handle text based HTML entities
        Set<String> patterns = HTMLUnicodeConversionMaps.HTML_LATEX_CONVERSION_MAP.keySet();
        for (String pattern : patterns) {
            result = result.replace(pattern, HTMLUnicodeConversionMaps.HTML_LATEX_CONVERSION_MAP.get(pattern));
        }

        // Handle numerical HTML entities
        Matcher m = ESCAPED_PATTERN.matcher(result);
        while (m.find()) {
            int num = Integer.decode(m.group(1).replace("x", "#") + m.group(3));
            if (HTMLUnicodeConversionMaps.NUMERICAL_LATEX_CONVERSION_MAP.containsKey(num)) {
                result = result.replace("&#" + m.group(1) + m.group(2) + m.group(3) + ";",
                        HTMLUnicodeConversionMaps.NUMERICAL_LATEX_CONVERSION_MAP.get(num));
            }
        }

        // Combining accents: "i" and "j" need their dotless variants under an accent.
        m = ESCAPED_PATTERN2.matcher(result);
        while (m.find()) {
            int num = Integer.decode(m.group(2).replace("x", "#") + m.group(4));
            if (HTMLUnicodeConversionMaps.ESCAPED_ACCENTS.containsKey(num)) {
                if ("i".equals(m.group(1))) {
                    result = result.replace(m.group(1) + "&#" + m.group(2) + m.group(3) + m.group(4) + ";",
                            "{\\" + HTMLUnicodeConversionMaps.ESCAPED_ACCENTS.get(num) + "{\\i}}");
                } else if ("j".equals(m.group(1))) {
                    result = result.replace(m.group(1) + "&#" + m.group(2) + m.group(3) + m.group(4) + ";",
                            "{\\" + HTMLUnicodeConversionMaps.ESCAPED_ACCENTS.get(num) + "{\\j}}");
                } else {
                    result = result.replace(m.group(1) + "&#" + m.group(2) + m.group(3) + m.group(4) + ";",
                            "{\\" + HTMLUnicodeConversionMaps.ESCAPED_ACCENTS.get(num) + "{" + m.group(1) + "}}");
                }
            }
        }

        // Find non-converted numerical characters
        m = ESCAPED_PATTERN3.matcher(result);
        while (m.find()) {
            int num = Integer.decode(m.group(1).replace("x", "#") + m.group(3));
            // Fixed: the former call passed five arguments (including a stray " = ")
            // for four placeholders, so the decoded value was never logged.
            LOGGER.warn("HTML escaped char not converted: {}{}{} = {}", m.group(1), m.group(2), m.group(3), num);
        }

        // Remove $$ in case of two adjacent conversions
        result = result.replace("$$", "");

        // Find non-covered special characters with alphabetic codes
        m = ESCAPED_PATTERN4.matcher(result);
        while (m.find()) {
            // Parameterized logging instead of string concatenation.
            LOGGER.warn("HTML escaped char not converted: {}", m.group(1));
        }

        return result.trim();
    }

    @Override
    public String getDescription() {
        return Localization.lang("Converts HTML code to LaTeX code.");
    }

    @Override
    public String getExampleInput() {
        return "<strong>JabRef</strong>";
    }

    /**
     * Skips over an HTML tag starting at {@code position} (which holds '<').
     *
     * @return the index of the closing '>' if a plausible tag was found,
     *         otherwise {@code position} unchanged.
     */
    private int readTag(String text, int position) {
        // Have just read the < character that starts the tag.
        int index = text.indexOf('>', position);
        if ((index > position) && ((index - position) < MAX_TAG_LENGTH)) {
            return index; // Just skip the tag.
        } else {
            return position; // Don't do anything.
        }
    }

    @Override
    public String getName() {
        return Localization.lang("HTML to LaTeX");
    }

    @Override
    public String getKey() {
        return "html_to_latex";
    }
}
5,572
38.246479
120
java
null
jabref-main/src/main/java/org/jabref/logic/formatter/bibtexfields/HtmlToUnicodeFormatter.java
package org.jabref.logic.formatter.bibtexfields; import org.jabref.architecture.ApacheCommonsLang3Allowed; import org.jabref.logic.cleanup.Formatter; import org.jabref.logic.l10n.Localization; import org.jabref.logic.layout.LayoutFormatter; import org.apache.commons.lang3.StringEscapeUtils; @ApacheCommonsLang3Allowed("There is no equivalent in Google's Guava") public class HtmlToUnicodeFormatter extends Formatter implements LayoutFormatter { @Override public String getName() { return Localization.lang("HTML to Unicode"); } @Override public String getKey() { return "html_to_unicode"; } @Override public String getDescription() { return Localization.lang("Converts HTML code to Unicode."); } @Override public String getExampleInput() { return "<b>bread</b> &amp; butter"; } @Override public String format(String fieldText) { // StringEscapeUtils converts characters and regex kills tags return StringEscapeUtils.unescapeHtml4(fieldText).replaceAll("<[^>]*>", ""); } }
1,089
26.948718
84
java
null
jabref-main/src/main/java/org/jabref/logic/formatter/bibtexfields/LatexCleanupFormatter.java
package org.jabref.logic.formatter.bibtexfields;

import java.util.regex.Pattern;

import org.jabref.logic.cleanup.Formatter;
import org.jabref.logic.l10n.Localization;

/**
 * Simplifies LaTeX syntax. {@see org.jabref.logic.layout.format.RemoveLatexCommandsFormatter} for a formatter removing LaTeX commands completely.
 */
public class LatexCleanupFormatter extends Formatter {

    // Finds "}{" (optionally separated by -, /, or space) that is NOT inside a
    // command argument, so \mbox{-}{GPS} is left alone.
    private static final Pattern REMOVE_REDUNDANT = Pattern
            .compile("(?<!\\\\[\\p{Alpha}]{0,100}\\{[^\\}]{0,100})\\}([-/ ]?)\\{");

    // The pipeline below temporarily rewrites '$' as the marker "@@" so that
    // numbers and operators adjacent to equations can be moved inside them.
    private static final Pattern REPLACE_WITH_AT = Pattern.compile("(^|[^\\\\$])\\$");
    private static final Pattern REPLACE_EVERY_OTHER_AT = Pattern.compile("([^@]*)@@([^@]*)@@");
    private static final Pattern MOVE_NUMBERS_WITH_OPERATORS = Pattern.compile("([0-9\\(\\.]+[ ]?[-+/]?[ ]?)\\$");
    private static final Pattern MOVE_NUMBERS_RIGHT_INTO_EQUATION = Pattern.compile("@@([ ]?[-+/]?[ ]?[0-9\\)\\.]+)");
    private static final Pattern ESCAPE_PERCENT_SIGN_ONCE = Pattern.compile("(^|[^\\\\%])%");

    @Override
    public String getName() {
        return Localization.lang("LaTeX cleanup");
    }

    @Override
    public String getKey() {
        return "latex_cleanup";
    }

    @Override
    public String format(String oldString) {
        String newValue = oldString;

        // Remove redundant $, {, and }, but not if the } is part of a command argument: \mbox{-}{GPS} should not be adjusted
        newValue = newValue.replace("$$", "");
        newValue = REMOVE_REDUNDANT.matcher(newValue).replaceAll("$1");

        // Move numbers, +, -, /, and brackets into equations
        newValue = REPLACE_WITH_AT.matcher(newValue).replaceAll("$1@@"); // Replace $, but not \$ with @@
        newValue = REPLACE_EVERY_OTHER_AT.matcher(newValue).replaceAll("$1\\$$2@@"); // Replace every other @@ with $
        // newValue = newValue.replaceAll("([0-9\\(\\.]+) \\$","\\$$1\\\\ "); // Move numbers followed by a space left of $ inside the equation, e.g., 0.35 $\mu$m
        newValue = MOVE_NUMBERS_WITH_OPERATORS.matcher(newValue).replaceAll("\\$$1"); // Move numbers, possibly with operators +, -, or /, left of $ into the equation
        newValue = MOVE_NUMBERS_RIGHT_INTO_EQUATION.matcher(newValue).replaceAll(" $1@@"); // Move numbers right of @@ into the equation
        newValue = newValue.replace("@@", "$"); // Replace all @@ with $
        // NOTE(review): the next call replaces a single space with a single space and
        // so appears to be a no-op; upstream likely intended collapsing "  " to " " — verify.
        newValue = newValue.replace(" ", " "); // Clean up
        newValue = newValue.replace("$$", "");
        newValue = newValue.replace(" )$", ")$");

        newValue = ESCAPE_PERCENT_SIGN_ONCE.matcher(newValue).replaceAll("$1\\\\%"); // escape %, but do not escape \% again, used for comments in TeX
        return newValue;
    }

    @Override
    public String getDescription() {
        return Localization.lang("Cleans up LaTeX code.");
    }

    @Override
    public String getExampleInput() {
        return "{VLSI} {DSP}";
    }
}
2,948
41.73913
167
java
null
jabref-main/src/main/java/org/jabref/logic/formatter/bibtexfields/NormalizeDateFormatter.java
package org.jabref.logic.formatter.bibtexfields;

import java.util.Optional;

import org.jabref.logic.cleanup.Formatter;
import org.jabref.logic.l10n.Localization;

import org.jabref.model.entry.Date;

/**
 * This class transforms date to the format yyyy-mm-dd or yyyy-mm.
 */
public class NormalizeDateFormatter extends Formatter {

    @Override
    public String getName() {
        return Localization.lang("Normalize date");
    }

    @Override
    public String getKey() {
        return "normalize_date";
    }

    /**
     * Format date string to yyyy-mm-dd or yyyy-mm. Keeps the existing String if it does not match one of the following
     * formats:
     * "M/y" (covers 9/15, 9/2015, and 09/2015)
     * "MMMM (dd), yyyy" (covers September 1, 2015 and September, 2015)
     * "yyyy-MM-dd" (covers 2009-1-15)
     * "d.M.uuuu" (covers 15.1.2015)
     */
    @Override
    public String format(String value) {
        // Delegate parsing to the Date model; unparseable input is returned unchanged.
        Optional<Date> parsedDate = Date.parse(value);
        return parsedDate.map(Date::getNormalized)
                         .orElse(value);
    }

    @Override
    public String getDescription() {
        return Localization.lang("Normalizes the date to ISO date format.");
    }

    @Override
    public String getExampleInput() {
        return "29.11.2003";
    }
}
1,278
26.212766
119
java
null
jabref-main/src/main/java/org/jabref/logic/formatter/bibtexfields/NormalizeEnDashesFormatter.java
package org.jabref.logic.formatter.bibtexfields;

import java.util.Objects;

import org.jabref.logic.cleanup.Formatter;
import org.jabref.logic.l10n.Localization;

/**
 * Normalizes a spaced single hyphen (" - ") to the LaTeX en-dash form (" -- ").
 */
public class NormalizeEnDashesFormatter extends Formatter {

    @Override
    public String getName() {
        return Localization.lang("Normalize en dashes");
    }

    @Override
    public String getKey() {
        return "normalize_en_dashes";
    }

    @Override
    public String format(String value) {
        // Null-check added for consistency with the other formatters in this package,
        // which all reject null explicitly.
        Objects.requireNonNull(value);
        // " - " contains no regex metacharacters, so a literal replace() is
        // equivalent to the previous replaceAll() without compiling a pattern.
        return value.replace(" - ", " -- ");
    }

    @Override
    public String getDescription() {
        return Localization.lang("Normalizes the en dashes.");
    }

    @Override
    public String getExampleInput() {
        return "Winery - A Modeling Tool for TOSCA-based Cloud Applications";
    }
}
763
22.151515
77
java
null
jabref-main/src/main/java/org/jabref/logic/formatter/bibtexfields/NormalizeMonthFormatter.java
package org.jabref.logic.formatter.bibtexfields;

import java.util.Objects;
import java.util.Optional;

import org.jabref.logic.cleanup.Formatter;
import org.jabref.logic.l10n.Localization;
import org.jabref.model.entry.Month;

/**
 * Normalizes month names to the BibTeX standard abbreviation.
 */
public class NormalizeMonthFormatter extends Formatter {

    @Override
    public String getName() {
        return Localization.lang("Normalize month");
    }

    @Override
    public String getKey() {
        return "normalize_month";
    }

    @Override
    public String format(String value) {
        Objects.requireNonNull(value);
        // Unrecognized month strings are passed through unchanged.
        return Month.parse(value)
                    .map(Month::getJabRefFormat)
                    .orElse(value);
    }

    @Override
    public String getDescription() {
        return Localization.lang("Normalize month to BibTeX standard abbreviation.");
    }

    @Override
    public String getExampleInput() {
        return "December";
    }
}
922
22.666667
85
java
null
jabref-main/src/main/java/org/jabref/logic/formatter/bibtexfields/NormalizeNamesFormatter.java
package org.jabref.logic.formatter.bibtexfields;

import java.util.Objects;

import org.jabref.logic.cleanup.Formatter;
import org.jabref.logic.l10n.Localization;
import org.jabref.model.entry.AuthorList;

/**
 * Formatter normalizing a list of person names to the BibTeX format.
 */
public class NormalizeNamesFormatter extends Formatter {

    @Override
    public String getName() {
        return Localization.lang("Normalize names of persons");
    }

    @Override
    public String getKey() {
        return "normalize_names";
    }

    @Override
    public String format(String nameList) {
        Objects.requireNonNull(nameList);
        // Parse the whole list once and render it "Last, First and Last, First ...".
        return AuthorList.parse(nameList)
                         .getAsLastFirstNamesWithAnd(false);
    }

    @Override
    public String getDescription() {
        return Localization.lang("Normalizes lists of persons to the BibTeX standard.");
    }

    @Override
    public String getExampleInput() {
        return "Albert Einstein and Alan Turing";
    }
}
1,026
24.04878
88
java
null
jabref-main/src/main/java/org/jabref/logic/formatter/bibtexfields/NormalizePagesFormatter.java
package org.jabref.logic.formatter.bibtexfields;

import java.util.Objects;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

import org.jabref.logic.cleanup.Formatter;
import org.jabref.logic.formatter.casechanger.UnprotectTermsFormatter;
import org.jabref.logic.l10n.Localization;

/**
 * This class includes sensible defaults for consistent formatting of BibTeX page numbers.
 * <p>
 * Format page numbers, separated either by commas or double-hyphens.
 * Converts the range number format of the <code>pages</code> field to page_number--page_number.
 * Removes unwanted literals except letters, numbers and -+ signs.
 * Keeps the existing String if the resulting field does not match the expected Regex.
 * <p>
 * From BibTeX manual:
 * One or more page numbers or range of numbers, such as 42--111 or 7,41,73--97 or 43+
 * (the '+' in this last example indicates pages following that don't form a simple range).
 * To make it easier to maintain Scribe-compatible databases, the standard styles convert
 * a single dash (as in 7-33) to the double dash used in TEX to denote number ranges (as in 7--33).
 * <p>
 * Examples:
 *
 * <ul>
 * <li><code>1-2 -> 1--2</code></li>
 * <li><code>1---2 -> 1--2</code></li>
 * <li><code>1-2 -> 1--2</code></li>
 * <li><code>1,2,3 -> 1,2,3</code></li>
 * <li><code>{1}-{2} -> 1--2</code></li>
 * <li><code>43+ -> 43+</code></li>
 * <li>Invalid -> Invalid</li>
 * </ul>
 */
public class NormalizePagesFormatter extends Formatter {

    private static final Pattern EM_EN_DASH_PATTERN = Pattern.compile("\u2013|\u2014");
    private static final Pattern DASHES_DETECT_PATTERN = Pattern.compile("[ ]*-+[ ]*");

    private final Formatter unprotectTermsFormatter = new UnprotectTermsFormatter();

    @Override
    public String getName() {
        return Localization.lang("Normalize page numbers");
    }

    @Override
    public String getKey() {
        return "normalize_page_numbers";
    }

    @Override
    public String format(String value) {
        Objects.requireNonNull(value);

        if (value.isEmpty()) {
            return value;
        }

        value = value.trim();
        // Remove pages prefix
        value = value.replace("pp.", "").replace("p.", "").trim();
        // replace em and en dashes by --
        value = EM_EN_DASH_PATTERN.matcher(value).replaceAll("--");

        Matcher matcher = DASHES_DETECT_PATTERN.matcher(value);
        // Removed the redundant "matcher.start() >= 0" check: start() is always
        // non-negative after a successful find().
        if (matcher.find()) {
            String fixedValue = matcher.replaceFirst("--");
            if (matcher.find()) {
                // multiple occurrences --> better do no replacement
                return value;
            }
            return unprotectTermsFormatter.format(fixedValue);
        }
        return value;
    }

    @Override
    public String getDescription() {
        return Localization.lang("Normalize pages to BibTeX standard.");
    }

    @Override
    public String getExampleInput() {
        return "1 - 2";
    }
}
3,023
31.869565
99
java
null
jabref-main/src/main/java/org/jabref/logic/formatter/bibtexfields/OrdinalsToSuperscriptFormatter.java
package org.jabref.logic.formatter.bibtexfields;

import java.util.Objects;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

import org.jabref.logic.cleanup.Formatter;
import org.jabref.logic.l10n.Localization;

/**
 * This class transforms ordinal numbers into LaTeX superscripts.
 */
public class OrdinalsToSuperscriptFormatter extends Formatter {

    // find possible superscripts on word boundaries
    private static final Pattern SUPERSCRIPT_DETECT_PATTERN = Pattern.compile("\\b(\\d+)(st|nd|rd|th)\\b",
            Pattern.CASE_INSENSITIVE | Pattern.MULTILINE);

    private static final String SUPERSCRIPT_REPLACE_PATTERN = "$1\\\\textsuperscript{$2}";

    @Override
    public String getName() {
        return Localization.lang("Ordinals to LaTeX superscript");
    }

    @Override
    public String getKey() {
        return "ordinals_to_superscript";
    }

    /**
     * Converts ordinal numbers to superscripts, e.g. 1st, 2nd or 3rd.
     * Will replace ordinal numbers even if they are semantically wrong, e.g. 21rd
     *
     * <example>
     * 1st Conf. Cloud Computing -> 1\textsuperscript{st} Conf. Cloud Computing
     * </example>
     */
    @Override
    public String format(String value) {
        Objects.requireNonNull(value);

        // nothing to do for an empty value
        if (value.isEmpty()) {
            return value;
        }

        // Globally wrap each ordinal suffix in \textsuperscript{...}.
        Matcher suffixMatcher = SUPERSCRIPT_DETECT_PATTERN.matcher(value);
        return suffixMatcher.replaceAll(SUPERSCRIPT_REPLACE_PATTERN);
    }

    @Override
    public String getDescription() {
        return Localization.lang("Converts ordinals to LaTeX superscripts.");
    }

    @Override
    public String getExampleInput() {
        return "11th";
    }
}
1,793
26.6
106
java
null
jabref-main/src/main/java/org/jabref/logic/formatter/bibtexfields/RegexFormatter.java
package org.jabref.logic.formatter.bibtexfields;

import java.util.ArrayList;
import java.util.List;
import java.util.Objects;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import java.util.regex.PatternSyntaxException;

import org.jabref.logic.cleanup.Formatter;
import org.jabref.logic.l10n.Localization;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/**
 * Applies a user-supplied regular expression replacement to the field content,
 * while protecting text enclosed in curly braces from being modified.
 */
public class RegexFormatter extends Formatter {

    public static final String KEY = "regex";

    private static final Logger LOGGER = LoggerFactory.getLogger(RegexFormatter.class);

    private static final Pattern ESCAPED_OPENING_CURLY_BRACE = Pattern.compile("\\\\\\{");
    private static final Pattern ESCAPED_CLOSING_CURLY_BRACE = Pattern.compile("\\\\\\}");

    /**
     * Matches text enclosed in curly brackets. The capturing group is used to prevent part of the input from being
     * replaced.
     */
    private static final Pattern ENCLOSED_IN_CURLY_BRACES = Pattern.compile("\\{.*?}");

    private static final String REGEX_CAPTURING_GROUP = "regex";
    private static final String REPLACEMENT_CAPTURING_GROUP = "replacement";

    /**
     * Matches a valid argument to the constructor. Two capturing groups are used to parse the {@link
     * RegexFormatter#regex} and {@link RegexFormatter#replacement} used in {@link RegexFormatter#format(String)}
     */
    private static final Pattern CONSTRUCTOR_ARGUMENT = Pattern.compile(
            "^\\(\"(?<" + REGEX_CAPTURING_GROUP + ">.*?)\" *?, *?\"(?<" + REPLACEMENT_CAPTURING_GROUP + ">.*)\"\\)$");

    // Magic arbitrary unicode char, which will never appear in bibtex files
    private static final String PLACEHOLDER_FOR_PROTECTED_GROUP = Character.toString('\u0A14');
    private static final String PLACEHOLDER_FOR_OPENING_CURLY_BRACE = Character.toString('\u0A15');
    private static final String PLACEHOLDER_FOR_CLOSING_CURLY_BRACE = Character.toString('\u0A16');

    private final String regex;
    private final String replacement;

    /**
     * Constructs a new regular expression-based formatter with the given RegEx.
     *
     * @param input the regular expressions for matching and replacing given in the form {@code ("<regex>",
     *              "<replace>")}.
     */
    public RegexFormatter(String input) {
        Objects.requireNonNull(input);
        input = input.trim();
        Matcher constructorArgument = CONSTRUCTOR_ARGUMENT.matcher(input);
        if (constructorArgument.matches()) {
            regex = constructorArgument.group(REGEX_CAPTURING_GROUP);
            replacement = constructorArgument.group(REPLACEMENT_CAPTURING_GROUP);
        } else {
            regex = null;
            replacement = null;
            LOGGER.warn("RegexFormatter could not parse the input: {}", input);
        }
    }

    @Override
    public String getName() {
        return Localization.lang("regular expression");
    }

    @Override
    public String getKey() {
        return KEY;
    }

    /**
     * Runs the user regex over {@code input} with every {curly-braced} span
     * swapped out for a placeholder first, then restores the protected spans.
     */
    private String replaceHonoringProtectedGroups(final String input) {
        Matcher matcher = ENCLOSED_IN_CURLY_BRACES.matcher(input);

        List<String> replaced = new ArrayList<>();
        while (matcher.find()) {
            replaced.add(matcher.group());
        }
        String workingString = matcher.replaceAll(PLACEHOLDER_FOR_PROTECTED_GROUP);
        try {
            workingString = workingString.replaceAll(regex, replacement);
        } catch (PatternSyntaxException e) {
            LOGGER.warn("There is a syntax error in the regular expression \"{}\" used by the regex modifier", regex, e);
            return input;
        }
        for (String r : replaced) {
            // Fixed: quoteReplacement is required because the protected text may
            // itself contain '\' or '$', which replaceFirst would otherwise
            // interpret as replacement metacharacters (and could throw).
            workingString = workingString.replaceFirst(PLACEHOLDER_FOR_PROTECTED_GROUP, Matcher.quoteReplacement(r));
        }
        return workingString;
    }

    @Override
    public String format(final String input) {
        Objects.requireNonNull(input);
        if (regex == null || replacement == null) {
            // Constructor argument could not be parsed; act as identity.
            return input;
        }
        // Hide escaped braces behind placeholders so they survive group protection.
        Matcher escapedOpeningCurlyBrace = ESCAPED_OPENING_CURLY_BRACE.matcher(input);
        String inputWithPlaceholder = escapedOpeningCurlyBrace.replaceAll(PLACEHOLDER_FOR_OPENING_CURLY_BRACE);

        Matcher escapedClosingCurlyBrace = ESCAPED_CLOSING_CURLY_BRACE.matcher(inputWithPlaceholder);
        inputWithPlaceholder = escapedClosingCurlyBrace.replaceAll(PLACEHOLDER_FOR_CLOSING_CURLY_BRACE);

        final String regexMatchesReplaced = replaceHonoringProtectedGroups(inputWithPlaceholder);

        return regexMatchesReplaced
                .replaceAll(PLACEHOLDER_FOR_OPENING_CURLY_BRACE, "\\\\{")
                .replaceAll(PLACEHOLDER_FOR_CLOSING_CURLY_BRACE, "\\\\}");
    }

    @Override
    public String getDescription() {
        return Localization.lang("Add a regular expression for the key pattern.");
    }

    @Override
    public String getExampleInput() {
        return "Please replace the spaces";
    }
}
4,935
39.459016
121
java
null
jabref-main/src/main/java/org/jabref/logic/formatter/bibtexfields/RemoveBracesFormatter.java
package org.jabref.logic.formatter.bibtexfields; import java.util.Objects; import org.jabref.logic.cleanup.Formatter; import org.jabref.logic.l10n.Localization; public class RemoveBracesFormatter extends Formatter { @Override public String getName() { return Localization.lang("Remove enclosing braces"); } @Override public String getKey() { return "remove_braces"; } @Override public String format(String value) { Objects.requireNonNull(value); String formatted = value; while ((formatted.length() >= 2) && (formatted.charAt(0) == '{') && (formatted.charAt(formatted.length() - 1) == '}')) { String trimmed = formatted.substring(1, formatted.length() - 1); // It could be that the removed braces were not matching // For example, "{A} test {B}" results in "A} test {B" // In this case, trimmed has a closing } without an opening { before that if (hasNegativeBraceCount(trimmed)) { return formatted; } else { formatted = trimmed; } } return formatted; } @Override public String getDescription() { return Localization.lang("Removes braces encapsulating the complete field content."); } @Override public String getExampleInput() { return "{In CDMA}"; } /** * Check if a string at any point has had more ending } braces than opening { ones. * Will e.g. return true for the string "DNA} blahblal {EPA" * * @param value The string to check. * @return true if at any index the brace count is negative. */ private boolean hasNegativeBraceCount(String value) { int braceCount = 0; for (int index = 0; index < value.length(); index++) { char charAtIndex = value.charAt(index); if (charAtIndex == '{') { braceCount++; } else if (charAtIndex == '}') { braceCount--; } if (braceCount < 0) { return true; } } return false; } }
2,183
28.513514
117
java
null
jabref-main/src/main/java/org/jabref/logic/formatter/bibtexfields/RemoveDigitsFormatter.java
package org.jabref.logic.formatter.bibtexfields;

import java.util.Objects;
import java.util.regex.Pattern;

import org.jabref.logic.cleanup.Formatter;
import org.jabref.logic.l10n.Localization;

public class RemoveDigitsFormatter extends Formatter {

    // Matches a single space followed by a run of digits.
    private static final Pattern DIGITS = Pattern.compile("[ ]\\d+");

    @Override
    public String getKey() {
        return "remove_digits";
    }

    @Override
    public String getName() {
        return Localization.lang("Remove digits");
    }

    /**
     * Deletes every space-plus-digits group from the field content.
     */
    @Override
    public String format(String value) {
        Objects.requireNonNull(value);
        return DIGITS.matcher(value).replaceAll("");
    }

    @Override
    public String getDescription() {
        return Localization.lang("Removes digits.");
    }

    @Override
    public String getExampleInput() {
        return "In 012 CDMA";
    }
}
863
20.6
69
java
null
jabref-main/src/main/java/org/jabref/logic/formatter/bibtexfields/RemoveHyphenatedNewlinesFormatter.java
package org.jabref.logic.formatter.bibtexfields;

import java.util.Objects;
import java.util.regex.Pattern;

import org.jabref.logic.cleanup.Formatter;
import org.jabref.logic.l10n.Localization;

/**
 * Removes all hyphenated line breaks in the string.
 */
public class RemoveHyphenatedNewlinesFormatter extends Formatter {

    // A hyphen immediately followed by any Unicode line break sequence.
    private static final Pattern HYPHENATED_WORDS = Pattern.compile("-\\R");

    @Override
    public String getKey() {
        return "remove_hyphenated_newlines";
    }

    @Override
    public String getName() {
        return Localization.lang("Remove hyphenated line breaks");
    }

    /**
     * Joins words that were split across lines with a hyphen, then trims the result.
     */
    @Override
    public String format(String value) {
        Objects.requireNonNull(value);
        String joined = HYPHENATED_WORDS.matcher(value).replaceAll("");
        return joined.trim();
    }

    @Override
    public String getDescription() {
        return Localization.lang("Removes all hyphenated line breaks in the field content.");
    }

    @Override
    public String getExampleInput() {
        return "Gimme shel-\nter";
    }
}
1,057
23.604651
93
java
null
jabref-main/src/main/java/org/jabref/logic/formatter/bibtexfields/RemoveNewlinesFormatter.java
package org.jabref.logic.formatter.bibtexfields;

import java.util.Objects;
import java.util.regex.Pattern;

import org.jabref.logic.cleanup.Formatter;
import org.jabref.logic.l10n.Localization;

/**
 * Removes all line breaks in the string.
 */
public class RemoveNewlinesFormatter extends Formatter {

    // Any Unicode line break sequence.
    private static final Pattern LINEBREAKS = Pattern.compile("\\R");

    @Override
    public String getKey() {
        return "remove_newlines";
    }

    @Override
    public String getName() {
        return Localization.lang("Remove line breaks");
    }

    /**
     * Replaces every line break with a single space, then trims the result.
     */
    @Override
    public String format(String value) {
        Objects.requireNonNull(value);
        String joined = LINEBREAKS.matcher(value).replaceAll(" ");
        return joined.trim();
    }

    @Override
    public String getDescription() {
        return Localization.lang("Removes all line breaks in the field content.");
    }

    @Override
    public String getExampleInput() {
        return "In \n CDMA";
    }
}
985
21.930233
82
java
null
jabref-main/src/main/java/org/jabref/logic/formatter/bibtexfields/RemoveRedundantSpacesFormatter.java
package org.jabref.logic.formatter.bibtexfields;

import java.util.Objects;
import java.util.regex.Pattern;

import org.jabref.logic.cleanup.Formatter;
import org.jabref.logic.l10n.Localization;

/**
 * Finds any occurrence of consecutive spaces and replaces it with a single space
 */
public class RemoveRedundantSpacesFormatter extends Formatter {

    // Two or more consecutive space characters.
    private static final Pattern MULTIPLE_SPACES = Pattern.compile(" {2,}");

    @Override
    public String getKey() {
        return "remove_redundant_spaces";
    }

    @Override
    public String getName() {
        return Localization.lang("Remove redundant spaces");
    }

    /**
     * Collapses every run of two or more spaces into one.
     */
    @Override
    public String format(String value) {
        Objects.requireNonNull(value);
        return MULTIPLE_SPACES.matcher(value).replaceAll(" ");
    }

    @Override
    public String getDescription() {
        return Localization.lang("Replaces consecutive spaces with a single space in the field content.");
    }

    @Override
    public String getExampleInput() {
        return "In   CDMA";
    }
}
1,050
24.02381
106
java
null
jabref-main/src/main/java/org/jabref/logic/formatter/bibtexfields/ReplaceTabsBySpaceFormater.java
package org.jabref.logic.formatter.bibtexfields;

import java.util.Objects;
import java.util.regex.Pattern;

import org.jabref.logic.cleanup.Formatter;
import org.jabref.logic.l10n.Localization;

/**
 * Replaces any tab with a space
 */
public class ReplaceTabsBySpaceFormater extends Formatter {

    // One or more consecutive tab characters.
    private static final Pattern TAB = Pattern.compile("\t+");

    @Override
    public String getKey() {
        return "remove_tabs";
    }

    @Override
    public String getName() {
        return Localization.lang("Replace tabs with space");
    }

    /**
     * Collapses every run of tabs into a single space.
     */
    @Override
    public String format(String value) {
        Objects.requireNonNull(value);
        return TAB.matcher(value).replaceAll(" ");
    }

    @Override
    public String getDescription() {
        return Localization.lang("Replace tabs with space in the field content.");
    }

    @Override
    public String getExampleInput() {
        return "In \t\t CDMA";
    }
}
938
21.357143
82
java
null
jabref-main/src/main/java/org/jabref/logic/formatter/bibtexfields/ShortenDOIFormatter.java
package org.jabref.logic.formatter.bibtexfields; import java.util.Objects; import org.jabref.logic.cleanup.Formatter; import org.jabref.logic.importer.util.ShortDOIService; import org.jabref.logic.importer.util.ShortDOIServiceException; import org.jabref.logic.l10n.Localization; import org.jabref.model.entry.identifier.DOI; import org.slf4j.Logger; import org.slf4j.LoggerFactory; public class ShortenDOIFormatter extends Formatter { private static final Logger LOGGER = LoggerFactory.getLogger(ShortenDOIFormatter.class); @Override public String getName() { return Localization.lang("Shorten DOI"); } @Override public String getKey() { return "short_doi"; } @Override public String format(String value) { Objects.requireNonNull(value); return DOI.parse(value) .map(doi -> { try { return new ShortDOIService().getShortDOI(doi).getDOI(); } catch (ShortDOIServiceException e) { LOGGER.error(e.getMessage(), e); return value; } }).orElse(value); } @Override public String getDescription() { return Localization.lang("Shortens DOI to more human readable form."); } @Override public String getExampleInput() { return "10.1006/jmbi.1998.2354"; } }
1,446
26.826923
92
java
null
jabref-main/src/main/java/org/jabref/logic/formatter/bibtexfields/TrimWhitespaceFormatter.java
package org.jabref.logic.formatter.bibtexfields;

import java.util.Objects;

import org.jabref.logic.cleanup.Formatter;
import org.jabref.logic.l10n.Localization;

/**
 * Trim all whitespace characters (as defined in Java) in the beginning and at the end of the string.
 */
public class TrimWhitespaceFormatter extends Formatter {

    @Override
    public String getKey() {
        return "trim_whitespace";
    }

    @Override
    public String getName() {
        return Localization.lang("Trim whitespace characters");
    }

    /**
     * Removes leading and trailing whitespace via {@link String#trim()}.
     */
    @Override
    public String format(String value) {
        Objects.requireNonNull(value);
        return value.trim();
    }

    @Override
    public String getDescription() {
        return Localization.lang("Trim all whitespace characters in the field content.");
    }

    @Override
    public String getExampleInput() {
        return "\r\n InCDMA\n\r ";
    }
}
904
22.205128
101
java
null
jabref-main/src/main/java/org/jabref/logic/formatter/bibtexfields/UnicodeToLatexFormatter.java
package org.jabref.logic.formatter.bibtexfields; import java.util.Map; import java.util.Objects; import org.jabref.logic.cleanup.Formatter; import org.jabref.logic.l10n.Localization; import org.jabref.logic.layout.LayoutFormatter; import org.jabref.logic.util.strings.HTMLUnicodeConversionMaps; import org.slf4j.Logger; import org.slf4j.LoggerFactory; public class UnicodeToLatexFormatter extends Formatter implements LayoutFormatter { private static final Logger LOGGER = LoggerFactory.getLogger(UnicodeToLatexFormatter.class); @Override public String format(String text) { String result = Objects.requireNonNull(text); if (result.isEmpty()) { return result; } // Standard symbols for (Map.Entry<String, String> unicodeLatexPair : HTMLUnicodeConversionMaps.UNICODE_LATEX_CONVERSION_MAP .entrySet()) { result = result.replace(unicodeLatexPair.getKey(), unicodeLatexPair.getValue()); } // Combining accents StringBuilder sb = new StringBuilder(); boolean consumed = false; for (int i = 0; i <= (result.length() - 2); i++) { if (!consumed && (i < (result.length() - 1))) { int cpCurrent = result.codePointAt(i); Integer cpNext = result.codePointAt(i + 1); String code = HTMLUnicodeConversionMaps.ESCAPED_ACCENTS.get(cpNext); if (code == null) { // skip next index to avoid reading surrogate as a separate char if (!Character.isBmpCodePoint(cpCurrent)) { i++; } sb.appendCodePoint(cpCurrent); } else { sb.append("{\\").append(code).append('{').append((char) cpCurrent).append("}}"); consumed = true; } } else { consumed = false; } } if (!consumed) { sb.append((char) result.codePointAt(result.length() - 1)); } result = sb.toString(); // Check if any symbols is not converted for (int i = 0; i <= (result.length() - 1); i++) { int cp = result.codePointAt(i); if (cp >= 129) { LOGGER.warn("Unicode character not converted: " + cp); } } return result; } @Override public String getDescription() { return Localization.lang("Converts Unicode characters to LaTeX encoding."); } @Override public String 
getExampleInput() { return "Mönch"; } @Override public String getName() { return Localization.lang("Unicode to LaTeX"); } @Override public String getKey() { return "unicode_to_latex"; } }
2,832
30.831461
112
java
null
jabref-main/src/main/java/org/jabref/logic/formatter/bibtexfields/UnitsToLatexFormatter.java
package org.jabref.logic.formatter.bibtexfields; import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; import java.util.List; import java.util.Objects; import org.jabref.logic.cleanup.Formatter; import org.jabref.logic.l10n.Localization; import org.jabref.logic.util.strings.StringLengthComparator; public class UnitsToLatexFormatter extends Formatter { private static final List<String> UNIT_LIST = Arrays.asList( "A", // Ampere "Ah", // Ampere hours "B", // Byte "Bq", // Bequerel "C", // Coulomb "F", // Farad "Gy", // Gray "H", // Henry "Hz", // Hertz "J", // Joule "K", // Kelvin "N", // Newton "\\$\\\\Omega\\$", // Ohm "Pa", // Pascal "S", // Siemens, Samples "Sa", // Samples "Sv", // Sv "T", // Tesla "V", // Volt "VA", // Volt ampere "W", // Watt "Wb", // Weber "Wh", // Watt hours "bar", // bar "b", // bit "cd", // candela "dB", // decibel "dBm", // decibel "dBc", // decibel "eV", // electron volts "inch", // inch "kat", // katal "lm", // lumen "lx", // lux "m", // meters "mol", // mol "rad", // radians "s", // seconds "sr" // steradians ); private static final List<String> UNIT_PREFIX_LIST = Arrays.asList( "y", // yocto "z", // zepto "a", // atto "f", // femto "p", // pico "n", // nano "\\$\\\\mu\\$", // micro "u", // micro "m", // milli "c", // centi "d", // deci "", // no prefix "da", // deca "h", // hekto "k", // kilo "M", // mega "G", // giga "T", // tera "P", // peta "E", // exa "Z", // zetta "Y" // yotta ); private final List<String> prefixUnitCombinations; public UnitsToLatexFormatter() { prefixUnitCombinations = new ArrayList<>( UnitsToLatexFormatter.UNIT_LIST.size() * UnitsToLatexFormatter.UNIT_PREFIX_LIST.size()); for (String unit : UnitsToLatexFormatter.UNIT_LIST) { for (String prefix : UnitsToLatexFormatter.UNIT_PREFIX_LIST) { prefixUnitCombinations.add(prefix + unit); } } Collections.sort(prefixUnitCombinations, new StringLengthComparator()); // Sort based on string length } @Override public String format(String text) { 
Objects.requireNonNull(text); if (text.isEmpty()) { return text; } // Replace the hyphen in 12-bit etc with a non-breaking hyphen, will also avoid bad casing of 12-Bit String result = text.replaceAll("([0-9,\\.]+)-([Bb][Ii][Tt])", "$1\\\\mbox\\{-\\}$2"); // Replace the space in 12 bit etc with a non-breaking space, will also avoid bad casing of 12 Bit result = result.replaceAll("([0-9,\\.]+) ([Bb][Ii][Tt])", "$1~$2"); // For each word in the list for (String listOfWord : prefixUnitCombinations) { // Add {} if the character before is a space, -, /, (, [, or } or if it is at the start of the string but not if it is followed by a } result = result.replaceAll("([0-9])(" + listOfWord + ")", "$1\\{$2\\}"); // Only add brackets to keep case result = result.replaceAll("([0-9])-(" + listOfWord + ")", "$1\\\\mbox\\{-\\}\\{$2\\}"); // Replace hyphen with non-break hyphen result = result.replaceAll("([0-9]) (" + listOfWord + ")", "$1~\\{$2\\}"); // Replace space with a hard space } return result; } @Override public String getDescription() { return Localization.lang("Converts units to LaTeX formatting."); } @Override public String getExampleInput() { return "1 Hz"; } @Override public String getName() { return Localization.lang("Units to LaTeX"); } @Override public String getKey() { return "units_to_latex"; } }
4,420
30.805755
146
java
null
jabref-main/src/main/java/org/jabref/logic/formatter/casechanger/CapitalizeFormatter.java
package org.jabref.logic.formatter.casechanger;

import org.jabref.logic.cleanup.Formatter;
import org.jabref.logic.l10n.Localization;

public class CapitalizeFormatter extends Formatter {

    @Override
    public String getName() {
        return Localization.lang("Capitalize");
    }

    @Override
    public String getKey() {
        return "capitalize";
    }

    /**
     * Upper-cases the first letter of every word (also after a hyphen) and
     * lower-cases the rest; words protected with "{" are left untouched.
     */
    @Override
    public String format(String input) {
        Title title = new Title(input);
        title.getWords().forEach(Word::toUpperFirstIgnoreHyphen);
        return title.toString();
    }

    @Override
    public String getDescription() {
        return Localization.lang(
                "Changes The First Letter Of All Words To Capital Case And The Remaining Letters To Lower Case.");
    }

    @Override
    public String getExampleInput() {
        return "I have {a} DREAM";
    }
}
1,059
24.853659
160
java
null
jabref-main/src/main/java/org/jabref/logic/formatter/casechanger/LowerCaseFormatter.java
package org.jabref.logic.formatter.casechanger;

import org.jabref.logic.cleanup.Formatter;
import org.jabref.logic.l10n.Localization;

public class LowerCaseFormatter extends Formatter {

    @Override
    public String getName() {
        return Localization.lang("lower case");
    }

    @Override
    public String getKey() {
        return "lower_case";
    }

    /**
     * Lower-cases every character of the string; words protected with "{" are
     * left untouched.
     */
    @Override
    public String format(String input) {
        Title title = new Title(input);
        title.getWords().forEach(Word::toLowerCase);
        return title.toString();
    }

    @Override
    public String getDescription() {
        return Localization.lang("changes all letters to lower case.");
    }

    @Override
    public String getExampleInput() {
        return "KDE {Amarok}";
    }
}
907
21.7
103
java
null
jabref-main/src/main/java/org/jabref/logic/formatter/casechanger/ProtectTermsFormatter.java
package org.jabref.logic.formatter.casechanger; import java.util.List; import java.util.Objects; import org.jabref.logic.cleanup.Formatter; import org.jabref.logic.l10n.Localization; import org.jabref.logic.protectedterms.ProtectedTermsLoader; import org.jabref.logic.util.strings.StringLengthComparator; /** * Adds {} brackets around acronyms, month names and countries to preserve their case. * * Related formatter: {@link org.jabref.logic.formatter.bibtexfields.RemoveBracesFormatter} */ public class ProtectTermsFormatter extends Formatter { private final ProtectedTermsLoader protectedTermsLoader; public ProtectTermsFormatter(ProtectedTermsLoader protectedTermsLoader) { this.protectedTermsLoader = protectedTermsLoader; } private String format(String text, List<String> listOfWords) { String result = text; listOfWords.sort(new StringLengthComparator()); // For each word in the list for (String listOfWord : listOfWords) { // Add {} if the character before is a space, -, /, (, [, ", or } or if it is at the start of the string but not if it is followed by a } result = result.replaceAll("(^|[- /\\[(}\"])" + listOfWord + "($|[^a-zA-Z}])", "$1\\{" + listOfWord + "\\}$2"); } return result; } @Override public String format(String text) { Objects.requireNonNull(text); if (text.isEmpty()) { return text; } return this.format(text, this.protectedTermsLoader.getProtectedTerms()); } @Override public String getDescription() { return Localization.lang( "Adds {} brackets around acronyms, month names and countries to preserve their case."); } @Override public String getExampleInput() { return "In CDMA"; } @Override public String getName() { return Localization.lang("Protect terms"); } @Override public String getKey() { return "protect_terms"; } }
2,022
30.123077
149
java
null
jabref-main/src/main/java/org/jabref/logic/formatter/casechanger/SentenceCaseFormatter.java
package org.jabref.logic.formatter.casechanger;

import java.util.stream.Collectors;

import org.jabref.logic.cleanup.Formatter;
import org.jabref.logic.l10n.Localization;
import org.jabref.model.strings.StringUtil;

public class SentenceCaseFormatter extends Formatter {

    @Override
    public String getName() {
        return Localization.lang("Sentence case");
    }

    @Override
    public String getKey() {
        return "sentence_case";
    }

    /**
     * Lower-cases each sentence and then upper-cases the first letter of its
     * first word; words protected with "{" are left untouched.
     */
    @Override
    public String format(String input) {
        return StringUtil.getStringAsSentences(input)
                         .stream()
                         .map(this::capitalizeSentence)
                         .collect(Collectors.joining(" "));
    }

    /**
     * Applies sentence case to a single sentence.
     */
    private String capitalizeSentence(String sentence) {
        Title title = new Title(new LowerCaseFormatter().format(sentence));
        title.getFirstWord().ifPresent(Word::toUpperFirst);
        return title.toString();
    }

    @Override
    public String getDescription() {
        return Localization.lang(
                "Capitalize the first word, changes other words to lower case.");
    }

    @Override
    public String getExampleInput() {
        return "i have {Aa} DREAM";
    }
}
1,463
28.877551
242
java
null
jabref-main/src/main/java/org/jabref/logic/formatter/casechanger/Title.java
package org.jabref.logic.formatter.casechanger;

import java.util.Collections;
import java.util.LinkedList;
import java.util.List;
import java.util.Optional;
import java.util.stream.Collectors;

/**
 * Represents a title of a bibtex entry.
 */
public final class Title {

    private final List<Word> words = new LinkedList<>();

    public Title(String title) {
        this.words.addAll(new TitleParser().parse(title));
    }

    /** Returns an unmodifiable view of the words of this title. */
    public List<Word> getWords() {
        return Collections.unmodifiableList(words);
    }

    /** Returns the first word, or empty if the title has no words. */
    public Optional<Word> getFirstWord() {
        return words.isEmpty() ? Optional.empty() : Optional.of(words.get(0));
    }

    /** Returns the last word, or empty if the title has no words. */
    public Optional<Word> getLastWord() {
        return words.isEmpty() ? Optional.empty() : Optional.of(words.get(words.size() - 1));
    }

    @Override
    public String toString() {
        return words.stream().map(Word::toString).collect(Collectors.joining(" "));
    }
}
1,039
23.186047
83
java
null
jabref-main/src/main/java/org/jabref/logic/formatter/casechanger/TitleCaseFormatter.java
package org.jabref.logic.formatter.casechanger;

import java.util.stream.Collectors;

import org.jabref.logic.cleanup.Formatter;
import org.jabref.logic.l10n.Localization;
import org.jabref.model.strings.StringUtil;

public class TitleCaseFormatter extends Formatter {

    @Override
    public String getName() {
        return Localization.lang("Title Case");
    }

    @Override
    public String getKey() {
        return "title_case";
    }

    /**
     * Converts each sentence to title case: small words (articles, prepositions,
     * conjunctions) become lower case, all other words get an upper-case first
     * letter. The first and last word, and any word after a colon, are always
     * capitalized. Words protected with "{" are left untouched.
     */
    @Override
    public String format(String input) {
        return StringUtil.getStringAsSentences(input)
                         .stream()
                         .map(this::toTitleCase)
                         .collect(Collectors.joining(" "));
    }

    /**
     * Applies title case to a single sentence.
     */
    private String toTitleCase(String sentence) {
        Title title = new Title(sentence);

        for (Word word : title.getWords()) {
            if (word.isSmallerWord()) {
                word.toLowerCase();
            } else {
                word.toUpperFirstTitle();
            }
        }

        // First and last word are always capitalized
        title.getFirstWord().ifPresent(Word::toUpperFirstTitle);
        title.getLastWord().ifPresent(Word::toUpperFirstTitle);

        // A word following a colon is capitalized as well
        for (int i = 0; i < (title.getWords().size() - 2); i++) {
            if (title.getWords().get(i).endsWithColon()) {
                title.getWords().get(i + 1).toUpperFirstTitle();
            }
        }

        return title.toString();
    }

    @Override
    public String getDescription() {
        return Localization.lang(
                "Capitalize all Words, but Converts Articles, Prepositions, and Conjunctions to Lower Case.");
    }

    @Override
    public String getExampleInput() {
        return "{BPMN} conformance In open source Engines";
    }
}
1,969
31.295082
110
java
null
jabref-main/src/main/java/org/jabref/logic/formatter/casechanger/TitleParser.java
package org.jabref.logic.formatter.casechanger;

import java.util.LinkedList;
import java.util.List;
import java.util.Optional;

/**
 * Parses a title to a list of words.
 */
public final class TitleParser {

    // Characters of the word currently being collected.
    private StringBuilder buffer;
    // Index in the title where the current word starts, or -1 if none started.
    private int wordStart;

    public List<Word> parse(String title) {
        List<Word> words = new LinkedList<>();

        boolean[] isProtected = determineProtectedChars(title);

        reset();
        for (int index = 0; index < title.length(); index++) {
            char c = title.charAt(index);
            if (Character.isWhitespace(c)) {
                // word boundary: flush the buffered characters
                createWord(isProtected).ifPresent(words::add);
            } else {
                if (wordStart == -1) {
                    wordStart = index;
                }
                buffer.append(c);
            }
        }
        // flush the trailing word, if any
        createWord(isProtected).ifPresent(words::add);

        return words;
    }

    /**
     * Builds a Word from the buffered characters together with their protection
     * flags, then resets the buffer. Returns empty when no characters are buffered.
     */
    private Optional<Word> createWord(boolean[] isProtected) {
        if (buffer.length() == 0) {
            return Optional.empty();
        }

        char[] wordChars = buffer.toString().toCharArray();
        boolean[] wordProtected = new boolean[wordChars.length];
        System.arraycopy(isProtected, wordStart, wordProtected, 0, wordChars.length);

        reset();

        return Optional.of(new Word(wordChars, wordProtected));
    }

    private void reset() {
        wordStart = -1;
        buffer = new StringBuilder();
    }

    /**
     * Marks every character that lies inside {} braces as protected; the braces
     * themselves are not marked.
     */
    private static boolean[] determineProtectedChars(String title) {
        boolean[] isProtected = new boolean[title.length()];

        int depth = 0;
        for (int i = 0; i < title.length(); i++) {
            char c = title.charAt(i);
            if (c == '{') {
                depth++;
            } else if (c == '}') {
                depth--;
            } else {
                isProtected[i] = depth > 0;
            }
        }
        return isProtected;
    }
}
1,951
23.708861
82
java
null
jabref-main/src/main/java/org/jabref/logic/formatter/casechanger/UnprotectTermsFormatter.java
package org.jabref.logic.formatter.casechanger; import java.util.Objects; import org.jabref.logic.cleanup.Formatter; import org.jabref.logic.l10n.Localization; /** * Remove {} braces around words in case they appear balanced * * Related formatter: {@link ProtectTermsFormatter} */ public class UnprotectTermsFormatter extends Formatter { @Override public String format(String text) { // similar implementation at {@link org.jabref.logic.formatter.bibtexfields.RemoveBracesFormatter.hasNegativeBraceCount} Objects.requireNonNull(text); if (text.isEmpty()) { return text; } StringBuilder result = new StringBuilder(); int level = 0; int index = 0; do { char charAtIndex = text.charAt(index); if (charAtIndex == '{') { level++; } else if (charAtIndex == '}') { level--; } else { result.append(charAtIndex); } index++; } while (index < text.length() && level >= 0); if (level != 0) { // in case of unbalanced braces, the original text is returned unmodified return text; } return result.toString(); } @Override public String getDescription() { return Localization.lang( "Removes all balanced {} braces around words."); } @Override public String getExampleInput() { return "{In} {CDMA}"; } @Override public String getName() { return Localization.lang("Unprotect terms"); } @Override public String getKey() { return "unprotect_terms"; } }
1,705
25.65625
128
java
null
jabref-main/src/main/java/org/jabref/logic/formatter/casechanger/UpperCaseFormatter.java
package org.jabref.logic.formatter.casechanger;

import org.jabref.logic.cleanup.Formatter;
import org.jabref.logic.l10n.Localization;

/**
 * Converts all characters of the given string to upper case, but does not change words starting with "{"
 */
public class UpperCaseFormatter extends Formatter {

    @Override
    public String getName() {
        return Localization.lang("UPPER CASE");
    }

    @Override
    public String getKey() {
        return "upper_case";
    }

    /**
     * Upper-cases every character of the string; words protected with "{" are
     * left untouched.
     */
    @Override
    public String format(String input) {
        Title title = new Title(input);
        title.getWords().forEach(Word::toUpperCase);
        return title.toString();
    }

    @Override
    public String getDescription() {
        return Localization.lang(
                "CHANGES ALL LETTER TO UPPER CASE.");
    }

    @Override
    public String getExampleInput() {
        return "Kde {Amarok}";
    }
}
917
21.390244
105
java
null
jabref-main/src/main/java/org/jabref/logic/formatter/casechanger/Word.java
package org.jabref.logic.formatter.casechanger;

import java.util.Arrays;
import java.util.HashSet;
import java.util.Locale;
import java.util.Objects;
import java.util.Set;
import java.util.stream.Collectors;

/**
 * Represents a word in a title of a bibtex entry.
 * <p>
 * A word can have protected chars (enclosed in '{' '}') and may be a small (a, an, the, ...) word.
 */
public final class Word {

    /**
     * Set containing common lowercase function words
     */
    public static final Set<String> SMALLER_WORDS;
    public static final Set<Character> DASHES;
    public static final Set<String> CONJUNCTIONS;

    private final char[] chars;
    private final boolean[] protectedChars;

    static {
        Set<String> smallerWords = new HashSet<>();
        Set<Character> dashes = new HashSet<>();
        Set<String> conjunctions = new HashSet<>();

        // Conjunctions used as part of Title case capitalisation to specifically check if word is conjunction or not
        conjunctions.addAll(Arrays.asList("and", "but", "for", "nor", "or", "so", "yet"));

        // Articles
        smallerWords.addAll(Arrays.asList("a", "an", "the"));

        // Prepositions
        smallerWords.addAll(Arrays.asList("above", "about", "across", "against", "along", "among", "around", "at",
                "before", "behind", "below", "beneath", "beside", "between", "beyond", "by", "down", "during",
                "except", "for", "from", "in", "inside", "into", "like", "near", "of", "off", "on", "onto", "since",
                "to", "toward", "through", "under", "until", "up", "upon", "with", "within", "without"));

        // Conjunctions used as part of all case capitalisation to check if it is a small word or not
        smallerWords.addAll(conjunctions);

        // Dashes
        dashes.addAll(Arrays.asList(
                '-', '~', '⸗', '〰', '᐀', '֊', '־', '‐', '‑', '‒', '–', '—',
                '―', '⁓', '⁻', '₋', '−', '⸺', '⸻', '〜', '゠', '︱', '︲', '﹘',
                '﹣', '-'
        ));

        // These fields are public; expose genuinely unmodifiable copies so callers
        // cannot mutate shared static state (previous code exposed mutable HashSets).
        DASHES = Set.copyOf(dashes);
        CONJUNCTIONS = Set.copyOf(conjunctions);
        SMALLER_WORDS = smallerWords.stream()
                                    .map(word -> word.toLowerCase(Locale.ROOT))
                                    .collect(Collectors.toUnmodifiableSet());
    }

    public Word(char[] chars, boolean[] protectedChars) {
        this.chars = Objects.requireNonNull(chars);
        this.protectedChars = Objects.requireNonNull(protectedChars);
        if (this.chars.length != this.protectedChars.length) {
            throw new IllegalArgumentException("the chars and the protectedChars array must be of same length");
        }
    }

    /**
     * Case-insensitive check against {@link Word#SMALLER_WORDS}. Checks for common function words.
     */
    public static boolean isSmallerWord(String word) {
        return SMALLER_WORDS.contains(word.toLowerCase(Locale.ROOT));
    }

    /**
     * Only change letters of the word that are unprotected to upper case.
     */
    public void toUpperCase() {
        for (int i = 0; i < chars.length; i++) {
            if (!protectedChars[i]) {
                chars[i] = Character.toUpperCase(chars[i]);
            }
        }
    }

    /**
     * Only change letters of the word that are unprotected to lower case.
     */
    public void toLowerCase() {
        for (int i = 0; i < chars.length; i++) {
            if (!protectedChars[i]) {
                chars[i] = Character.toLowerCase(chars[i]);
            }
        }
    }

    /**
     * Upper-cases the first unprotected character and lower-cases the rest.
     */
    public void toUpperFirst() {
        for (int i = 0; i < chars.length; i++) {
            if (!protectedChars[i]) {
                chars[i] = (i == 0) ? Character.toUpperCase(chars[i]) : Character.toLowerCase(chars[i]);
            }
        }
    }

    /**
     * Like {@link #toUpperFirst()}, but also upper-cases every character that
     * directly follows a dash.
     */
    public void toUpperFirstIgnoreHyphen() {
        for (int i = 0; i < chars.length; i++) {
            if (!protectedChars[i]) {
                chars[i] = (i == 0 || (DASHES.contains(chars[i - 1])))
                        ? Character.toUpperCase(chars[i])
                        : Character.toLowerCase(chars[i]);
            }
        }
    }

    /**
     * Title-case variant of {@link #toUpperFirstIgnoreHyphen()}: a sub-word after
     * a dash is capitalized only when it is NOT a conjunction (e.g. "Rock-and-Roll"
     * keeps "and" lower case).
     */
    public void toUpperFirstTitle() {
        for (int i = 0; i < chars.length; i++) {
            if (!protectedChars[i]) {
                // Fixed naming bug: the former helper was called isConjunction but
                // returned the opposite; the capitalization condition is unchanged.
                boolean capitalize = (i == 0)
                        || (DASHES.contains(chars[i - 1]) && !isConjunction(subWordStartingAt(i)));
                chars[i] = capitalize ? Character.toUpperCase(chars[i]) : Character.toLowerCase(chars[i]);
            }
        }
    }

    /**
     * Returns the dash-delimited sub-word starting at {@code start}.
     */
    private String subWordStartingAt(int start) {
        StringBuilder word = new StringBuilder();
        for (int i = start; (i < chars.length) && !DASHES.contains(chars[i]); i++) {
            word.append(chars[i]);
        }
        return word.toString();
    }

    /**
     * True if the given sub-word is one of {@link #CONJUNCTIONS} (exact, lowercase match).
     */
    private static boolean isConjunction(String word) {
        return CONJUNCTIONS.contains(word);
    }

    // NOTE(review): despite its name, this method does not strip anything — it
    // duplicates toUpperFirstIgnoreHyphen(). Kept as-is because it is public;
    // verify callers before renaming or removing.
    public void stripConsonants() {
        for (int i = 0; i < chars.length; i++) {
            if (!protectedChars[i]) {
                chars[i] = (i == 0 || DASHES.contains(chars[i - 1]))
                        ? Character.toUpperCase(chars[i])
                        : Character.toLowerCase(chars[i]);
            }
        }
    }

    public boolean isSmallerWord() {
        // "word:" is still a small "word"
        return SMALLER_WORDS.contains(this.toString().replace(":", "").toLowerCase(Locale.ROOT));
    }

    public boolean isLargerWord() {
        return !isSmallerWord();
    }

    @Override
    public String toString() {
        return new String(chars);
    }

    public boolean endsWithColon() {
        return this.toString().endsWith(":");
    }
}
5,672
34.018519
400
java
null
jabref-main/src/main/java/org/jabref/logic/formatter/minifier/MinifyNameListFormatter.java
package org.jabref.logic.formatter.minifier; import java.util.Objects; import org.jabref.logic.cleanup.Formatter; import org.jabref.logic.l10n.Localization; /** * Replaces three or more authors with and others */ public class MinifyNameListFormatter extends Formatter { @Override public String getName() { return Localization.lang("Minify list of person names"); } @Override public String getKey() { return "minify_name_list"; } /** * Replaces three or more authors with and others. * * <example> * Stefan Kolb -> Stefan Kolb * Stefan Kolb and Simon Harrer -> Stefan Kolb and Simon Harrer * Stefan Kolb and Simon Harrer and Jörg Lenhard -> Stefan Kolb and others * </example> */ @Override public String format(String value) { Objects.requireNonNull(value); if (value.isEmpty()) { // nothing to do return value; } return abbreviateAuthor(value); } @Override public String getDescription() { return Localization.lang("Shortens lists of persons if there are more than 2 persons to \"et al.\"."); } @Override public String getExampleInput() { return "Stefan Kolb and Simon Harrer and Oliver Kopp"; } private String abbreviateAuthor(String authorField) { // single author String authorSeparator = " and "; if (!authorField.contains(authorSeparator)) { return authorField; } String[] authors = authorField.split(authorSeparator); // trim authors for (int i = 0; i < authors.length; i++) { authors[i] = authors[i].trim(); } // already abbreviated if ("others".equals(authors[authors.length - 1]) && (authors.length == 2)) { return authorField; } // abbreviate if (authors.length < 3) { return authorField; } return authors[0] + authorSeparator + "others"; } }
2,051
24.333333
110
java
null
jabref-main/src/main/java/org/jabref/logic/formatter/minifier/TruncateFormatter.java
package org.jabref.logic.formatter.minifier;

import java.util.Objects;

import org.jabref.logic.cleanup.Formatter;
import org.jabref.logic.l10n.Localization;

/**
 * Formatter that truncates a field value after a fixed index and removes any
 * trailing whitespace from the truncated result.
 */
public class TruncateFormatter extends Formatter {

    // Per-instance configuration, not class constants — hence lowerCamelCase naming.
    private final int truncateAfter;
    private final String key;

    /**
     * The TruncateFormatter truncates a string after the given index and removes trailing whitespaces.
     *
     * @param truncateIndex truncate a string after this index; a negative value disables truncation
     */
    public TruncateFormatter(final int truncateIndex) {
        // A negative index means "never truncate"; Integer.MAX_VALUE keeps the whole string.
        this.truncateAfter = (truncateIndex >= 0) ? truncateIndex : Integer.MAX_VALUE;
        this.key = "truncate" + this.truncateAfter;
    }

    @Override
    public String getName() {
        return Localization.lang("Truncate");
    }

    @Override
    public String getKey() {
        return key;
    }

    /**
     * Truncates a string after the given index and strips trailing whitespace.
     *
     * @param input the string to truncate; must not be null
     * @return the (possibly) truncated string without trailing whitespace
     */
    @Override
    public String format(final String input) {
        Objects.requireNonNull(input);
        final int endIndex = Math.min(truncateAfter, input.length());
        return input.substring(0, endIndex).stripTrailing();
    }

    @Override
    public String getDescription() {
        return Localization.lang("Truncates a string after a given index.");
    }

    @Override
    public String getExampleInput() {
        return "Truncate this sentence.";
    }
}
1,362
25.211538
103
java
null
jabref-main/src/main/java/org/jabref/logic/git/GitHandler.java
package org.jabref.logic.git; import java.io.File; import java.io.IOException; import java.net.URISyntaxException; import java.nio.file.Files; import java.nio.file.Path; import java.util.Optional; import org.jabref.logic.util.io.FileUtil; import org.eclipse.jgit.api.Git; import org.eclipse.jgit.api.RmCommand; import org.eclipse.jgit.api.Status; import org.eclipse.jgit.api.errors.GitAPIException; import org.eclipse.jgit.lib.Ref; import org.eclipse.jgit.merge.MergeStrategy; import org.eclipse.jgit.transport.CredentialsProvider; import org.eclipse.jgit.transport.UsernamePasswordCredentialsProvider; import org.slf4j.Logger; import org.slf4j.LoggerFactory; /** * This class handles the updating of the local and remote git repository that is located at the repository path * This provides an easy-to-use interface to manage a git repository */ public class GitHandler { static final Logger LOGGER = LoggerFactory.getLogger(GitHandler.class); final Path repositoryPath; final File repositoryPathAsFile; String gitUsername = Optional.ofNullable(System.getenv("GIT_EMAIL")).orElse(""); String gitPassword = Optional.ofNullable(System.getenv("GIT_PW")).orElse(""); final CredentialsProvider credentialsProvider = new UsernamePasswordCredentialsProvider(gitUsername, gitPassword); /** * Initialize the handler for the given repository * * @param repositoryPath The root of the initialized git repository */ public GitHandler(Path repositoryPath) { this.repositoryPath = repositoryPath; this.repositoryPathAsFile = this.repositoryPath.toFile(); if (!isGitRepository()) { try { Git.init() .setDirectory(repositoryPathAsFile) .setInitialBranch("main") .call(); setupGitIgnore(); String initialCommit = "Initial commit"; if (!createCommitOnCurrentBranch(initialCommit, false)) { // Maybe, setupGitIgnore failed and did not add something // Then, we create an empty commit try (Git git = Git.open(repositoryPathAsFile)) { git.commit() .setAllowEmpty(true) .setMessage(initialCommit) .call(); } } } catch 
(GitAPIException | IOException e) { LOGGER.error("Initialization failed"); } } } void setupGitIgnore() { try { Path gitignore = Path.of(repositoryPath.toString(), ".gitignore"); if (!Files.exists(gitignore)) { FileUtil.copyFile(Path.of(this.getClass().getResource("git.gitignore").toURI()), gitignore, false); } } catch (URISyntaxException e) { LOGGER.error("Error occurred during copying of the gitignore file into the git repository."); } } /** * Returns true if the given path points to a directory that is a git repository (contains a .git folder) */ boolean isGitRepository() { // For some reason the solution from https://www.eclipse.org/lists/jgit-dev/msg01892.html does not work // This solution is quite simple but might not work in special cases, for us it should suffice. return Files.exists(Path.of(repositoryPath.toString(), ".git")); } /** * Checkout the branch with the specified name, if it does not exist create it * * @param branchToCheckout Name of the branch to check out */ public void checkoutBranch(String branchToCheckout) throws IOException, GitAPIException { try (Git git = Git.open(this.repositoryPathAsFile)) { Optional<Ref> branch = getRefForBranch(branchToCheckout); git.checkout() // If the branch does not exist, create it .setCreateBranch(branch.isEmpty()) .setName(branchToCheckout) .call(); } } /** * Returns the reference of the specified branch * If it does not exist returns an empty optional */ Optional<Ref> getRefForBranch(String branchName) throws GitAPIException, IOException { try (Git git = Git.open(this.repositoryPathAsFile)) { return git.branchList() .call() .stream() .filter(ref -> ref.getName().equals("refs/heads/" + branchName)) .findAny(); } } /** * Creates a commit on the currently checked out branch * * @param amend Whether to amend to the last commit (true), or not (false) * @return Returns true if a new commit was created. 
This is the case if the repository was not clean on method invocation */ public boolean createCommitOnCurrentBranch(String commitMessage, boolean amend) throws IOException, GitAPIException { boolean commitCreated = false; try (Git git = Git.open(this.repositoryPathAsFile)) { Status status = git.status().call(); if (!status.isClean()) { commitCreated = true; // Add new and changed files to index git.add() .addFilepattern(".") .call(); // Add all removed files to index if (!status.getMissing().isEmpty()) { RmCommand removeCommand = git.rm() .setCached(true); status.getMissing().forEach(removeCommand::addFilepattern); removeCommand.call(); } git.commit() .setAmend(amend) .setAllowEmpty(false) .setMessage(commitMessage) .call(); } } return commitCreated; } /** * Merges the source branch into the target branch * * @param targetBranch the name of the branch that is merged into * @param sourceBranch the name of the branch that gets merged */ public void mergeBranches(String targetBranch, String sourceBranch, MergeStrategy mergeStrategy) throws IOException, GitAPIException { String currentBranch = this.getCurrentlyCheckedOutBranch(); try (Git git = Git.open(this.repositoryPathAsFile)) { Optional<Ref> sourceBranchRef = getRefForBranch(sourceBranch); if (sourceBranchRef.isEmpty()) { // Do nothing return; } this.checkoutBranch(targetBranch); git.merge() .include(sourceBranchRef.get()) .setStrategy(mergeStrategy) .setMessage("Merge search branch into working branch.") .call(); } this.checkoutBranch(currentBranch); } /** * Pushes all commits made to the branch that is tracked by the currently checked out branch. * If pushing to remote fails, it fails silently. 
*/ public void pushCommitsToRemoteRepository() throws IOException { try (Git git = Git.open(this.repositoryPathAsFile)) { try { git.push() .setCredentialsProvider(credentialsProvider) .call(); } catch (GitAPIException e) { LOGGER.info("Failed to push"); } } } public void pullOnCurrentBranch() throws IOException { try (Git git = Git.open(this.repositoryPathAsFile)) { try { git.pull() .setCredentialsProvider(credentialsProvider) .call(); } catch (GitAPIException e) { LOGGER.info("Failed to push"); } } } public String getCurrentlyCheckedOutBranch() throws IOException { try (Git git = Git.open(this.repositoryPathAsFile)) { return git.getRepository().getBranch(); } } }
8,103
37.961538
138
java
null
jabref-main/src/main/java/org/jabref/logic/git/SlrGitHandler.java
package org.jabref.logic.git; import java.io.ByteArrayOutputStream; import java.io.IOException; import java.nio.charset.StandardCharsets; import java.nio.file.Files; import java.nio.file.Path; import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.Objects; import java.util.Optional; import java.util.StringJoiner; import org.jabref.logic.crawler.StudyRepository; import org.eclipse.jgit.api.Git; import org.eclipse.jgit.api.errors.GitAPIException; import org.eclipse.jgit.diff.DiffEntry; import org.eclipse.jgit.diff.DiffFormatter; import org.eclipse.jgit.lib.ObjectId; import org.eclipse.jgit.lib.ObjectReader; import org.eclipse.jgit.lib.Ref; import org.eclipse.jgit.lib.Repository; import org.eclipse.jgit.treewalk.CanonicalTreeParser; public class SlrGitHandler extends GitHandler { /** * Initialize the handler for the given repository * * @param repositoryPath The root of the initialized git repository */ public SlrGitHandler(Path repositoryPath) { super(repositoryPath); } public void appendLatestSearchResultsOntoCurrentBranch(String patchMessage, String searchBranchName) throws IOException, GitAPIException { // Calculate and apply new search results to work branch String patch = calculatePatchOfNewSearchResults(searchBranchName); Map<Path, String> result = parsePatchForAddedEntries(patch); applyPatch(result); this.createCommitOnCurrentBranch(patchMessage, false); } /** * Calculates the diff between the HEAD and the previous commit of the sourceBranch. 
* * @param sourceBranch The name of the branch that is the target of the calculation * @return Returns the patch (diff) between the head of the sourceBranch and its previous commit HEAD^1 */ String calculatePatchOfNewSearchResults(String sourceBranch) throws IOException, GitAPIException { try (Git git = Git.open(this.repositoryPathAsFile)) { Optional<Ref> sourceBranchRef = getRefForBranch(sourceBranch); if (sourceBranchRef.isEmpty()) { return ""; } Repository repository = git.getRepository(); ObjectId branchHead = sourceBranchRef.get().getObjectId(); ObjectId treeIdHead = repository.resolve(branchHead.getName() + "^{tree}"); ObjectId treeIdHeadParent = repository.resolve(branchHead.getName() + "~1^{tree}"); try (ObjectReader reader = repository.newObjectReader()) { CanonicalTreeParser oldTreeIter = new CanonicalTreeParser(); oldTreeIter.reset(reader, treeIdHeadParent); CanonicalTreeParser newTreeIter = new CanonicalTreeParser(); newTreeIter.reset(reader, treeIdHead); ByteArrayOutputStream put = new ByteArrayOutputStream(); try (DiffFormatter formatter = new DiffFormatter(put)) { formatter.setRepository(git.getRepository()); List<DiffEntry> entries = formatter.scan(oldTreeIter, newTreeIter); for (DiffEntry entry : entries) { if (entry.getChangeType().equals(DiffEntry.ChangeType.MODIFY)) { formatter.format(entry); } } formatter.flush(); return put.toString(); } } } } /** * Applies the provided patch on the current branch * Ignores any changes made to the study definition file. * The reason for this is that the study definition file cannot be patched the same way as the bib files, as the * order of fields in the yml file matters. * * @param patch the patch (diff) as a string * @return Returns a map where each file has its path as a key and the string contains the hunk of new results */ Map<Path, String> parsePatchForAddedEntries(String patch) throws IOException, GitAPIException { String[] tokens = patch.split("\n"); // Tracks for each file the related diff. 
Represents each file by its relative path Map<Path, String> diffsPerFile = new HashMap<>(); boolean content = false; StringJoiner joiner = null; String relativePath = null; for (String currentToken : tokens) { // Begin of a new diff if (currentToken.startsWith("diff --git a/")) { // If the diff is related to a different file, save the diff for the previous file if (!(Objects.isNull(relativePath) || Objects.isNull(joiner))) { if (!relativePath.contains(StudyRepository.STUDY_DEFINITION_FILE_NAME)) { diffsPerFile.put(Path.of(repositoryPath.toString(), relativePath), joiner.toString()); } } // Find the relative path of the file that is related with the current diff relativePath = currentToken.substring(13, currentToken.indexOf(" b/")); content = false; joiner = new StringJoiner("\n"); continue; } // From here on content follows if (currentToken.startsWith("@@ ") && currentToken.endsWith(" @@")) { content = true; continue; } // Only add "new" lines to diff (no context lines) if (content && currentToken.startsWith("+")) { // Do not include + sign if (joiner != null) { joiner.add(currentToken.substring(1)); } } } if (!(Objects.isNull(relativePath) || Objects.isNull(joiner))) { // For the last file this has to be done at the end diffsPerFile.put(Path.of(repositoryPath.toString(), relativePath), joiner.toString()); } return diffsPerFile; } /** * Applies for each file (specified as keys), the calculated patch (specified as the value) * The patch is inserted between the encoding and the contents of the bib files. 
*/ void applyPatch(Map<Path, String> patch) { patch.keySet().forEach(path -> { try { String currentContent = Files.readString(path); String prefix = ""; if (currentContent.startsWith("% Encoding:")) { int endOfEncoding = currentContent.indexOf("\n"); // Include Encoding and the empty line prefix = currentContent.substring(0, endOfEncoding + 1) + "\n"; currentContent = currentContent.substring(endOfEncoding + 2); } Files.writeString(path, prefix + patch.get(path) + currentContent, StandardCharsets.UTF_8); } catch (IOException e) { LOGGER.error("Could not apply patch."); } }); } }
7,012
43.106918
142
java
null
jabref-main/src/main/java/org/jabref/logic/groups/DefaultGroupsFactory.java
package org.jabref.logic.groups;

import org.jabref.logic.l10n.Localization;
import org.jabref.model.groups.AllEntriesGroup;

/**
 * Factory for the default groups JabRef ships with (currently only the "All entries" group).
 */
public class DefaultGroupsFactory {

    // Constant: made final (it was a mutable static before).
    private static final String ALL_ENTRIES_GROUP_DEFAULT_ICON = "ALL_ENTRIES_GROUP_ICON";

    private DefaultGroupsFactory() {
        // utility class, no instances
    }

    /**
     * Creates a fresh, localized "All entries" group with the default icon.
     *
     * @return a new {@link AllEntriesGroup} instance
     */
    public static AllEntriesGroup getAllEntriesGroup() {
        AllEntriesGroup group = new AllEntriesGroup(Localization.lang("All entries"));
        group.setIconName(ALL_ENTRIES_GROUP_DEFAULT_ICON);
        return group;
    }
}
526
26.736842
86
java
null
jabref-main/src/main/java/org/jabref/logic/help/HelpFile.java
package org.jabref.logic.help; /** * This enum globally defines all help pages with the name of the markdown file in the help repository at Github. * * @see <a href="https://github.com/JabRef/user-documentation">user-documentation@github</a> */ public enum HelpFile { // empty string denotes that it refers to the TOC/index CONTENTS(""), // this is always the index ENTRY_EDITOR("advanced/entryeditor"), STRING_EDITOR("setup/stringeditor"), GROUPS("finding-sorting-and-cleaning-entries/groups#groups-structure-creating-and-removing-groups"), SPECIAL_FIELDS("finding-sorting-and-cleaning-entries/specialfields"), CITATION_KEY_PATTERN("setup/citationkeypatterns"), OWNER("advanced/entryeditor/owner"), TIMESTAMP("advanced/entryeditor/timestamp"), CUSTOM_EXPORTS_NAME_FORMATTER("collaborative-work/export/customexports#using-custom-name-formatters"), GENERAL_FIELDS("setup/generalfields"), REMOTE("advanced/remote"), REGEX_SEARCH("finding-sorting-and-cleaning-entries/filelinks#using-regular-expression-search-for-auto-linking"), PREVIEW("setup/preview"), AUTOSAVE("advanced/autosave"), // The help page covers both OO and LO. 
OPENOFFICE_LIBREOFFICE("cite/openofficeintegration"), FETCHER_ACM("collect/import-using-online-bibliographic-database#acmportal"), FETCHER_ADS("collect/import-using-online-bibliographic-database#ads"), FETCHER_BIBSONOMY_SCRAPER(""), FETCHER_CITESEERX("collect/import-using-online-bibliographic-database#citeseer"), FETCHER_DBLP("collect/import-using-online-bibliographic-database#dblp"), FETCHER_DIVA("collect/add-entry-using-an-id"), FETCHER_DOAJ("collect/import-using-online-bibliographic-database#doaj"), FETCHER_DOI("collect/add-entry-using-an-id"), FETCHER_GOOGLE_SCHOLAR("collect/import-using-online-bibliographic-database#googlescholar"), FETCHER_GVK("collect/import-using-online-bibliographic-database#gvk"), FETCHER_IEEEXPLORE("collect/import-using-online-bibliographic-database#ieeexplore"), FETCHER_INSPIRE("collect/import-using-online-bibliographic-database#inspire"), FETCHER_ISBN("collect/add-entry-using-an-id"), FETCHER_MEDLINE("collect/import-using-online-bibliographic-database#medline"), FETCHER_OAI2_ARXIV("collect/import-using-online-bibliographic-database#arxiv"), FETCHER_RFC("collect/add-entry-using-an-id"), FETCHER_SPRINGER("collect/import-using-online-bibliographic-database#springer"), FETCHER_TITLE("collect/add-entry-using-an-id"), FETCHER_SCIENCEDIRECT(""), DATABASE_PROPERTIES("setup/databaseproperties"), FIND_DUPLICATES("finding-sorting-and-cleaning-entries/findduplicates"), SQL_DATABASE_MIGRATION("collaborative-work/sqldatabase/sqldatabasemigration"); private final String pageName; /** * Sets the URL path part of the help page. * * @param pageName the URL path part of the help page */ HelpFile(String pageName) { this.pageName = pageName; } /** * Returns the URL path part of the help page. * * @return the URL path part of the help page */ public String getPageName() { return pageName; } }
3,204
45.449275
116
java
null
jabref-main/src/main/java/org/jabref/logic/importer/AuthorListParser.java
package org.jabref.logic.importer; import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; import java.util.HashSet; import java.util.List; import java.util.Locale; import java.util.Objects; import java.util.Optional; import java.util.Set; import org.jabref.model.entry.Author; import org.jabref.model.entry.AuthorList; import org.jabref.model.strings.StringUtil; public class AuthorListParser { // Avoid partition where these values are contained private final static Set<String> AVOID_TERMS_IN_LOWER_CASE = Set.of( "jr", "sr", "jnr", "snr", "von", "zu", "van", "der"); private static final int TOKEN_GROUP_LENGTH = 4; // number of entries for a token // the following are offsets of an entry in a group of entries for one token private static final int OFFSET_TOKEN = 0; // String -- token itself; private static final int OFFSET_TOKEN_ABBR = 1; // String -- token abbreviation; private static final int OFFSET_TOKEN_TERM = 2; // Character -- token terminator (either " " or // "-") comma) // Constant HashSet containing names of TeX special characters private static final Set<String> TEX_NAMES = Set.of( "aa", "ae", "l", "o", "oe", "i", "AA", "AE", "L", "O", "OE", "j"); /** * the raw bibtex author/editor field */ private String original; /** * index of the start in original, for example to point to 'abc' in 'abc xyz', tokenStart=2 */ private int tokenStart; /** * index of the end in original, for example to point to 'abc' in 'abc xyz', tokenEnd=5 */ private int tokenEnd; /** * end of token abbreviation (always: tokenStart < tokenAbbrEnd <= tokenEnd), only valid if getToken returns * Token.WORD */ private int tokenAbbrEnd; /** * either space of dash */ private char tokenTerm; /** * true if upper-case token, false if lower-case */ private boolean tokenCase; /** * Builds a new array of strings with stringbuilder. Regarding to the name affixes. 
* * @return New string with correct seperation */ private static StringBuilder buildWithAffix(Collection<Integer> indexArray, List<String> nameList) { StringBuilder stringBuilder = new StringBuilder(); // avoidedTimes needs to be increased by the count of avoided terms for correct odd/even calculation int avoidedTimes = 0; for (int i = 0; i < nameList.size(); i++) { if (indexArray.contains(i)) { // We hit a name affix stringBuilder.append(nameList.get(i)); stringBuilder.append(','); avoidedTimes++; } else { stringBuilder.append(nameList.get(i)); if (((i + avoidedTimes) % 2) == 0) { // Hit separation between last name and firstname --> comma has to be kept stringBuilder.append(','); } else { // Hit separation between full names (e.g., Ali Babar, M. and Dingsøyr, T.) --> semicolon has to be used // Will be treated correctly by AuthorList.parse(authors); stringBuilder.append(';'); } } } return stringBuilder; } /** * Parses the String containing person names and returns a list of person information. * * @param listOfNames the String containing the person names to be parsed * @return a parsed list of persons */ public AuthorList parse(String listOfNames) { Objects.requireNonNull(listOfNames); // Handling of "and others" // Remove it from the list; it will be added at the very end of this method as special Author.OTHERS listOfNames = listOfNames.trim(); final String andOthersSuffix = " and others"; final boolean andOthersPresent; if (StringUtil.endsWithIgnoreCase(listOfNames, andOthersSuffix)) { andOthersPresent = true; listOfNames = StringUtil.removeStringAtTheEnd(listOfNames, " and others"); } else { andOthersPresent = false; } // Handle case names in order lastname, firstname and separated by "," // E.g., Ali Babar, M., Dingsøyr, T., Lago, P., van der Vliet, H. 
final boolean authorsContainAND = listOfNames.toUpperCase(Locale.ENGLISH).contains(" AND "); final boolean authorsContainOpeningBrace = listOfNames.contains("{"); final boolean authorsContainSemicolon = listOfNames.contains(";"); final boolean authorsContainTwoOrMoreCommas = (listOfNames.length() - listOfNames.replace(",", "").length()) >= 2; if (!authorsContainAND && !authorsContainOpeningBrace && !authorsContainSemicolon && authorsContainTwoOrMoreCommas) { List<String> arrayNameList = Arrays.asList(listOfNames.split(",")); // Delete spaces for correct case identification arrayNameList.replaceAll(String::trim); // Looking for space between pre- and lastname boolean spaceInAllParts = arrayNameList.stream().filter(name -> name.contains(" ")) .count() == arrayNameList.size(); // We hit the comma name separator case // Usually the getAsLastFirstNamesWithAnd method would separate them if pre- and lastname are separated with "and" // If not, we check if spaces separate pre- and lastname if (spaceInAllParts) { listOfNames = listOfNames.replaceAll(",", " and"); } else { // Looking for name affixes to avoid // arrayNameList needs to reduce by the count off avoiding terms // valuePartsCount holds the count of name parts without the avoided terms int valuePartsCount = arrayNameList.size(); // Holds the index of each term which needs to be avoided Collection<Integer> avoidIndex = new HashSet<>(); for (int i = 0; i < arrayNameList.size(); i++) { if (AVOID_TERMS_IN_LOWER_CASE.contains(arrayNameList.get(i).toLowerCase(Locale.ROOT))) { avoidIndex.add(i); valuePartsCount--; } } if ((valuePartsCount % 2) == 0) { // We hit the described special case with name affix like Jr listOfNames = buildWithAffix(avoidIndex, arrayNameList).toString(); } } } // initialization of parser original = listOfNames; tokenStart = 0; tokenEnd = 0; // Parse author by author List<Author> authors = new ArrayList<>(5); // 5 seems to be reasonable initial size while (tokenStart < original.length()) { 
getAuthor().ifPresent(authors::add); } if (andOthersPresent) { authors.add(Author.OTHERS); } return AuthorList.of(authors); } /** * Parses one author name and returns preformatted information. * * @return Preformatted author name; <CODE>Optional.empty()</CODE> if author name is empty. */ private Optional<Author> getAuthor() { List<Object> tokens = new ArrayList<>(); // initialization int vonStart = -1; int lastStart = -1; int commaFirst = -1; int commaSecond = -1; // First step: collect tokens in 'tokens' Vector and calculate indices boolean continueLoop = true; while (continueLoop) { Token token = getToken(); switch (token) { case EOF: case AND: continueLoop = false; break; case COMMA: if (commaFirst < 0) { commaFirst = tokens.size(); } else if (commaSecond < 0) { commaSecond = tokens.size(); } break; case WORD: tokens.add(original.substring(tokenStart, tokenEnd)); tokens.add(original.substring(tokenStart, tokenAbbrEnd)); tokens.add(tokenTerm); tokens.add(tokenCase); if (commaFirst >= 0) { break; } if (lastStart >= 0) { break; } if (vonStart < 0) { if (!tokenCase) { int previousTermToken = (tokens.size() - TOKEN_GROUP_LENGTH - TOKEN_GROUP_LENGTH) + OFFSET_TOKEN_TERM; if ((previousTermToken >= 0) && tokens.get(previousTermToken).equals('-')) { // We are in a first name which contained a hyphen break; } int thisTermToken = previousTermToken + TOKEN_GROUP_LENGTH; if ((thisTermToken >= 0) && tokens.get(thisTermToken).equals('-')) { // We are in a name which contained a hyphen break; } vonStart = tokens.size() - TOKEN_GROUP_LENGTH; break; } } else if (tokenCase) { lastStart = tokens.size() - TOKEN_GROUP_LENGTH; break; } break; default: break; } } // Second step: split name into parts (here: calculate indices // of parts in 'tokens' Vector) if (tokens.isEmpty()) { return Optional.empty(); // no author information } // the following negatives indicate absence of the corresponding part int firstPartStart = -1; int vonPartStart = -1; int lastPartStart = -1; int jrPartStart 
= -1; int firstPartEnd; int vonPartEnd = 0; int lastPartEnd = 0; int jrPartEnd = 0; if (commaFirst < 0) { // no commas if (vonStart < 0) { // no 'von part' lastPartEnd = tokens.size(); lastPartStart = tokens.size() - TOKEN_GROUP_LENGTH; int index = (tokens.size() - (2 * TOKEN_GROUP_LENGTH)) + OFFSET_TOKEN_TERM; if (index > 0) { Character ch = (Character) tokens.get(index); if (ch == '-') { lastPartStart -= TOKEN_GROUP_LENGTH; } } firstPartEnd = lastPartStart; if (firstPartEnd > 0) { firstPartStart = 0; } } else { // 'von part' is present if (lastStart >= 0) { lastPartEnd = tokens.size(); lastPartStart = lastStart; vonPartEnd = lastPartStart; } else { vonPartEnd = tokens.size(); } vonPartStart = vonStart; firstPartEnd = vonPartStart; if (firstPartEnd > 0) { firstPartStart = 0; } } } else { // commas are present: it affects only 'first part' and 'junior part' firstPartEnd = tokens.size(); if (commaSecond < 0) { // one comma if (commaFirst < firstPartEnd) { firstPartStart = commaFirst; } } else { // two or more commas if (commaSecond < firstPartEnd) { firstPartStart = commaSecond; } jrPartEnd = commaSecond; if (commaFirst < jrPartEnd) { jrPartStart = commaFirst; } } if (vonStart == 0) { // 'von part' is present if (lastStart < 0) { vonPartEnd = commaFirst; } else { lastPartEnd = commaFirst; lastPartStart = lastStart; vonPartEnd = lastPartStart; } vonPartStart = 0; } else { // no 'von part' lastPartEnd = commaFirst; if (lastPartEnd > 0) { lastPartStart = 0; } } } if ((firstPartStart == -1) && (lastPartStart == -1) && (vonPartStart != -1)) { // There is no first or last name, but we have a von part. This is likely // to indicate a single-entry name without an initial capital letter, such // as "unknown". 
// We make the von part the last name, to facilitate handling by last-name formatters: lastPartStart = vonPartStart; lastPartEnd = vonPartEnd; vonPartStart = -1; vonPartEnd = -1; } // Third step: do actual splitting, construct Author object String firstPart = firstPartStart < 0 ? null : concatTokens(tokens, firstPartStart, firstPartEnd, OFFSET_TOKEN, false); String firstAbbr = firstPartStart < 0 ? null : concatTokens(tokens, firstPartStart, firstPartEnd, OFFSET_TOKEN_ABBR, true); String vonPart = vonPartStart < 0 ? null : concatTokens(tokens, vonPartStart, vonPartEnd, OFFSET_TOKEN, false); String lastPart = lastPartStart < 0 ? null : concatTokens(tokens, lastPartStart, lastPartEnd, OFFSET_TOKEN, false); String jrPart = jrPartStart < 0 ? null : concatTokens(tokens, jrPartStart, jrPartEnd, OFFSET_TOKEN, false); if ((firstPart != null) && (lastPart != null) && lastPart.equals(lastPart.toUpperCase(Locale.ROOT)) && (lastPart.length() < 5) && (Character.UnicodeScript.of(lastPart.charAt(0)) != Character.UnicodeScript.HAN)) { // The last part is a small string in complete upper case, so interpret it as initial of the first name // This is the case for example in "Smith SH" which we think of as lastname=Smith and firstname=SH // The length < 5 constraint should allow for "Smith S.H." as input return Optional.of(new Author(lastPart, lastPart, vonPart, firstPart, jrPart)); } else { return Optional.of(new Author(firstPart, firstAbbr, vonPart, lastPart, jrPart)); } } /** * Concatenates list of tokens from 'tokens' Vector. Tokens are separated by spaces or dashes, depending on stored * in 'tokens'. Callers always ensure that start < end; thus, there exists at least one token to be concatenated. * * @param start index of the first token to be concatenated in 'tokens' Vector (always divisible by * TOKEN_GROUP_LENGTH). * @param end index of the first token not to be concatenated in 'tokens' Vector (always divisible by * TOKEN_GROUP_LENGTH). 
* @param offset offset within token group (used to request concatenation of either full tokens or abbreviation). * @param dotAfter <CODE>true</CODE> -- add period after each token, <CODE>false</CODE> -- * do not add. * @return the result of concatenation. */ private String concatTokens(List<Object> tokens, int start, int end, int offset, boolean dotAfter) { StringBuilder result = new StringBuilder(); // Here we always have start < end result.append((String) tokens.get(start + offset)); if (dotAfter) { result.append('.'); } int updatedStart = start + TOKEN_GROUP_LENGTH; while (updatedStart < end) { result.append(tokens.get((updatedStart - TOKEN_GROUP_LENGTH) + OFFSET_TOKEN_TERM)); result.append((String) tokens.get(updatedStart + offset)); if (dotAfter) { result.append('.'); } updatedStart += TOKEN_GROUP_LENGTH; } return result.toString(); } /** * Parses the next token. * <p> * The string being parsed is stored in global variable <CODE>original</CODE>, and position which parsing has to * start from is stored in global variable * <CODE>token_end</CODE>; thus, <CODE>token_end</CODE> has to be set * to 0 before the first invocation. Procedure updates <CODE>token_end</CODE>; thus, subsequent invocations do not * require any additional variable settings. * <p> * The type of the token is returned; if it is <CODE>Token.WORD</CODE>, additional information is given in global * variables <CODE>token_start</CODE>, * <CODE>token_end</CODE>, <CODE>token_abbr</CODE>, <CODE>token_term</CODE>, * and <CODE>token_case</CODE>; namely: <CODE>original.substring(token_start,token_end)</CODE> is the text of the * token, <CODE>original.substring(token_start,token_abbr)</CODE> is the token abbreviation, <CODE>token_term</CODE> * contains token terminator (space or dash), and <CODE>token_case</CODE> is <CODE>true</CODE>, if token is * upper-case and <CODE>false</CODE> if token is lower-case. 
* * @return <CODE>Token.EOF</CODE> -- no more tokens, <CODE>Token.COMMA</CODE> -- * token is comma, <CODE>Token.AND</CODE> -- token is the word "and" (or "And", or "aND", etc.) or a semicolon, * <CODE>Token.WORD</CODE> -- token is a word; additional information is given in global variables * <CODE>token_start</CODE>, <CODE>token_end</CODE>, * <CODE>token_abbr</CODE>, <CODE>token_term</CODE>, and * <CODE>token_case</CODE>. */ private Token getToken() { tokenStart = tokenEnd; while (tokenStart < original.length()) { char c = original.charAt(tokenStart); if (!((c == '~') || (c == '-') || Character.isWhitespace(c))) { break; } tokenStart++; } tokenEnd = tokenStart; if (tokenStart >= original.length()) { return Token.EOF; } if (original.charAt(tokenStart) == ',') { tokenEnd++; return Token.COMMA; } // Semicolon is considered to separate names like "and" if (original.charAt(tokenStart) == ';') { tokenEnd++; return Token.AND; } tokenAbbrEnd = -1; tokenTerm = ' '; tokenCase = true; int bracesLevel = 0; int currentBackslash = -1; boolean firstLetterIsFound = false; while (tokenEnd < original.length()) { char c = original.charAt(tokenEnd); if (c == '{') { bracesLevel++; } if (firstLetterIsFound && (tokenAbbrEnd < 0) && ((bracesLevel == 0) || (c == '{'))) { tokenAbbrEnd = tokenEnd; } if ((c == '}') && (bracesLevel > 0)) { bracesLevel--; } if (!firstLetterIsFound && (currentBackslash < 0) && Character.isLetter(c)) { if (bracesLevel == 0) { tokenCase = Character.isUpperCase(c) || (Character.UnicodeScript.of(c) == Character.UnicodeScript.HAN); } else { // If this is a particle in braces, always treat it as if it starts with // an upper case letter. 
Otherwise a name such as "{van den Bergen}, Hans" // will not yield a proper last name: tokenCase = true; } firstLetterIsFound = true; } if ((currentBackslash >= 0) && !Character.isLetter(c)) { if (!firstLetterIsFound) { String texCmdName = original.substring(currentBackslash + 1, tokenEnd); if (TEX_NAMES.contains(texCmdName)) { tokenCase = Character.isUpperCase(texCmdName.charAt(0)); firstLetterIsFound = true; } } currentBackslash = -1; } if (c == '\\') { currentBackslash = tokenEnd; } if ((bracesLevel == 0) && ((",;-".indexOf(c) != -1) || Character.isWhitespace(c))) { break; } tokenEnd++; } if (tokenAbbrEnd < 0) { tokenAbbrEnd = tokenEnd; } if ((tokenEnd < original.length()) && (original.charAt(tokenEnd) == '-')) { tokenTerm = '-'; } if ("and".equalsIgnoreCase(original.substring(tokenStart, tokenEnd))) { return Token.AND; } else { return Token.WORD; } } // Token types (returned by getToken procedure) private enum Token { EOF, AND, COMMA, WORD } }
21,208
41.933198
134
java
null
jabref-main/src/main/java/org/jabref/logic/importer/CompositeIdFetcher.java
package org.jabref.logic.importer;

import java.util.Optional;
import java.util.stream.Stream;

import org.jabref.logic.importer.fetcher.ArXivFetcher;
import org.jabref.logic.importer.fetcher.DoiFetcher;
import org.jabref.logic.importer.fetcher.isbntobibtex.EbookDeIsbnFetcher;
import org.jabref.logic.importer.fetcher.isbntobibtex.IsbnFetcher;
import org.jabref.model.entry.BibEntry;
import org.jabref.model.entry.identifier.ArXivIdentifier;
import org.jabref.model.entry.identifier.DOI;
import org.jabref.model.entry.identifier.ISBN;

/**
 * Delegates an identifier-based search to the fetcher matching the first
 * identifier type that can be parsed from the input. Supported identifier
 * types are tried in a fixed order: DOI, arXiv id, ISBN.
 */
public class CompositeIdFetcher {

    private final ImportFormatPreferences importFormatPreferences;

    public CompositeIdFetcher(ImportFormatPreferences importFormatPreferences) {
        this.importFormatPreferences = importFormatPreferences;
    }

    /**
     * Resolves the given free-form identifier string to a bibliographic entry.
     *
     * @param identifier text possibly containing a DOI, arXiv id, or ISBN
     * @return the fetched entry, or an empty Optional if no supported identifier was recognized
     * @throws FetcherException if the delegated fetcher fails
     */
    public Optional<BibEntry> performSearchById(String identifier) throws FetcherException {
        // DOI takes precedence over the other identifier types
        Optional<DOI> doiCandidate = DOI.findInText(identifier);
        if (doiCandidate.isPresent()) {
            return new DoiFetcher(importFormatPreferences).performSearchById(doiCandidate.get().getNormalized());
        }

        Optional<ArXivIdentifier> arXivCandidate = ArXivIdentifier.parse(identifier);
        if (arXivCandidate.isPresent()) {
            return new ArXivFetcher(importFormatPreferences).performSearchById(arXivCandidate.get().getNormalized());
        }

        Optional<ISBN> isbnCandidate = ISBN.parse(identifier);
        if (isbnCandidate.isPresent()) {
            return new IsbnFetcher(importFormatPreferences)
                    .addRetryFetcher(new EbookDeIsbnFetcher(importFormatPreferences))
                    // .addRetryFetcher(new DoiToBibtexConverterComIsbnFetcher(importFormatPreferences))
                    .performSearchById(isbnCandidate.get().getNormalized());
        }

        /* TODO: IACR is currently disabled, because it needs to be reworked: https://github.com/JabRef/jabref/issues/8876
        Optional<IacrEprint> iacrEprint = IacrEprint.parse(identifier);
        if (iacrEprint.isPresent()) {
            return new IacrEprintFetcher(importFormatPreferences).performSearchById(iacrEprint.get().getNormalized());
        }*/

        return Optional.empty();
    }

    public String getName() {
        return "CompositeIdFetcher";
    }

    /**
     * Checks whether any supported identifier type can be parsed from the input.
     * All three parsers are evaluated (no short-circuiting), mirroring the
     * eager evaluation of the original Stream-based implementation.
     */
    public static boolean containsValidId(String identifier) {
        boolean doiFound = DOI.findInText(identifier).isPresent();
        boolean arXivFound = ArXivIdentifier.parse(identifier).isPresent();
        boolean isbnFound = ISBN.parse(identifier).isPresent();
        return doiFound || arXivFound || isbnFound;
    }
}
2,593
42.233333
122
java
null
jabref-main/src/main/java/org/jabref/logic/importer/EntryBasedFetcher.java
package org.jabref.logic.importer; import java.util.List; import org.jabref.model.entry.BibEntry; /** * Searches web resources for bibliographic information based on a {@link BibEntry}. * Useful to <b>complete</b> an existing entry with fetched information. * May return multiple search hits. */ public interface EntryBasedFetcher extends WebFetcher { /** * Looks for hits which are matched by the given {@link BibEntry}. * * @param entry entry to search bibliographic information for * @return a list of {@link BibEntry}, which are matched by the query (may be empty) */ List<BibEntry> performSearch(BibEntry entry) throws FetcherException; }
685
30.181818
88
java
null
jabref-main/src/main/java/org/jabref/logic/importer/EntryBasedParserFetcher.java
package org.jabref.logic.importer;

import java.io.BufferedInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.net.MalformedURLException;
import java.net.URISyntaxException;
import java.net.URL;
import java.util.Collections;
import java.util.List;
import java.util.Objects;

import org.jabref.logic.cleanup.Formatter;
import org.jabref.model.entry.BibEntry;

/**
 * Provides a convenient interface for entry-based fetcher, which follow the usual three-step procedure:
 * 1. Open a URL based on the entry
 * 2. Parse the response to get a list of {@link BibEntry}
 * 3. Post-process fetched entries
 */
public interface EntryBasedParserFetcher extends EntryBasedFetcher {

    /**
     * Constructs a URL based on the {@link BibEntry}.
     *
     * @param entry the entry to look information for
     */
    URL getURLForEntry(BibEntry entry) throws URISyntaxException, MalformedURLException, FetcherException;

    /**
     * Returns the parser used to convert the response to a list of {@link BibEntry}.
     */
    Parser getParser();

    /**
     * Performs a cleanup of the fetched entry.
     *
     * Only systematic errors of the fetcher should be corrected here
     * (i.e. if information is consistently contained in the wrong field or the wrong format)
     * but not cosmetic issues which may depend on the user's taste (for example, LateX code vs HTML in the abstract).
     *
     * Try to reuse existing {@link Formatter} for the cleanup. For example,
     * {@code new FieldFormatterCleanup(StandardField.TITLE, new RemoveBracesFormatter()).cleanup(entry);}
     *
     * By default, no cleanup is done.
     *
     * @param entry the entry to be cleaned-up
     */
    default void doPostCleanup(BibEntry entry) {
        // Do nothing by default
    }

    /**
     * Downloads and parses the bibliographic information for the given entry.
     *
     * @param entry the entry to complete; must not be null
     * @return the fetched entries after {@link #doPostCleanup(BibEntry)} (may be empty)
     * @throws FetcherException if the URL is malformed, a network error occurs, or the response cannot be parsed
     */
    @Override
    default List<BibEntry> performSearch(BibEntry entry) throws FetcherException {
        Objects.requireNonNull(entry);

        // Build the query URL first; a FetcherException from getURLForEntry propagates unchanged.
        URL urlForEntry;
        try {
            urlForEntry = getURLForEntry(entry);
        } catch (MalformedURLException | URISyntaxException e) {
            throw new FetcherException("Search URI is malformed", e);
        }
        if (urlForEntry == null) {
            // No URL could be constructed for this entry (e.g. required fields missing)
            return Collections.emptyList();
        }

        try (InputStream stream = new BufferedInputStream(urlForEntry.openStream())) {
            List<BibEntry> fetchedEntries = getParser().parseEntries(stream);

            // Post-cleanup
            fetchedEntries.forEach(this::doPostCleanup);

            return fetchedEntries;
        } catch (IOException e) {
            // TODO: Catch HTTP Response 401 errors and report that user has no rights to access resource
            throw new FetcherException("A network error occurred", e);
        } catch (ParseException e) {
            throw new FetcherException("An internal parser error occurred", e);
        }
    }
}
2,884
34.182927
118
java
null
jabref-main/src/main/java/org/jabref/logic/importer/FetcherClientException.java
package org.jabref.logic.importer; /** * Should be thrown when you encounter an HTTP status code error &gt;= 400 and &lt; 500. */ public class FetcherClientException extends FetcherException { public FetcherClientException(String errorMessage, Throwable cause) { super(errorMessage, cause); } public FetcherClientException(String errorMessage) { super(errorMessage); } public FetcherClientException(String errorMessage, String localizedMessage, Throwable cause) { super(errorMessage, localizedMessage, cause); } }
568
27.45
98
java
null
jabref-main/src/main/java/org/jabref/logic/importer/FetcherException.java
package org.jabref.logic.importer; import org.jabref.logic.JabRefException; public class FetcherException extends JabRefException { public FetcherException(String errorMessage, Throwable cause) { super(errorMessage, cause); } public FetcherException(String errorMessage) { super(errorMessage); } public FetcherException(String errorMessage, String localizedMessage, Throwable cause) { super(errorMessage, localizedMessage, cause); } }
488
24.736842
92
java
null
jabref-main/src/main/java/org/jabref/logic/importer/FetcherResult.java
package org.jabref.logic.importer; import java.net.URL; import org.jabref.logic.importer.fetcher.TrustLevel; public final class FetcherResult { private final TrustLevel trust; private final URL source; public FetcherResult(TrustLevel trust, URL source) { this.trust = trust; this.source = source; } public TrustLevel getTrust() { return trust; } public URL getSource() { return source; } }
460
18.208333
56
java
null
jabref-main/src/main/java/org/jabref/logic/importer/FetcherServerException.java
package org.jabref.logic.importer; /** * Should be thrown when you encounter a http status code error >= 500 */ public class FetcherServerException extends FetcherException { public FetcherServerException(String errorMessage, Throwable cause) { super(errorMessage, cause); } public FetcherServerException(String errorMessage) { super(errorMessage); } public FetcherServerException(String errorMessage, String localizedMessage, Throwable cause) { super(errorMessage, localizedMessage, cause); } }
550
28
98
java
null
jabref-main/src/main/java/org/jabref/logic/importer/FulltextFetcher.java
package org.jabref.logic.importer; import java.io.IOException; import java.net.URL; import java.util.Optional; import org.jabref.logic.importer.fetcher.TrustLevel; import org.jabref.model.entry.BibEntry; /** * This interface is used for classes that try to resolve a full-text PDF url for a BibTex entry. * Implementing classes should specialize on specific article sites. * See e.g. @link{http://libguides.mit.edu/apis}. */ public interface FulltextFetcher { /** * Tries to find a fulltext URL for a given BibTex entry. * * @param entry The Bibtex entry * @return The fulltext PDF URL Optional, if found, or an empty Optional if not found. * @throws NullPointerException if no BibTex entry is given * @throws java.io.IOException if an IO operation has failed * @throws FetcherException if a fetcher specific error occurred */ Optional<URL> findFullText(BibEntry entry) throws IOException, FetcherException; /** * Returns the level of trust for this fetcher. * We distinguish between publishers and meta search engines for example. * * @return The trust level of the fetcher, the higher the better */ default TrustLevel getTrustLevel() { return TrustLevel.UNKNOWN; } }
1,275
33.486486
97
java
null
jabref-main/src/main/java/org/jabref/logic/importer/FulltextFetchers.java
package org.jabref.logic.importer;

import java.io.IOException;
import java.net.MalformedURLException;
import java.net.URL;
import java.util.Comparator;
import java.util.HashSet;
import java.util.List;
import java.util.Objects;
import java.util.Optional;
import java.util.Set;
import java.util.concurrent.Callable;
import java.util.concurrent.CancellationException;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import java.util.function.Predicate;
import java.util.stream.Collectors;

import org.jabref.gui.JabRefExecutorService;
import org.jabref.logic.net.URLDownload;
import org.jabref.model.entry.BibEntry;
import org.jabref.model.entry.field.StandardField;
import org.jabref.model.entry.identifier.DOI;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/**
 * Utility class for trying to resolve URLs to full-text PDF for articles.
 *
 * Combines multiple {@link FulltextFetcher}s together. Each fetcher is invoked, the "best" result (sorted by the fetcher trust level) is returned.
 */
public class FulltextFetchers {

    private static final Logger LOGGER = LoggerFactory.getLogger(FulltextFetchers.class);

    // Timeout in seconds
    private static final int FETCHER_TIMEOUT = 10;

    private final Set<FulltextFetcher> finders = new HashSet<>();

    // Accepts a candidate URL only if it actually points to a PDF document
    private final Predicate<String> isPDF = url -> {
        try {
            return new URLDownload(url).isPdf();
        } catch (MalformedURLException e) {
            LOGGER.warn("URL returned by fulltext fetcher is invalid");
        }
        return false;
    };

    public FulltextFetchers(ImportFormatPreferences importFormatPreferences, ImporterPreferences importerPreferences) {
        this(WebFetchers.getFullTextFetchers(importFormatPreferences, importerPreferences));
    }

    FulltextFetchers(Set<FulltextFetcher> fetcher) {
        finders.addAll(fetcher);
    }

    /**
     * Runs all configured fetchers concurrently and returns the PDF URL of the
     * most trusted result, if any.
     *
     * @param entry the entry to find a fulltext for; not modified by this method
     * @return the best PDF URL found, or an empty Optional
     */
    public Optional<URL> findFullTextPDF(BibEntry entry) {
        // for accuracy, fetch DOI first but do not modify entry
        BibEntry clonedEntry = (BibEntry) entry.clone();
        Optional<DOI> doi = clonedEntry.getField(StandardField.DOI).flatMap(DOI::parse);

        if (doi.isEmpty()) {
            findDoiForEntry(clonedEntry);
        }

        List<Future<Optional<FetcherResult>>> result = JabRefExecutorService.INSTANCE.executeAll(getCallables(clonedEntry, finders), FETCHER_TIMEOUT, TimeUnit.SECONDS);

        return result.stream()
                     .map(FulltextFetchers::getResults)
                     .filter(Optional::isPresent)
                     .map(Optional::get)
                     .filter(res -> Objects.nonNull(res.getSource()))
                     // highest trust score wins
                     .sorted(Comparator.comparingInt((FetcherResult res) -> res.getTrust().getTrustScore()).reversed())
                     .map(FetcherResult::getSource)
                     .findFirst();
    }

    // Best-effort DOI lookup; failures are logged and ignored so the PDF search can continue
    private void findDoiForEntry(BibEntry clonedEntry) {
        try {
            WebFetchers.getIdFetcherForIdentifier(DOI.class)
                       .findIdentifier(clonedEntry)
                       .ifPresent(e -> clonedEntry.setField(StandardField.DOI, e.getDOI()));
        } catch (FetcherException e) {
            LOGGER.debug("Failed to find DOI", e);
        }
    }

    /**
     * Unwraps a fetcher future, mapping any failure to an empty Optional.
     */
    private static Optional<FetcherResult> getResults(Future<Optional<FetcherResult>> future) {
        try {
            return future.get();
        } catch (InterruptedException e) {
            // Restore the interrupt status so callers higher up the stack can react to it;
            // swallowing it silently would hide the interruption request.
            Thread.currentThread().interrupt();
        } catch (ExecutionException | CancellationException e) {
            LOGGER.debug("Fetcher execution failed or was cancelled");
        }
        return Optional.empty();
    }

    private Callable<Optional<FetcherResult>> getCallable(BibEntry entry, FulltextFetcher fetcher) {
        return () -> {
            try {
                return fetcher.findFullText(entry)
                              .filter(url -> isPDF.test(url.toString()))
                              .map(url -> new FetcherResult(fetcher.getTrustLevel(), url));
            } catch (IOException | FetcherException e) {
                LOGGER.debug("Failed to find fulltext PDF at given URL", e);
            }
            return Optional.empty();
        };
    }

    private List<Callable<Optional<FetcherResult>>> getCallables(BibEntry entry, Set<FulltextFetcher> fetchers) {
        return fetchers.stream()
                       .map(f -> getCallable(entry, f))
                       .collect(Collectors.toList());
    }
}
4,546
36.891667
168
java
null
jabref-main/src/main/java/org/jabref/logic/importer/IdBasedFetcher.java
package org.jabref.logic.importer; import java.util.Optional; import org.jabref.model.entry.BibEntry; /** * Searches web resources for bibliographic information based on an identifier. * Examples are ISBN numbers and DOIs. */ public interface IdBasedFetcher extends WebFetcher { /** * Looks for bibliographic information associated to the given identifier. * * @param identifier a string which uniquely identifies the item * @return a {@link BibEntry} containing the bibliographic information (or an empty optional if no data was found) */ Optional<BibEntry> performSearchById(String identifier) throws FetcherException; }
663
30.619048
118
java
null
jabref-main/src/main/java/org/jabref/logic/importer/IdBasedParserFetcher.java
package org.jabref.logic.importer;

import java.io.IOException;
import java.io.InputStream;
import java.net.MalformedURLException;
import java.net.URISyntaxException;
import java.net.URL;
import java.util.List;
import java.util.Optional;

import org.jabref.logic.cleanup.Formatter;
import org.jabref.model.entry.BibEntry;
import org.jabref.model.strings.StringUtil;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/**
 * Provides a convenient interface for ID-based fetcher, which follow the usual three-step procedure:
 * 1. Open a URL based on the search query
 * 2. Parse the response to get a list of {@link BibEntry}
 * 3. Post-process fetched entries
 */
public interface IdBasedParserFetcher extends IdBasedFetcher {

    Logger LOGGER = LoggerFactory.getLogger(IdBasedParserFetcher.class);

    /**
     * Constructs a URL based on the query.
     *
     * @param identifier the ID
     */
    URL getUrlForIdentifier(String identifier) throws URISyntaxException, MalformedURLException, FetcherException;

    /**
     * Returns the parser used to convert the response to a list of {@link BibEntry}.
     */
    Parser getParser();

    /**
     * Performs a cleanup of the fetched entry.
     *
     * Only systematic errors of the fetcher should be corrected here
     * (i.e. if information is consistently contained in the wrong field or the wrong format)
     * but not cosmetic issues which may depend on the user's taste (for example, LateX code vs HTML in the abstract).
     *
     * Try to reuse existing {@link Formatter} for the cleanup. For example,
     * {@code new FieldFormatterCleanup(StandardField.TITLE, new RemoveBracesFormatter()).cleanup(entry);}
     *
     * By default, no cleanup is done.
     *
     * @param entry the entry to be cleaned-up
     */
    default void doPostCleanup(BibEntry entry) {
        // Do nothing by default
    }

    /**
     * Downloads and parses the entry for the given identifier.
     * If the service returns several entries, only the first one is used.
     *
     * @param identifier the ID to look up; blank input yields an empty Optional without any network access
     * @return the first fetched entry after {@link #doPostCleanup(BibEntry)}, or an empty Optional
     * @throws FetcherException if the URL is malformed, a network error occurs, or the response cannot be parsed
     */
    @Override
    default Optional<BibEntry> performSearchById(String identifier) throws FetcherException {
        if (StringUtil.isBlank(identifier)) {
            return Optional.empty();
        }

        // try-with-resources guarantees the download stream is closed even when parsing throws
        try (InputStream stream = getUrlDownload(getUrlForIdentifier(identifier)).asInputStream()) {
            List<BibEntry> fetchedEntries = getParser().parseEntries(stream);

            if (fetchedEntries.isEmpty()) {
                return Optional.empty();
            }

            if (fetchedEntries.size() > 1) {
                LOGGER.info("Fetcher {} found more than one result for identifier {}. We will use the first entry.", getName(), identifier);
            }

            BibEntry entry = fetchedEntries.get(0);

            // Post-cleanup
            doPostCleanup(entry);

            return Optional.of(entry);
        } catch (URISyntaxException e) {
            throw new FetcherException("Search URI is malformed", e);
        } catch (IOException e) {
            // check for the case where we already have a FetcherException from UrlDownload
            // (it arrives wrapped in an IOException); rethrow it unwrapped to keep the original error type
            if (e.getCause() instanceof FetcherException fe) {
                throw fe;
            }
            throw new FetcherException("A network error occurred", e);
        } catch (ParseException e) {
            throw new FetcherException("An internal parser error occurred", e);
        }
    }
}
3,270
33.797872
140
java
null
jabref-main/src/main/java/org/jabref/logic/importer/IdFetcher.java
package org.jabref.logic.importer; import java.util.Optional; import org.jabref.model.entry.BibEntry; import org.jabref.model.entry.identifier.Identifier; /** * Looks for article identifier based on already present bibliographic information. */ public interface IdFetcher<T extends Identifier> extends WebFetcher { /** * Looks for an identifier based on the information stored in the given {@link BibEntry}. * * @param entry the {@link BibEntry} for which an identifier should be found * @return the identifier (if an ID was found, otherwise an empty {@link Optional}) */ Optional<T> findIdentifier(BibEntry entry) throws FetcherException; /** * Returns the name of the identifier that is returned by this fetcher. */ String getIdentifierName(); }
806
30.038462
93
java
null
jabref-main/src/main/java/org/jabref/logic/importer/IdParserFetcher.java
package org.jabref.logic.importer;

import java.io.BufferedInputStream;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.InputStream;
import java.net.MalformedURLException;
import java.net.URISyntaxException;
import java.net.URL;
import java.util.List;
import java.util.Objects;
import java.util.Optional;

import org.jabref.logic.cleanup.Formatter;
import org.jabref.model.entry.BibEntry;
import org.jabref.model.entry.identifier.Identifier;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/**
 * Provides a convenient interface for {@link IdFetcher}, which follow the usual three-step procedure:
 * 1. Open a URL based on the search query
 * 2. Parse the response to get a list of {@link BibEntry}
 * 3. Extract identifier
 */
public interface IdParserFetcher<T extends Identifier> extends IdFetcher<T> {

    Logger LOGGER = LoggerFactory.getLogger(IdParserFetcher.class);

    /**
     * Constructs a URL based on the {@link BibEntry}.
     *
     * @param entry the entry to look information for
     */
    URL getURLForEntry(BibEntry entry) throws URISyntaxException, MalformedURLException, FetcherException;

    /**
     * Returns the parser used to convert the response to a list of {@link BibEntry}.
     */
    Parser getParser();

    /**
     * Performs a cleanup of the fetched entry.
     *
     * Only systematic errors of the fetcher should be corrected here
     * (i.e. if information is consistently contained in the wrong field or the wrong format)
     * but not cosmetic issues which may depend on the user's taste (for example, LateX code vs HTML in the abstract).
     *
     * Try to reuse existing {@link Formatter} for the cleanup. For example,
     * {@code new FieldFormatterCleanup(StandardField.TITLE, new RemoveBracesFormatter()).cleanup(entry);}
     *
     * By default, no cleanup is done.
     *
     * @param entry the entry to be cleaned-up
     */
    default void doPostCleanup(BibEntry entry) {
        // Do nothing by default
    }

    /**
     * Extracts the identifier from the list of fetched entries.
     *
     * @param inputEntry     the entry for which we are searching the identifier (can be used to find closest match in
     *                       the result)
     * @param fetchedEntries list of entries returned by the web service
     */
    Optional<T> extractIdentifier(BibEntry inputEntry, List<BibEntry> fetchedEntries) throws FetcherException;

    /**
     * Downloads search results for the entry and extracts the matching identifier.
     * A missing remote resource (HTTP 404 surfaces as FileNotFoundException) is
     * treated as "no identifier found", not as an error.
     *
     * @param entry the entry to find an identifier for; must not be null
     * @return the extracted identifier, or an empty Optional
     * @throws FetcherException if the URL is malformed, a network error occurs, or the response cannot be parsed
     */
    @Override
    default Optional<T> findIdentifier(BibEntry entry) throws FetcherException {
        Objects.requireNonNull(entry);

        // try-with-resources guarantees the stream is closed even when parsing throws
        try (InputStream stream = new BufferedInputStream(getURLForEntry(entry).openStream())) {
            List<BibEntry> fetchedEntries = getParser().parseEntries(stream);

            if (fetchedEntries.isEmpty()) {
                return Optional.empty();
            }

            // Post-cleanup
            fetchedEntries.forEach(this::doPostCleanup);

            return extractIdentifier(entry, fetchedEntries);
        } catch (URISyntaxException e) {
            throw new FetcherException("Search URI is malformed", e);
        } catch (FileNotFoundException e) {
            LOGGER.debug("Id not found");
            return Optional.empty();
        } catch (IOException e) {
            // check for the case where we already have a FetcherException from UrlDownload
            // (it arrives wrapped in an IOException); rethrow it unwrapped to keep the original error type
            if (e.getCause() instanceof FetcherException fe) {
                throw fe;
            }
            throw new FetcherException("An I/O exception occurred", e);
        } catch (ParseException e) {
            throw new FetcherException("An internal parser error occurred", e);
        }
    }
}
3,682
35.465347
118
java
null
jabref-main/src/main/java/org/jabref/logic/importer/ImportCleanup.java
package org.jabref.logic.importer; import java.util.Collection; import org.jabref.logic.cleanup.ConvertToBiblatexCleanup; import org.jabref.logic.cleanup.ConvertToBibtexCleanup; import org.jabref.model.database.BibDatabaseMode; import org.jabref.model.entry.BibEntry; public class ImportCleanup { private final BibDatabaseMode targetBibEntryFormat; public ImportCleanup(BibDatabaseMode targetBibEntryFormat) { this.targetBibEntryFormat = targetBibEntryFormat; } /** * Performs a format conversion of the given entry into the targeted format. * * @return Returns the cleaned up bibentry to enable usage of doPostCleanup in streams. */ public BibEntry doPostCleanup(BibEntry entry) { if (targetBibEntryFormat == BibDatabaseMode.BIBTEX) { new ConvertToBibtexCleanup().cleanup(entry); } else if (targetBibEntryFormat == BibDatabaseMode.BIBLATEX) { new ConvertToBiblatexCleanup().cleanup(entry); } return entry; } /** * Performs a format conversion of the given entry collection into the targeted format. */ public void doPostCleanup(Collection<BibEntry> entries) { entries.forEach(this::doPostCleanup); } }
1,248
31.025641
91
java
null
jabref-main/src/main/java/org/jabref/logic/importer/ImportException.java
package org.jabref.logic.importer; import org.jabref.logic.JabRefException; public class ImportException extends JabRefException { public ImportException(String errorMessage, Exception cause) { super(errorMessage, cause); } public ImportException(String errorMessage) { super(errorMessage); } public ImportException(Exception cause) { super(cause); } }
406
20.421053
66
java
null
jabref-main/src/main/java/org/jabref/logic/importer/ImportFormatPreferences.java
package org.jabref.logic.importer; import org.jabref.logic.bibtex.FieldPreferences; import org.jabref.logic.citationkeypattern.CitationKeyPatternPreferences; import org.jabref.logic.importer.fetcher.GrobidPreferences; import org.jabref.logic.preferences.DOIPreferences; import org.jabref.logic.xmp.XmpPreferences; import org.jabref.preferences.BibEntryPreferences; public record ImportFormatPreferences( BibEntryPreferences bibEntryPreferences, CitationKeyPatternPreferences citationKeyPatternPreferences, FieldPreferences fieldPreferences, XmpPreferences xmpPreferences, DOIPreferences doiPreferences, GrobidPreferences grobidPreferences) { }
694
37.611111
73
java
null
jabref-main/src/main/java/org/jabref/logic/importer/ImportFormatReader.java
package org.jabref.logic.importer;

import java.io.IOException;
import java.nio.file.Path;
import java.util.ArrayList;
import java.util.List;
import java.util.Objects;
import java.util.Optional;
import java.util.SortedSet;
import java.util.TreeSet;

import org.jabref.logic.importer.fileformat.BiblioscapeImporter;
import org.jabref.logic.importer.fileformat.BibtexImporter;
import org.jabref.logic.importer.fileformat.CffImporter;
import org.jabref.logic.importer.fileformat.CitaviXmlImporter;
import org.jabref.logic.importer.fileformat.CopacImporter;
import org.jabref.logic.importer.fileformat.EndnoteImporter;
import org.jabref.logic.importer.fileformat.EndnoteXmlImporter;
import org.jabref.logic.importer.fileformat.InspecImporter;
import org.jabref.logic.importer.fileformat.IsiImporter;
import org.jabref.logic.importer.fileformat.MedlineImporter;
import org.jabref.logic.importer.fileformat.MedlinePlainImporter;
import org.jabref.logic.importer.fileformat.ModsImporter;
import org.jabref.logic.importer.fileformat.MsBibImporter;
import org.jabref.logic.importer.fileformat.OvidImporter;
import org.jabref.logic.importer.fileformat.PdfContentImporter;
import org.jabref.logic.importer.fileformat.PdfEmbeddedBibFileImporter;
import org.jabref.logic.importer.fileformat.PdfGrobidImporter;
import org.jabref.logic.importer.fileformat.PdfMergeMetadataImporter;
import org.jabref.logic.importer.fileformat.PdfVerbatimBibTextImporter;
import org.jabref.logic.importer.fileformat.PdfXmpImporter;
import org.jabref.logic.importer.fileformat.RepecNepImporter;
import org.jabref.logic.importer.fileformat.RisImporter;
import org.jabref.logic.importer.fileformat.SilverPlatterImporter;
import org.jabref.logic.l10n.Localization;
import org.jabref.model.database.BibDatabases;
import org.jabref.model.entry.BibEntry;
import org.jabref.model.util.FileUpdateMonitor;

/**
 * Registry of all available {@link Importer}s. Supports importing in a named
 * format as well as "unknown format" import, which tries every registered
 * importer and keeps the most promising result.
 */
public class ImportFormatReader {

    public static final String BIBTEX_FORMAT = "BibTeX";

    /**
     * All import formats.
     * Sorted accordingly to {@link Importer#compareTo}, which defaults to alphabetically by the name
     */
    private final List<Importer> formats = new ArrayList<>();

    private final ImporterPreferences importerPreferences;
    private final ImportFormatPreferences importFormatPreferences;
    private final FileUpdateMonitor fileUpdateMonitor;

    public ImportFormatReader(ImporterPreferences importerPreferences, ImportFormatPreferences importFormatPreferences, FileUpdateMonitor fileUpdateMonitor) {
        this.importerPreferences = importerPreferences;
        this.importFormatPreferences = importFormatPreferences;
        this.fileUpdateMonitor = fileUpdateMonitor;
        reset();
    }

    /**
     * (Re-)populates the importer list with all built-in importers plus the
     * user-defined custom importers from the preferences.
     * NOTE(review): this method only adds — it never clears {@code formats},
     * so calling it twice would register duplicates; confirm whether callers
     * rely on single invocation.
     */
    public void reset() {
        formats.add(new CopacImporter());
        formats.add(new EndnoteImporter(importFormatPreferences));
        formats.add(new EndnoteXmlImporter(importFormatPreferences));
        formats.add(new InspecImporter());
        formats.add(new IsiImporter());
        formats.add(new MedlineImporter());
        formats.add(new MedlinePlainImporter());
        formats.add(new ModsImporter(importFormatPreferences));
        formats.add(new MsBibImporter());
        formats.add(new OvidImporter());
        formats.add(new PdfMergeMetadataImporter(importFormatPreferences));
        formats.add(new PdfVerbatimBibTextImporter(importFormatPreferences));
        formats.add(new PdfContentImporter(importFormatPreferences));
        formats.add(new PdfEmbeddedBibFileImporter(importFormatPreferences));
        // The Grobid-based importer requires an external service, so it is
        // only registered when the user has enabled Grobid.
        if (importFormatPreferences.grobidPreferences().isGrobidEnabled()) {
            formats.add(new PdfGrobidImporter(importFormatPreferences));
        }
        formats.add(new PdfXmpImporter(importFormatPreferences.xmpPreferences()));
        formats.add(new RepecNepImporter(importFormatPreferences));
        formats.add(new RisImporter());
        formats.add(new SilverPlatterImporter());
        formats.add(new CffImporter());
        formats.add(new BiblioscapeImporter());
        formats.add(new BibtexImporter(importFormatPreferences, fileUpdateMonitor));
        formats.add(new CitaviXmlImporter());

        // Get custom import formats
        formats.addAll(importerPreferences.getCustomImporters());
    }

    /**
     * Looks up the importer registered under the given CLI id.
     * <p>
     * Returns the first format (in default order) whose {@link Importer#getId()}
     * matches the given id.
     *
     * @param cliId CLI-Id
     * @return the matching importer, or {@link Optional#empty()} if none matches
     */
    private Optional<Importer> getByCliId(String cliId) {
        for (Importer format : formats) {
            if (format.getId().equals(cliId)) {
                return Optional.of(format);
            }
        }
        return Optional.empty();
    }

    /**
     * Imports the given file using the importer identified by the given CLI id.
     *
     * @param format CLI id of the importer to use
     * @param file   the file to import
     * @throws ImportException if no importer with that id exists or reading fails
     */
    public ParserResult importFromFile(String format, Path file) throws ImportException {
        Optional<Importer> importer = getByCliId(format);

        if (importer.isEmpty()) {
            throw new ImportException(Localization.lang("Unknown import format") + ": " + format);
        }

        try {
            return importer.get().importDatabase(file);
        } catch (IOException e) {
            throw new ImportException(e);
        }
    }

    /**
     * All importers.
     *
     * Elements are sorted by name.
     * </p>
     *
     * @return all custom importers, elements are of type InputFormat
     */
    public SortedSet<Importer> getImportFormats() {
        return new TreeSet<>(this.formats);
    }

    /** Result of an unknown-format import: the winning format name plus the parse result. */
    public record UnknownFormatImport(String format, ParserResult parserResult) {
    }

    /**
     * Tries to import a file by iterating through the available import filters,
     * and keeping the import that seems most promising.
     * <p/>
     * If every registered importer fails, the file is finally tried as BibTeX.
     *
     * @throws ImportException if the import fails (for example, if no suitable importer is found)
     */
    public UnknownFormatImport importUnknownFormat(Path filePath, FileUpdateMonitor fileMonitor) throws ImportException {
        Objects.requireNonNull(filePath);

        try {
            UnknownFormatImport unknownFormatImport = importUnknownFormat(importer -> importer.importDatabase(filePath), importer -> importer.isRecognizedFormat(filePath));
            unknownFormatImport.parserResult.setPath(filePath);
            return unknownFormatImport;
        } catch (ImportException e) {
            // If all importers fail, try to read the file as BibTeX
            try {
                ParserResult parserResult = OpenDatabase.loadDatabase(filePath, importFormatPreferences, fileMonitor);
                // Accept the BibTeX fallback only if it actually produced content
                if (parserResult.getDatabase().hasEntries() || !parserResult.getDatabase().hasNoStrings()) {
                    parserResult.setPath(filePath);
                    return new UnknownFormatImport(ImportFormatReader.BIBTEX_FORMAT, parserResult);
                } else {
                    throw new ImportException(parserResult.getErrorMessage());
                }
            } catch (IOException ignore) {
                // Ignored
                throw new ImportException(Localization.lang("Could not find a suitable import format."));
            }
        }
    }

    /**
     * Tries to import entries by iterating through the available import filters,
     * and keeping the import that seems the most promising
     *
     * @param importDatabase     the function to import the entries with a formatter
     * @param isRecognizedFormat the function to check whether the source is in the correct format for an importer
     * @return an UnknownFormatImport with the imported entries and metadata
     * @throws ImportException if the import fails (for example, if no suitable importer is found)
     */
    private UnknownFormatImport importUnknownFormat(CheckedFunction<Importer, ParserResult> importDatabase,
                                                    CheckedFunction<Importer, Boolean> isRecognizedFormat) throws ImportException {
        // stores ref to best result, gets updated at the next loop
        List<BibEntry> bestResult = null;
        int bestResultCount = 0;
        String bestFormatName = null;

        // Cycle through all importers:
        for (Importer imFo : formats) {
            try {
                if (!isRecognizedFormat.apply(imFo)) {
                    continue;
                }

                ParserResult parserResult = importDatabase.apply(imFo);
                List<BibEntry> entries = parserResult.getDatabase().getEntries();
                BibDatabases.purgeEmptyEntries(entries);

                // "Most promising" == the importer that yields the most non-empty entries
                int entryCount = entries.size();
                if (entryCount > bestResultCount) {
                    bestResult = entries;
                    bestResultCount = entryCount;
                    bestFormatName = imFo.getName();
                }
            } catch (IOException ex) {
                // The import did not succeed. Go on.
            }
        }

        if (bestResult != null) {
            // we found something
            ParserResult parserResult = new ParserResult(bestResult);
            return new UnknownFormatImport(bestFormatName, parserResult);
        }

        throw new ImportException(Localization.lang("Could not find a suitable import format."));
    }

    /** Function that may throw an {@link IOException}; used to abstract over path- and string-based imports. */
    @FunctionalInterface
    public interface CheckedFunction<T, R> {
        R apply(T t) throws IOException;
    }

    /**
     * Tries to import a String by iterating through the available import filters,
     * and keeping the import that seems the most promising
     *
     * @param data the string to import
     * @return an UnknownFormatImport with the imported entries and metadata
     * @throws ImportException if the import fails (for example, if no suitable importer is found)
     */
    public UnknownFormatImport importUnknownFormat(String data) throws ImportException {
        Objects.requireNonNull(data);
        return importUnknownFormat(importer -> importer.importDatabase(data), importer -> importer.isRecognizedFormat(data));
    }
}
10,100
41.0875
187
java
null
jabref-main/src/main/java/org/jabref/logic/importer/Importer.java
package org.jabref.logic.importer;

import java.io.BufferedInputStream;
import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.io.StringReader;
import java.nio.charset.Charset;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.StandardOpenOption;
import java.util.Arrays;
import java.util.Objects;

import org.jabref.logic.util.FileType;
import org.jabref.logic.util.io.FileUtil;
import org.jabref.model.database.BibDatabaseModeDetection;

import com.ibm.icu.text.CharsetDetector;
import com.ibm.icu.text.CharsetMatch;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/**
 * Role of an importer for JabRef.
 */
public abstract class Importer implements Comparable<Importer> {

    private static final Logger LOGGER = LoggerFactory.getLogger(Importer.class);

    /**
     * Check whether the source is in the correct format for this importer.
     * <p>
     * The effect of this method is primarily to avoid unnecessary processing of files when searching for a suitable
     * import format. If this method returns false, the import routine will move on to the next import format.
     * <p>
     * Thus the correct behaviour is to return false if it is certain that the file is not of the suitable type, and
     * true otherwise. Returning true is the safe choice if not certain.
     */
    public abstract boolean isRecognizedFormat(BufferedReader input) throws IOException;

    /**
     * Check whether the source is in the correct format for this importer.
     *
     * @param filePath the path of the file to check
     * @return true, if the file is in a recognized format
     * @throws IOException Signals that an I/O exception has occurred.
     */
    public boolean isRecognizedFormat(Path filePath) throws IOException {
        try (BufferedReader bufferedReader = getReader(filePath)) {
            return isRecognizedFormat(bufferedReader);
        }
    }

    /**
     * Check whether the source is in the correct format for this importer.
     *
     * @param data the data to check
     * @return true, if the data is in a recognized format
     * @throws IOException Signals that an I/O exception has occurred.
     */
    public boolean isRecognizedFormat(String data) throws IOException {
        try (StringReader stringReader = new StringReader(data);
             BufferedReader bufferedReader = new BufferedReader(stringReader)) {
            return isRecognizedFormat(bufferedReader);
        }
    }

    /**
     * Parse the database in the source.
     * <p>
     * This method can be called in two different contexts - either when importing in a specified format, or when
     * importing in unknown format. In the latter case, JabRef cycles through all available import formats. No error
     * messages or feedback is displayed from individual import formats in this case.
     * <p>
     * If importing in a specified format and an empty library is returned, JabRef reports that no entries were found.
     * <p>
     * This method should never return null.
     *
     * @param input the input to read from
     */
    public abstract ParserResult importDatabase(BufferedReader input) throws IOException;

    /**
     * Parse the database in the specified file.
     *
     * @param filePath the path to the file which should be imported
     */
    public ParserResult importDatabase(Path filePath) throws IOException {
        try (InputStream inputStream = Files.newInputStream(filePath, StandardOpenOption.READ)) {
            BufferedInputStream bufferedInputStream = new BufferedInputStream(inputStream);
            Charset charset = StandardCharsets.UTF_8;
            BufferedReader bufferedReader = new BufferedReader(new InputStreamReader(bufferedInputStream, charset));
            ParserResult parserResult = importDatabase(bufferedReader);

            // store the detected encoding
            parserResult.getMetaData().setEncoding(charset);

            parserResult.setPath(filePath);

            // Make sure the mode is always set
            if (parserResult.getMetaData().getMode().isEmpty()) {
                parserResult.getMetaData().setMode(BibDatabaseModeDetection.inferMode(parserResult.getDatabase()));
            }
            return parserResult;
        }
    }

    /**
     * Detects the charset of the given stream via ICU; falls back to UTF-8 when
     * detection fails or the content looks like plain ASCII.
     */
    protected static Charset getCharset(BufferedInputStream bufferedInputStream) {
        Charset defaultCharSet = StandardCharsets.UTF_8;

        // This reads the first 8000 bytes only, thus the default size of 8192 of the bufferedInputStream is OK.
        // See https://github.com/unicode-org/icu/blob/06ef8867f35befee7340e35082fefc9d3561d230/icu4j/main/classes/core/src/com/ibm/icu/text/CharsetDetector.java#L125 for details
        CharsetDetector charsetDetector = new CharsetDetector();
        try {
            charsetDetector.setText(bufferedInputStream);
            CharsetMatch[] matches = charsetDetector.detectAll();
            if ((matches == null) || (matches.length == 0)) {
                return defaultCharSet;
            }
            // ASCII content is a subset of UTF-8, so prefer the default there
            if (Arrays.stream(matches).anyMatch(charset -> "ASCII".equals(charset.getName()))) {
                return defaultCharSet;
            }
            if (matches[0] != null) {
                return Charset.forName(matches[0].getName());
            }
        } catch (IOException e) {
            LOGGER.error("Could not determine charset. Using default one.", e);
        }
        return defaultCharSet;
    }

    /**
     * Parse the database in the specified string.
     * <p>
     * Importer having the facilities to detect the correct encoding of a string should overwrite this method, determine
     * the encoding and then call {@link #importDatabase(BufferedReader)}.
     *
     * @param data the string which should be imported
     * @return the parsed result
     * @throws IOException Signals that an I/O exception has occurred.
     */
    public ParserResult importDatabase(String data) throws IOException {
        try (StringReader stringReader = new StringReader(data);
             BufferedReader bufferedReader = new BufferedReader(stringReader)) {
            return importDatabase(bufferedReader);
        }
    }

    public static BufferedReader getReader(Path filePath) throws IOException {
        InputStream stream = Files.newInputStream(filePath, StandardOpenOption.READ);
        // Only .bib files go through charset detection; other formats are read as UTF-8
        if (FileUtil.isBibFile(filePath)) {
            return getReader(stream);
        }
        return new BufferedReader(new InputStreamReader(stream, StandardCharsets.UTF_8));
    }

    public static BufferedReader getReader(InputStream stream) {
        BufferedInputStream bufferedInputStream = new BufferedInputStream(stream);
        Charset charset = getCharset(bufferedInputStream);
        InputStreamReader reader = new InputStreamReader(bufferedInputStream, charset);
        return new BufferedReader(reader);
    }

    /**
     * Returns the name of this import format.
     *
     * <p>The name must be unique.</p>
     *
     * @return format name, must be unique and not <code>null</code>
     */
    public abstract String getName();

    /**
     * Returns the type of files that this importer can read
     *
     * @return {@link FileType} corresponding to the importer
     */
    public abstract FileType getFileType();

    /**
     * Returns a one-word ID which identifies this importer. Used for example, to identify the importer when used from
     * the command line.
     *
     * @return ID, must be unique and not <code>null</code>
     */
    public String getId() {
        String id = getName();
        StringBuilder result = new StringBuilder(id.length());
        // Derive the id from the name: lowercase letters/digits only
        for (int i = 0; i < id.length(); i++) {
            char c = id.charAt(i);
            if (Character.isLetterOrDigit(c)) {
                result.append(Character.toLowerCase(c));
            }
        }
        return result.toString();
    }

    /**
     * Returns the description of the import format.
     * <p>
     * The description should specify
     * <ul><li>
     * what kind of entries from what sources and based on what specification it is able to import
     * </li><li>
     * by what criteria it {@link #isRecognizedFormat(BufferedReader) recognizes} an import format
     * </li></ul>
     *
     * @return description of the import format
     */
    public abstract String getDescription();

    @Override
    public int hashCode() {
        return getName().hashCode();
    }

    @Override
    public boolean equals(Object obj) {
        if (this == obj) {
            return true;
        }
        // Pattern instanceof avoids the separate cast of the legacy idiom
        if (!(obj instanceof Importer other)) {
            return false;
        }
        return Objects.equals(this.getName(), other.getName());
    }

    @Override
    public String toString() {
        return getName();
    }

    @Override
    public int compareTo(Importer o) {
        return getName().compareTo(o.getName());
    }
}
9,074
35.445783
178
java
null
jabref-main/src/main/java/org/jabref/logic/importer/ImporterPreferences.java
package org.jabref.logic.importer;

import java.nio.file.Path;
import java.util.Set;

import javafx.beans.property.BooleanProperty;
import javafx.beans.property.ObjectProperty;
import javafx.beans.property.SimpleBooleanProperty;
import javafx.beans.property.SimpleObjectProperty;
import javafx.collections.FXCollections;
import javafx.collections.ObservableSet;

import org.jabref.logic.importer.fileformat.CustomImporter;
import org.jabref.logic.preferences.FetcherApiKey;

/**
 * JavaFX-property-backed preferences controlling the import workflow:
 * key generation, duplicate warnings, the working directory, custom
 * importers, and fetcher API keys.
 */
public class ImporterPreferences {

    private final BooleanProperty generateNewKeyOnImport;
    private final BooleanProperty warnAboutDuplicatesOnImport;
    private final ObjectProperty<Path> importWorkingDirectory;
    private final ObservableSet<FetcherApiKey> apiKeys;
    private final ObservableSet<CustomImporter> customImporters;

    public ImporterPreferences(boolean generateNewKeyOnImport,
                               Path importWorkingDirectory,
                               boolean warnAboutDuplicatesOnImport,
                               Set<CustomImporter> customImporters,
                               Set<FetcherApiKey> apiKeys) {
        // Wrap plain values in observable properties so UI bindings see changes.
        this.generateNewKeyOnImport = new SimpleBooleanProperty(generateNewKeyOnImport);
        this.warnAboutDuplicatesOnImport = new SimpleBooleanProperty(warnAboutDuplicatesOnImport);
        this.importWorkingDirectory = new SimpleObjectProperty<>(importWorkingDirectory);
        this.customImporters = FXCollections.observableSet(customImporters);
        this.apiKeys = FXCollections.observableSet(apiKeys);
    }

    // --- generateNewKeyOnImport ---

    public boolean isGenerateNewKeyOnImport() {
        return generateNewKeyOnImport.get();
    }

    public BooleanProperty generateNewKeyOnImportProperty() {
        return generateNewKeyOnImport;
    }

    public void setGenerateNewKeyOnImport(boolean generateNewKeyOnImport) {
        this.generateNewKeyOnImport.set(generateNewKeyOnImport);
    }

    // --- importWorkingDirectory ---

    public Path getImportWorkingDirectory() {
        return importWorkingDirectory.get();
    }

    public ObjectProperty<Path> importWorkingDirectoryProperty() {
        return importWorkingDirectory;
    }

    public void setImportWorkingDirectory(Path importWorkingDirectory) {
        this.importWorkingDirectory.set(importWorkingDirectory);
    }

    // --- warnAboutDuplicatesOnImport ---

    public boolean shouldWarnAboutDuplicatesOnImport() {
        return warnAboutDuplicatesOnImport.get();
    }

    public BooleanProperty warnAboutDuplicatesOnImportProperty() {
        return warnAboutDuplicatesOnImport;
    }

    public void setWarnAboutDuplicatesOnImport(boolean warnAboutDuplicatesOnImport) {
        this.warnAboutDuplicatesOnImport.set(warnAboutDuplicatesOnImport);
    }

    // --- apiKeys / customImporters ---

    public ObservableSet<FetcherApiKey> getApiKeys() {
        return apiKeys;
    }

    public ObservableSet<CustomImporter> getCustomImporters() {
        return customImporters;
    }

    /** Replaces all registered custom importers with the given set. */
    public void setCustomImporters(Set<CustomImporter> importers) {
        customImporters.clear();
        customImporters.addAll(importers);
    }
}
3,013
34.458824
98
java
null
jabref-main/src/main/java/org/jabref/logic/importer/OpenDatabase.java
package org.jabref.logic.importer;

import java.io.IOException;
import java.nio.file.Path;
import java.util.Arrays;
import java.util.List;

import org.jabref.logic.importer.fileformat.BibtexImporter;
import org.jabref.migrations.ConvertLegacyExplicitGroups;
import org.jabref.migrations.ConvertMarkingToGroups;
import org.jabref.migrations.PostOpenMigration;
import org.jabref.migrations.SpecialFieldsToSeparateFields;
import org.jabref.model.util.FileUpdateMonitor;

/**
 * Utility for opening a BibTeX library file and applying post-open migrations.
 */
public class OpenDatabase {

    private OpenDatabase() {
    }

    /**
     * Load database (bib-file)
     *
     * @param fileToOpen Name of the BIB-file to open
     * @return ParserResult which never is null
     */
    public static ParserResult loadDatabase(Path fileToOpen, ImportFormatPreferences importFormatPreferences, FileUpdateMonitor fileMonitor)
            throws IOException {
        BibtexImporter importer = new BibtexImporter(importFormatPreferences, fileMonitor);
        ParserResult result = importer.importDatabase(fileToOpen);
        performLoadDatabaseMigrations(result, importFormatPreferences.bibEntryPreferences().getKeywordSeparator());
        return result;
    }

    /** Runs every registered post-open migration against the freshly loaded result. */
    private static void performLoadDatabaseMigrations(ParserResult parserResult, Character keywordDelimiter) {
        List<PostOpenMigration> migrations = Arrays.asList(
                new ConvertLegacyExplicitGroups(),
                new ConvertMarkingToGroups(),
                new SpecialFieldsToSeparateFields(keywordDelimiter));
        migrations.forEach(migration -> migration.performMigration(parserResult));
    }
}
1,620
33.489362
140
java
null
jabref-main/src/main/java/org/jabref/logic/importer/OutputPrinter.java
package org.jabref.logic.importer;

/**
 * Callback interface used by import code to report status and messages to the
 * surrounding environment (GUI or CLI) without depending on it directly.
 */
public interface OutputPrinter {

    /** Updates the current status text. Must be pretranslated. */
    void setStatus(String s);

    /**
     * Shows a message with a title.
     *
     * @param msgType numeric message type — presumably a JOptionPane-style
     *                constant (information/warning/error); TODO confirm against callers
     */
    void showMessage(String message, String title, int msgType);

    /** Shows a plain message without a title. */
    void showMessage(String string);
}
206
17.818182
64
java
null
jabref-main/src/main/java/org/jabref/logic/importer/PagedSearchBasedFetcher.java
package org.jabref.logic.importer;

import java.util.ArrayList;
import java.util.Collections;
import java.util.List;

import org.jabref.model.entry.BibEntry;
import org.jabref.model.paging.Page;

import org.apache.lucene.queryparser.flexible.core.QueryNodeParseException;
import org.apache.lucene.queryparser.flexible.core.nodes.QueryNode;
import org.apache.lucene.queryparser.flexible.core.parser.SyntaxParser;
import org.apache.lucene.queryparser.flexible.standard.parser.StandardSyntaxParser;

/**
 * A {@link SearchBasedFetcher} that supports paging through its results.
 */
public interface PagedSearchBasedFetcher extends SearchBasedFetcher {

    /**
     * @param luceneQuery the root node of the lucene query
     * @param pageNumber  requested site number indexed from 0
     * @return Page with search results
     */
    Page<BibEntry> performSearchPaged(QueryNode luceneQuery, int pageNumber) throws FetcherException;

    /**
     * @param searchQuery query string that can be parsed into a lucene query
     * @param pageNumber  requested site number indexed from 0
     * @return Page with search results
     */
    default Page<BibEntry> performSearchPaged(String searchQuery, int pageNumber) throws FetcherException {
        if (searchQuery.isBlank()) {
            // Blank query: return an empty page rather than hitting the parser
            return new Page<>(searchQuery, pageNumber, Collections.emptyList());
        }
        SyntaxParser parser = new StandardSyntaxParser();
        final String NO_EXPLICIT_FIELD = "default";
        try {
            return this.performSearchPaged(parser.parse(searchQuery, NO_EXPLICIT_FIELD), pageNumber);
        } catch (QueryNodeParseException e) {
            // Chain the cause so callers can see why the query failed to parse
            throw new FetcherException("An error occurred during parsing of the query.", e);
        }
    }

    /**
     * @return default pageSize
     */
    default int getPageSize() {
        return 20;
    }

    /**
     * This method is used to send complex queries using fielded search.
     *
     * @param luceneQuery the root node of the lucene query
     * @return a list of {@link BibEntry}, which are matched by the query (may be empty)
     */
    @Override
    default List<BibEntry> performSearch(QueryNode luceneQuery) throws FetcherException {
        return new ArrayList<>(performSearchPaged(luceneQuery, 0).getContent());
    }
}
2,211
35.866667
107
java
null
jabref-main/src/main/java/org/jabref/logic/importer/PagedSearchBasedParserFetcher.java
package org.jabref.logic.importer;

import java.io.IOException;
import java.io.InputStream;
import java.net.MalformedURLException;
import java.net.URISyntaxException;
import java.net.URL;
import java.util.List;

import org.jabref.model.entry.BibEntry;
import org.jabref.model.paging.Page;

import org.apache.lucene.queryparser.flexible.core.nodes.QueryNode;

/**
 * A paged fetcher whose results are obtained by downloading a URL and feeding
 * the response through a {@link Parser}.
 */
public interface PagedSearchBasedParserFetcher extends SearchBasedParserFetcher, PagedSearchBasedFetcher {

    @Override
    default Page<BibEntry> performSearchPaged(QueryNode luceneQuery, int pageNumber) throws FetcherException {
        // ADR-0014
        final URL queryUrl;
        try {
            queryUrl = getURLForQuery(luceneQuery, pageNumber);
        } catch (URISyntaxException | MalformedURLException e) {
            throw new FetcherException("Search URI crafted from complex search query is malformed", e);
        }
        return new Page<>(luceneQuery.toString(), pageNumber, getBibEntries(queryUrl));
    }

    /** Downloads the given URL, parses the response, and post-cleans each entry. */
    private List<BibEntry> getBibEntries(URL urlForQuery) throws FetcherException {
        try (InputStream input = getUrlDownload(urlForQuery).asInputStream()) {
            List<BibEntry> result = getParser().parseEntries(input);
            result.forEach(this::doPostCleanup);
            return result;
        } catch (IOException e) {
            throw new FetcherException("A network error occurred while fetching from " + urlForQuery, e);
        } catch (ParseException e) {
            throw new FetcherException("An internal parser error occurred while fetching from " + urlForQuery, e);
        }
    }

    /**
     * Constructs a URL based on the query, size and page number.
     *
     * @param luceneQuery the search query
     * @param pageNumber  the number of the page indexed from 0
     */
    URL getURLForQuery(QueryNode luceneQuery, int pageNumber) throws URISyntaxException, MalformedURLException, FetcherException;

    @Override
    default URL getURLForQuery(QueryNode luceneQuery) throws URISyntaxException, MalformedURLException, FetcherException {
        // A non-paged query is simply page 0 of the paged variant
        return getURLForQuery(luceneQuery, 0);
    }

    @Override
    default List<BibEntry> performSearch(QueryNode luceneQuery) throws FetcherException {
        return SearchBasedParserFetcher.super.performSearch(luceneQuery);
    }
}
2,343
38.728814
129
java
null
jabref-main/src/main/java/org/jabref/logic/importer/ParseException.java
package org.jabref.logic.importer; public class ParseException extends Exception { public ParseException(Throwable cause) { super(cause); } public ParseException(String message, Throwable cause) { super(message, cause); } public ParseException(String message) { super(message); } }
334
18.705882
60
java
null
jabref-main/src/main/java/org/jabref/logic/importer/Parser.java
package org.jabref.logic.importer;

import java.io.ByteArrayInputStream;
import java.io.InputStream;
import java.nio.charset.StandardCharsets;
import java.util.List;

import org.jabref.model.entry.BibEntry;

/**
 * A parser converts an {@link InputStream} into a list of {@link BibEntry}.
 */
public interface Parser {

    List<BibEntry> parseEntries(InputStream inputStream) throws ParseException;

    /**
     * Convenience overload: encodes the given string as UTF-8 and delegates to
     * {@link #parseEntries(InputStream)}.
     */
    default List<BibEntry> parseEntries(String dataString) throws ParseException {
        byte[] utf8Bytes = dataString.getBytes(StandardCharsets.UTF_8);
        return parseEntries(new ByteArrayInputStream(utf8Bytes));
    }
}
592
27.238095
99
java
null
jabref-main/src/main/java/org/jabref/logic/importer/ParserResult.java
package org.jabref.logic.importer;

import java.nio.file.Path;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.HashSet;
import java.util.List;
import java.util.Objects;
import java.util.Optional;
import java.util.Set;

import org.jabref.model.database.BibDatabase;
import org.jabref.model.database.BibDatabaseContext;
import org.jabref.model.database.BibDatabases;
import org.jabref.model.entry.BibEntry;
import org.jabref.model.entry.BibEntryType;
import org.jabref.model.metadata.MetaData;

/**
 * Mutable result of parsing a bibliography source: the parsed database, its
 * metadata, the source file (if any), and all warnings collected on the way.
 */
public class ParserResult {

    private final Set<BibEntryType> entryTypes;
    private final List<String> warnings = new ArrayList<>();
    private BibDatabase database;
    private MetaData metaData;
    private Path sourcePath;
    private boolean invalid;
    private boolean toOpenTab;
    private boolean changedOnMigration = false;

    public ParserResult() {
        this(Collections.emptyList());
    }

    public ParserResult(Collection<BibEntry> entries) {
        this(new BibDatabase(BibDatabases.purgeEmptyEntries(entries)));
    }

    public ParserResult(BibDatabase database) {
        this(database, new MetaData(), new HashSet<>());
    }

    public ParserResult(BibDatabase database, MetaData metaData, Set<BibEntryType> entryTypes) {
        this.database = Objects.requireNonNull(database);
        this.metaData = Objects.requireNonNull(metaData);
        this.entryTypes = Objects.requireNonNull(entryTypes);
    }

    /** Creates an invalid result carrying only the given warning message. */
    public static ParserResult fromErrorMessage(String message) {
        ParserResult parserResult = new ParserResult();
        parserResult.addWarning(message);
        parserResult.setInvalid(true);
        return parserResult;
    }

    /** Creates an invalid result from the given exception's message (and cause, if any). */
    public static ParserResult fromError(Exception exception) {
        return fromErrorMessage(getErrorMessage(exception));
    }

    /** Builds a human-readable message from an exception, appending the cause's message when present. */
    private static String getErrorMessage(Exception exception) {
        Throwable cause = exception.getCause();
        if (cause == null) {
            return exception.getLocalizedMessage();
        }
        return exception.getLocalizedMessage() + " Caused by: " + cause.getLocalizedMessage();
    }

    /**
     * Check if this database is marked to be added to the currently open tab. Default is false.
     */
    public boolean toOpenTab() {
        return toOpenTab;
    }

    public void setToOpenTab() {
        this.toOpenTab = true;
    }

    public BibDatabase getDatabase() {
        return database;
    }

    public MetaData getMetaData() {
        return metaData;
    }

    public void setMetaData(MetaData md) {
        this.metaData = md;
    }

    public Set<BibEntryType> getEntryTypes() {
        return entryTypes;
    }

    public Optional<Path> getPath() {
        return Optional.ofNullable(sourcePath);
    }

    public void setPath(Path path) {
        sourcePath = path;
    }

    /**
     * Add a parser warning.
     *
     * @param s String Warning text. Must be pretranslated. Only added if there isn't already a dupe.
     */
    public void addWarning(String s) {
        if (!warnings.contains(s)) {
            warnings.add(s);
        }
    }

    public void addException(Exception exception) {
        addWarning(getErrorMessage(exception));
    }

    public boolean hasWarnings() {
        return !warnings.isEmpty();
    }

    /** @return a defensive copy of the collected warnings */
    public List<String> warnings() {
        return new ArrayList<>(warnings);
    }

    public boolean isInvalid() {
        return invalid;
    }

    public void setInvalid(boolean invalid) {
        this.invalid = invalid;
    }

    /** @return all warnings joined into one space-separated string */
    public String getErrorMessage() {
        return String.join(" ", warnings());
    }

    public BibDatabaseContext getDatabaseContext() {
        return new BibDatabaseContext(database, metaData, sourcePath);
    }

    public void setDatabaseContext(BibDatabaseContext bibDatabaseContext) {
        Objects.requireNonNull(bibDatabaseContext);
        database = bibDatabaseContext.getDatabase();
        metaData = bibDatabaseContext.getMetaData();
        sourcePath = bibDatabaseContext.getDatabasePath().orElse(null);
    }

    /** @return true if there are no entries, no strings, no preamble, and no metadata */
    public boolean isEmpty() {
        return !this.getDatabase().hasEntries()
                && this.getDatabase().hasNoStrings()
                && this.getDatabase().getPreamble().isEmpty()
                && this.getMetaData().isEmpty();
    }

    public boolean wasChangedOnMigration() {
        return changedOnMigration;
    }

    public void setChangedOnMigration(boolean wasChangedOnMigration) {
        this.changedOnMigration = wasChangedOnMigration;
    }
}
4,599
27.04878
101
java
null
jabref-main/src/main/java/org/jabref/logic/importer/QueryParser.java
package org.jabref.logic.importer; import java.util.ArrayList; import java.util.Comparator; import java.util.HashSet; import java.util.List; import java.util.Optional; import java.util.Set; import org.jabref.logic.importer.fetcher.ComplexSearchQuery; import org.apache.lucene.index.Term; import org.apache.lucene.queryparser.flexible.core.QueryNodeException; import org.apache.lucene.queryparser.flexible.standard.StandardQueryParser; import org.apache.lucene.search.Query; import org.apache.lucene.search.QueryVisitor; /** * This class converts a query string written in lucene syntax into a complex query. * * For simplicity this is currently limited to fielded data and the boolean AND operator. */ public class QueryParser { /** * Parses the given query string into a complex query using lucene. * Note: For unique fields, the alphabetically and numerically first instance in the query string is used in the complex query. * * @param query The given query string * @return A complex query containing all fields of the query string */ public Optional<ComplexSearchQuery> parseQueryStringIntoComplexQuery(String query) { try { StandardQueryParser parser = new StandardQueryParser(); Query luceneQuery = parser.parse(query, "default"); Set<Term> terms = new HashSet<>(); // This implementation collects all terms from the leaves of the query tree independent of the internal boolean structure // If further capabilities are required in the future the visitor and ComplexSearchQuery has to be adapted accordingly. QueryVisitor visitor = QueryVisitor.termCollector(terms); luceneQuery.visit(visitor); List<Term> sortedTerms = new ArrayList<>(terms); sortedTerms.sort(Comparator.comparing(Term::text).reversed()); return Optional.of(ComplexSearchQuery.fromTerms(sortedTerms)); } catch (QueryNodeException | IllegalStateException | IllegalArgumentException ex) { return Optional.empty(); } } }
2,100
41.02
133
java
null
jabref-main/src/main/java/org/jabref/logic/importer/SearchBasedFetcher.java
package org.jabref.logic.importer; import java.util.Collections; import java.util.List; import org.jabref.model.entry.BibEntry; import org.apache.lucene.queryparser.flexible.core.QueryNodeParseException; import org.apache.lucene.queryparser.flexible.core.nodes.QueryNode; import org.apache.lucene.queryparser.flexible.core.parser.SyntaxParser; import org.apache.lucene.queryparser.flexible.standard.parser.StandardSyntaxParser; import static org.jabref.logic.importer.fetcher.transformers.AbstractQueryTransformer.NO_EXPLICIT_FIELD; /** * Searches web resources for bibliographic information based on a free-text query. * May return multiple search hits. * <p> * This interface is used for web resources which directly return BibTeX data ({@link BibEntry}) * </p> */ public interface SearchBasedFetcher extends WebFetcher { /** * This method is used to send complex queries using fielded search. * * @param luceneQuery the root node of the lucene query * @return a list of {@link BibEntry}, which are matched by the query (may be empty) */ List<BibEntry> performSearch(QueryNode luceneQuery) throws FetcherException; /** * Looks for hits which are matched by the given free-text query. * * @param searchQuery query string that can be parsed into a lucene query * @return a list of {@link BibEntry}, which are matched by the query (may be empty) */ default List<BibEntry> performSearch(String searchQuery) throws FetcherException { if (searchQuery.isBlank()) { return Collections.emptyList(); } SyntaxParser parser = new StandardSyntaxParser(); QueryNode queryNode; try { queryNode = parser.parse(searchQuery, NO_EXPLICIT_FIELD); } catch (QueryNodeParseException e) { throw new FetcherException("An error occurred when parsing the query"); } return this.performSearch(queryNode); } }
1,971
35.518519
104
java
null
jabref-main/src/main/java/org/jabref/logic/importer/SearchBasedParserFetcher.java
package org.jabref.logic.importer; import java.io.IOException; import java.io.InputStream; import java.net.MalformedURLException; import java.net.URISyntaxException; import java.net.URL; import java.util.List; import org.jabref.logic.cleanup.Formatter; import org.jabref.model.entry.BibEntry; import org.apache.lucene.queryparser.flexible.core.nodes.QueryNode; /** * Provides a convenient interface for search-based fetcher, which follows the usual three-step procedure: * <ol> * <li>Open a URL based on the search query</li> * <li>Parse the response to get a list of {@link BibEntry}</li> * <li>Post-process fetched entries</li> * </ol> * <p> * This interface is used for web resources which do NOT provide BibTeX data {@link BibEntry}. * JabRef's infrastructure to convert arbitrary input data to BibTeX is {@link Parser}. * </p> * <p> * This interface inherits {@link SearchBasedFetcher}, because the methods <code>performSearch</code> have to be provided by both. * As non-BibTeX web fetcher one could do "magic" stuff without this helper interface and directly use {@link WebFetcher}, but this is more work. * </p> * <p> * Note that this interface "should" be an abstract class. * However, Java does not support multi inheritance with classes (but with interfaces). * We need multi inheritance, because a fetcher might implement multiple query types (such as id fetching {@link IdBasedFetcher}), complete entry {@link EntryBasedFetcher}, and search-based fetcher (this class). * </p> */ public interface SearchBasedParserFetcher extends SearchBasedFetcher { /** * This method is used to send queries with advanced URL parameters. * This method is necessary as the performSearch method does not support certain URL parameters that are used for * fielded search, such as a title, author, or year parameter. 
* * @param luceneQuery the root node of the lucene query */ @Override default List<BibEntry> performSearch(QueryNode luceneQuery) throws FetcherException { // ADR-0014 URL urlForQuery; try { urlForQuery = getURLForQuery(luceneQuery); } catch (URISyntaxException | MalformedURLException | FetcherException e) { throw new FetcherException("Search URI crafted from complex search query is malformed", e); } return getBibEntries(urlForQuery); } private List<BibEntry> getBibEntries(URL urlForQuery) throws FetcherException { try (InputStream stream = getUrlDownload(urlForQuery).asInputStream()) { List<BibEntry> fetchedEntries = getParser().parseEntries(stream); fetchedEntries.forEach(this::doPostCleanup); return fetchedEntries; } catch (IOException e) { throw new FetcherException("A network error occurred while fetching from " + urlForQuery, e); } catch (ParseException e) { throw new FetcherException("An internal parser error occurred while fetching from " + urlForQuery, e); } } /** * Returns the parser used to convert the response to a list of {@link BibEntry}. */ Parser getParser(); /** * Constructs a URL based on the lucene query. * * @param luceneQuery the root node of the lucene query */ URL getURLForQuery(QueryNode luceneQuery) throws URISyntaxException, MalformedURLException, FetcherException; /** * Performs a cleanup of the fetched entry. * <p> * Only systematic errors of the fetcher should be corrected here * (i.e. if information is consistently contained in the wrong field or the wrong format) * but not cosmetic issues which may depend on the user's taste (for example, LateX code vs HTML in the abstract). * <p> * Try to reuse existing {@link Formatter} for the cleanup. For example, * {@code new FieldFormatterCleanup(StandardField.TITLE, new RemoveBracesFormatter()).cleanup(entry);} * <p> * By default, no cleanup is done. * * @param entry the entry to be cleaned-up */ default void doPostCleanup(BibEntry entry) { // Do nothing by default } }
4,202
41.454545
215
java
null
jabref-main/src/main/java/org/jabref/logic/importer/WebFetcher.java
package org.jabref.logic.importer; import java.net.URL; import java.util.Optional; import org.jabref.logic.help.HelpFile; import org.jabref.logic.net.URLDownload; /** * Searches web resources for bibliographic information. */ public interface WebFetcher { /** * Returns the localized name of this fetcher. * The title can be used to display the fetcher in the menu and in the side pane. * * @return the localized name */ String getName(); /** * Returns the help page for this fetcher. * * @return the {@link HelpFile} enum constant for the help page */ default Optional<HelpFile> getHelpPage() { return Optional.empty(); // no help page by default } /** * Constructs an {@link URLDownload} object for downloading content based on the given URL. Overwrite, if you need to send additional headers for the download. */ default URLDownload getUrlDownload(URL url) { return new URLDownload(url); } }
1,006
25.5
163
java
null
jabref-main/src/main/java/org/jabref/logic/importer/WebFetchers.java
package org.jabref.logic.importer; import java.util.Comparator; import java.util.HashSet; import java.util.Optional; import java.util.Set; import java.util.SortedSet; import java.util.TreeSet; import org.jabref.logic.importer.fetcher.ACMPortalFetcher; import org.jabref.logic.importer.fetcher.ACS; import org.jabref.logic.importer.fetcher.ApsFetcher; import org.jabref.logic.importer.fetcher.ArXivFetcher; import org.jabref.logic.importer.fetcher.AstrophysicsDataSystem; import org.jabref.logic.importer.fetcher.BiodiversityLibrary; import org.jabref.logic.importer.fetcher.BvbFetcher; import org.jabref.logic.importer.fetcher.CiteSeer; import org.jabref.logic.importer.fetcher.CollectionOfComputerScienceBibliographiesFetcher; import org.jabref.logic.importer.fetcher.CompositeSearchBasedFetcher; import org.jabref.logic.importer.fetcher.CrossRef; import org.jabref.logic.importer.fetcher.CustomizableKeyFetcher; import org.jabref.logic.importer.fetcher.DBLPFetcher; import org.jabref.logic.importer.fetcher.DOABFetcher; import org.jabref.logic.importer.fetcher.DOAJFetcher; import org.jabref.logic.importer.fetcher.DiVA; import org.jabref.logic.importer.fetcher.DoiFetcher; import org.jabref.logic.importer.fetcher.DoiResolution; import org.jabref.logic.importer.fetcher.GvkFetcher; import org.jabref.logic.importer.fetcher.IEEE; import org.jabref.logic.importer.fetcher.INSPIREFetcher; import org.jabref.logic.importer.fetcher.IacrEprintFetcher; import org.jabref.logic.importer.fetcher.LibraryOfCongress; import org.jabref.logic.importer.fetcher.MathSciNet; import org.jabref.logic.importer.fetcher.MedlineFetcher; import org.jabref.logic.importer.fetcher.Medra; import org.jabref.logic.importer.fetcher.OpenAccessDoi; import org.jabref.logic.importer.fetcher.ResearchGate; import org.jabref.logic.importer.fetcher.RfcFetcher; import org.jabref.logic.importer.fetcher.ScienceDirect; import org.jabref.logic.importer.fetcher.SemanticScholar; import 
org.jabref.logic.importer.fetcher.SpringerFetcher; import org.jabref.logic.importer.fetcher.SpringerLink; import org.jabref.logic.importer.fetcher.TitleFetcher; import org.jabref.logic.importer.fetcher.ZbMATH; import org.jabref.logic.importer.fetcher.isbntobibtex.EbookDeIsbnFetcher; import org.jabref.logic.importer.fetcher.isbntobibtex.IsbnFetcher; import org.jabref.logic.importer.fileformat.PdfMergeMetadataImporter; import org.jabref.model.database.BibDatabaseContext; import org.jabref.model.entry.field.Field; import org.jabref.model.entry.field.StandardField; import org.jabref.model.entry.identifier.DOI; import org.jabref.model.entry.identifier.Identifier; import org.jabref.preferences.FilePreferences; import static org.jabref.model.entry.field.StandardField.EPRINT; import static org.jabref.model.entry.field.StandardField.ISBN; public class WebFetchers { private WebFetchers() { } public static Optional<IdBasedFetcher> getIdBasedFetcherForField(Field field, ImportFormatPreferences importFormatPreferences) { IdBasedFetcher fetcher; if (field == StandardField.DOI) { fetcher = new DoiFetcher(importFormatPreferences); } else if (field == ISBN) { fetcher = new IsbnFetcher(importFormatPreferences) .addRetryFetcher(new EbookDeIsbnFetcher(importFormatPreferences)); // .addRetryFetcher(new DoiToBibtexConverterComIsbnFetcher(importFormatPreferences)); } else if (field == EPRINT) { fetcher = new ArXivFetcher(importFormatPreferences); } else { return Optional.empty(); } return Optional.of(fetcher); } @SuppressWarnings("unchecked") public static <T extends Identifier> IdFetcher<T> getIdFetcherForIdentifier(Class<T> clazz) { if (clazz == DOI.class) { return (IdFetcher<T>) new CrossRef(); } else { throw new IllegalArgumentException("No fetcher found for identifier" + clazz.getCanonicalName()); } } public static Optional<IdFetcher<? 
extends Identifier>> getIdFetcherForField(Field field) { if (field == StandardField.DOI) { return Optional.of(new CrossRef()); } return Optional.empty(); } /** * @return sorted set containing search based fetchers */ public static SortedSet<SearchBasedFetcher> getSearchBasedFetchers(ImportFormatPreferences importFormatPreferences, ImporterPreferences importerPreferences) { SortedSet<SearchBasedFetcher> set = new TreeSet<>(Comparator.comparing(WebFetcher::getName)); set.add(new ArXivFetcher(importFormatPreferences)); set.add(new INSPIREFetcher(importFormatPreferences)); set.add(new GvkFetcher()); set.add(new BvbFetcher()); set.add(new MedlineFetcher()); set.add(new AstrophysicsDataSystem(importFormatPreferences, importerPreferences)); set.add(new MathSciNet(importFormatPreferences)); set.add(new ZbMATH(importFormatPreferences)); set.add(new ACMPortalFetcher()); // set.add(new GoogleScholar(importFormatPreferences)); set.add(new DBLPFetcher(importFormatPreferences)); set.add(new SpringerFetcher(importerPreferences)); set.add(new CrossRef()); set.add(new CiteSeer()); set.add(new DOAJFetcher(importFormatPreferences)); set.add(new IEEE(importFormatPreferences, importerPreferences)); set.add(new CompositeSearchBasedFetcher(set, 30)); set.add(new CollectionOfComputerScienceBibliographiesFetcher(importFormatPreferences)); set.add(new DOABFetcher()); // set.add(new JstorFetcher(importFormatPreferences)); set.add(new SemanticScholar()); set.add(new ResearchGate(importFormatPreferences)); set.add(new BiodiversityLibrary(importerPreferences)); return set; } /** * @return sorted set containing id based fetchers */ public static SortedSet<IdBasedFetcher> getIdBasedFetchers(ImportFormatPreferences importFormatPreferences, ImporterPreferences importerPreferences) { SortedSet<IdBasedFetcher> set = new TreeSet<>(Comparator.comparing(WebFetcher::getName)); set.add(new ArXivFetcher(importFormatPreferences)); set.add(new AstrophysicsDataSystem(importFormatPreferences, 
importerPreferences)); set.add(new IsbnFetcher(importFormatPreferences) .addRetryFetcher(new EbookDeIsbnFetcher(importFormatPreferences))); // .addRetryFetcher(new DoiToBibtexConverterComIsbnFetcher(importFormatPreferences))); set.add(new DiVA(importFormatPreferences)); set.add(new DoiFetcher(importFormatPreferences)); set.add(new MedlineFetcher()); set.add(new TitleFetcher(importFormatPreferences)); set.add(new MathSciNet(importFormatPreferences)); set.add(new ZbMATH(importFormatPreferences)); set.add(new CrossRef()); set.add(new LibraryOfCongress(importFormatPreferences)); set.add(new IacrEprintFetcher(importFormatPreferences)); set.add(new RfcFetcher(importFormatPreferences)); set.add(new Medra()); // set.add(new JstorFetcher(importFormatPreferences)); return set; } /** * @return sorted set containing entry based fetchers */ public static SortedSet<EntryBasedFetcher> getEntryBasedFetchers(ImporterPreferences importerPreferences, ImportFormatPreferences importFormatPreferences, FilePreferences filePreferences, BibDatabaseContext databaseContext) { SortedSet<EntryBasedFetcher> set = new TreeSet<>(Comparator.comparing(WebFetcher::getName)); set.add(new INSPIREFetcher(importFormatPreferences)); set.add(new AstrophysicsDataSystem(importFormatPreferences, importerPreferences)); set.add(new DoiFetcher(importFormatPreferences)); set.add(new IsbnFetcher(importFormatPreferences) .addRetryFetcher(new EbookDeIsbnFetcher(importFormatPreferences))); // .addRetryFetcher(new DoiToBibtexConverterComIsbnFetcher(importFormatPreferences))); set.add(new MathSciNet(importFormatPreferences)); set.add(new CrossRef()); set.add(new ZbMATH(importFormatPreferences)); set.add(new PdfMergeMetadataImporter.EntryBasedFetcherWrapper(importFormatPreferences, filePreferences, databaseContext)); set.add(new SemanticScholar()); set.add(new ResearchGate(importFormatPreferences)); return set; } /** * @return sorted set containing id fetchers */ public static SortedSet<IdFetcher<? 
extends Identifier>> getIdFetchers(ImportFormatPreferences importFormatPreferences) { SortedSet<IdFetcher<?>> set = new TreeSet<>(Comparator.comparing(WebFetcher::getName)); set.add(new CrossRef()); set.add(new ArXivFetcher(importFormatPreferences)); return set; } /** * @return set containing fulltext fetchers */ public static Set<FulltextFetcher> getFullTextFetchers(ImportFormatPreferences importFormatPreferences, ImporterPreferences importerPreferences) { Set<FulltextFetcher> fetchers = new HashSet<>(); // Original fetchers.add(new DoiResolution(importFormatPreferences.doiPreferences())); // Publishers fetchers.add(new ScienceDirect(importerPreferences)); fetchers.add(new SpringerLink(importerPreferences)); fetchers.add(new ACS()); fetchers.add(new ArXivFetcher(importFormatPreferences)); fetchers.add(new IEEE(importFormatPreferences, importerPreferences)); fetchers.add(new ApsFetcher()); fetchers.add(new IacrEprintFetcher(importFormatPreferences)); // Meta search // fetchers.add(new JstorFetcher(importFormatPreferences)); // fetchers.add(new GoogleScholar(importFormatPreferences)); fetchers.add(new CiteSeer()); fetchers.add(new OpenAccessDoi()); fetchers.add(new SemanticScholar()); fetchers.add(new ResearchGate(importFormatPreferences)); return fetchers; } /** * @return set containing customizable api key fetchers */ public static Set<CustomizableKeyFetcher> getCustomizableKeyFetchers(ImportFormatPreferences importFormatPreferences, ImporterPreferences importerPreferences) { Set<CustomizableKeyFetcher> fetchers = new HashSet<>(); fetchers.add(new IEEE(importFormatPreferences, importerPreferences)); fetchers.add(new SpringerFetcher(importerPreferences)); fetchers.add(new ScienceDirect(importerPreferences)); fetchers.add(new AstrophysicsDataSystem(importFormatPreferences, importerPreferences)); fetchers.add(new BiodiversityLibrary(importerPreferences)); return fetchers; } }
11,091
47.863436
164
java
null
jabref-main/src/main/java/org/jabref/logic/importer/fetcher/ACMPortalFetcher.java
package org.jabref.logic.importer.fetcher; import java.net.CookieHandler; import java.net.CookieManager; import java.net.MalformedURLException; import java.net.URISyntaxException; import java.net.URL; import java.util.Optional; import org.jabref.logic.help.HelpFile; import org.jabref.logic.importer.FetcherException; import org.jabref.logic.importer.Parser; import org.jabref.logic.importer.SearchBasedParserFetcher; import org.jabref.logic.importer.fetcher.transformers.DefaultQueryTransformer; import org.jabref.logic.importer.fileformat.ACMPortalParser; import org.apache.http.client.utils.URIBuilder; import org.apache.lucene.queryparser.flexible.core.nodes.QueryNode; public class ACMPortalFetcher implements SearchBasedParserFetcher { public static final String FETCHER_NAME = "ACM Portal"; private static final String SEARCH_URL = "https://dl.acm.org/action/doSearch"; public ACMPortalFetcher() { // website dl.acm.org requires cookies CookieHandler.setDefault(new CookieManager()); } @Override public String getName() { return FETCHER_NAME; } @Override public Optional<HelpFile> getHelpPage() { return Optional.of(HelpFile.FETCHER_ACM); } private static String createQueryString(QueryNode query) { return new DefaultQueryTransformer().transformLuceneQuery(query).orElse(""); } /** * Constructing the url for the searchpage. * * @param query query node * @return query URL */ @Override public URL getURLForQuery(QueryNode query) throws URISyntaxException, MalformedURLException, FetcherException { URIBuilder uriBuilder = new URIBuilder(SEARCH_URL); uriBuilder.addParameter("AllField", createQueryString(query)); return uriBuilder.build().toURL(); } /** * Gets an instance of ACMPortalParser. * * @return the parser which can process the results returned from the ACM Portal search page */ @Override public Parser getParser() { return new ACMPortalParser(); } }
2,079
29.588235
115
java
null
jabref-main/src/main/java/org/jabref/logic/importer/fetcher/ACS.java
package org.jabref.logic.importer.fetcher; import java.io.IOException; import java.net.URL; import java.util.Objects; import java.util.Optional; import org.jabref.logic.importer.FulltextFetcher; import org.jabref.model.entry.BibEntry; import org.jabref.model.entry.field.StandardField; import org.jabref.model.entry.identifier.DOI; import org.jsoup.Jsoup; import org.jsoup.nodes.Document; import org.jsoup.nodes.Element; import org.slf4j.Logger; import org.slf4j.LoggerFactory; /** * FulltextFetcher implementation that attempts to find a PDF URL at ACS. */ public class ACS implements FulltextFetcher { private static final Logger LOGGER = LoggerFactory.getLogger(ACS.class); private static final String SOURCE = "https://pubs.acs.org/doi/abs/%s"; /** * Tries to find a fulltext URL for a given BibTex entry. * <p> * Currently only uses the DOI if found. * * @param entry The Bibtex entry * @return The fulltext PDF URL Optional, if found, or an empty Optional if not found. * @throws NullPointerException if no BibTex entry is given * @throws java.io.IOException */ @Override public Optional<URL> findFullText(BibEntry entry) throws IOException { Objects.requireNonNull(entry); // DOI search Optional<DOI> doi = entry.getField(StandardField.DOI).flatMap(DOI::parse); if (!doi.isPresent()) { return Optional.empty(); } String source = String.format(SOURCE, doi.get().getDOI()); // Retrieve PDF link Document html = Jsoup.connect(source).ignoreHttpErrors(true).get(); Element link = html.select("a.button_primary").first(); if (link != null) { LOGGER.info("Fulltext PDF found @ ACS."); return Optional.of(new URL(source.replaceFirst("/abs/", "/pdf/"))); } return Optional.empty(); } @Override public TrustLevel getTrustLevel() { return TrustLevel.PUBLISHER; } }
1,996
29.723077
90
java
null
jabref-main/src/main/java/org/jabref/logic/importer/fetcher/AbstractIsbnFetcher.java
package org.jabref.logic.importer.fetcher; import java.util.Optional; import org.jabref.logic.help.HelpFile; import org.jabref.logic.importer.FetcherException; import org.jabref.logic.importer.IdBasedParserFetcher; import org.jabref.logic.importer.ImportFormatPreferences; import org.jabref.logic.importer.Parser; import org.jabref.logic.importer.fileformat.BibtexParser; import org.jabref.logic.l10n.Localization; import org.jabref.model.entry.identifier.ISBN; public abstract class AbstractIsbnFetcher implements IdBasedParserFetcher { protected final ImportFormatPreferences importFormatPreferences; public AbstractIsbnFetcher(ImportFormatPreferences importFormatPreferences) { this.importFormatPreferences = importFormatPreferences; } @Override public Optional<HelpFile> getHelpPage() { return Optional.of(HelpFile.FETCHER_ISBN); } protected void ensureThatIsbnIsValid(String identifier) throws FetcherException { ISBN isbn = new ISBN(identifier); if (!isbn.isValid()) { throw new FetcherException(Localization.lang("Invalid ISBN: '%0'.", identifier)); } } @Override public Parser getParser() { return new BibtexParser(importFormatPreferences); } }
1,265
31.461538
93
java
null
jabref-main/src/main/java/org/jabref/logic/importer/fetcher/ApsFetcher.java
package org.jabref.logic.importer.fetcher; import java.io.IOException; import java.net.MalformedURLException; import java.net.URL; import java.net.URLConnection; import java.util.Objects; import java.util.Optional; import org.jabref.logic.importer.FulltextFetcher; import org.jabref.model.entry.BibEntry; import org.jabref.model.entry.field.StandardField; import org.jabref.model.entry.identifier.DOI; import kong.unirest.Unirest; import org.slf4j.Logger; import org.slf4j.LoggerFactory; /** * FulltextFetcher implementation that attempts to find a PDF URL at APS. Also see the <a * href="https://harvest.aps.org/docs/harvest-api">API</a>, although it isn't currently used. */ public class ApsFetcher implements FulltextFetcher { private static final Logger LOGGER = LoggerFactory.getLogger(ApsFetcher.class); // The actual API needs either an API key or a header. This is a workaround. private static final String DOI_URL = "https://www.doi.org/"; private static final String PDF_URL = "https://journals.aps.org/prl/pdf/"; @Override public Optional<URL> findFullText(BibEntry entry) throws IOException { Objects.requireNonNull(entry); Optional<DOI> doi = entry.getField(StandardField.DOI).flatMap(DOI::parse); if (!doi.isPresent()) { return Optional.empty(); } Optional<String> id = getId(doi.get().getDOI()); if (id.isPresent()) { String pdfRequestUrl = PDF_URL + id.get(); int code = Unirest.head(pdfRequestUrl).asJson().getStatus(); if (code == 200) { LOGGER.info("Fulltext PDF found @ APS."); try { return Optional.of(new URL(pdfRequestUrl)); } catch (MalformedURLException e) { LOGGER.warn("APS returned malformed URL, cannot find PDF."); } } } return Optional.empty(); } @Override public TrustLevel getTrustLevel() { return TrustLevel.PUBLISHER; } /** * Convert a DOI into an appropriate APS id. 
* * @param doi A case insensitive DOI * @return A DOI cased as APS likes it */ private Optional<String> getId(String doi) { // DOI is not case sensitive, but the id for the PDF URL is, // so we follow DOI.org redirects to get the proper id. // https://stackoverflow.com/a/5270162/1729441 String doiRequest = DOI_URL + doi; URLConnection con; try { con = new URL(doiRequest).openConnection(); con.connect(); con.getInputStream(); String[] urlParts = con.getURL().toString().split("abstract/"); if (urlParts.length == 2) { return Optional.of(urlParts[1]); } } catch (IOException e) { LOGGER.warn("Error connecting to APS", e); } return Optional.empty(); } }
2,963
31.217391
93
java
null
jabref-main/src/main/java/org/jabref/logic/importer/fetcher/ArXivFetcher.java
package org.jabref.logic.importer.fetcher;

import java.io.IOException;
import java.net.HttpURLConnection;
import java.net.MalformedURLException;
import java.net.URISyntaxException;
import java.net.URL;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.Optional;
import java.util.Set;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CompletionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.stream.Collectors;

import javax.xml.parsers.DocumentBuilder;
import javax.xml.parsers.DocumentBuilderFactory;
import javax.xml.parsers.ParserConfigurationException;

import org.jabref.logic.cleanup.EprintCleanup;
import org.jabref.logic.help.HelpFile;
import org.jabref.logic.importer.FetcherException;
import org.jabref.logic.importer.FulltextFetcher;
import org.jabref.logic.importer.IdBasedFetcher;
import org.jabref.logic.importer.IdFetcher;
import org.jabref.logic.importer.ImportFormatPreferences;
import org.jabref.logic.importer.PagedSearchBasedFetcher;
import org.jabref.logic.importer.fetcher.transformers.ArXivQueryTransformer;
import org.jabref.logic.util.io.XMLUtil;
import org.jabref.logic.util.strings.StringSimilarity;
import org.jabref.model.entry.BibEntry;
import org.jabref.model.entry.KeywordList;
import org.jabref.model.entry.LinkedFile;
import org.jabref.model.entry.field.Field;
import org.jabref.model.entry.field.InternalField;
import org.jabref.model.entry.field.StandardField;
import org.jabref.model.entry.identifier.ArXivIdentifier;
import org.jabref.model.entry.identifier.DOI;
import org.jabref.model.entry.types.StandardEntryType;
import org.jabref.model.paging.Page;
import org.jabref.model.strings.StringUtil;
import org.jabref.model.util.OptionalUtil;

import org.apache.http.client.utils.URIBuilder;
import org.apache.lucene.queryparser.flexible.core.nodes.QueryNode;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.w3c.dom.Document;
import org.w3c.dom.Node;
import org.xml.sax.SAXException;

/**
 * Fetcher for ArXiv that merges fields from arXiv-issued DOIs (and user-issued ones when applicable) to get more information overall.
 * <p>
 * These are the post-processing steps applied to the original fetch from ArXiv's API:
 * <ol>
 * <li>Use ArXiv-issued DOI to merge more data with the original entry, overwriting some of those fields;</li>
 * <li>Use user-issued DOI (if it was provided) to merge even more data with the result of the previous step, overwriting some of those fields;</li>
 * <li>Modify keywords: remove repetitions and adapt some edge cases (commas in keyword transformed into forward slashes).</li>
 * </ol>
 *
 * @see <a href="https://blog.arxiv.org/2022/02/17/new-arxiv-articles-are-now-automatically-assigned-dois/">arXiv.org blog </a> for more info about arXiv-issued DOIs
 * @see <a href="https://arxiv.org/help/api/index">ArXiv API</a> for an overview of the API
 * @see <a href="https://arxiv.org/help/api/user-manual#_calling_the_api">ArXiv API User's Manual</a> for a detailed description on how to use the API
 */
public class ArXivFetcher implements FulltextFetcher, PagedSearchBasedFetcher, IdBasedFetcher, IdFetcher<ArXivIdentifier> {

    private static final Logger LOGGER = LoggerFactory.getLogger(ArXivFetcher.class);

    // See https://blog.arxiv.org/2022/02/17/new-arxiv-articles-are-now-automatically-assigned-dois/
    private static final String DOI_PREFIX = "10.48550/arXiv.";

    /*
     * Reason behind choice of these fields:
     * - KEYWORDS: More descriptive
     * - AUTHOR: Better formatted (last name, rest of name)
     */
    private static final Set<Field> CHOSEN_AUTOMATIC_DOI_FIELDS = Set.of(StandardField.KEYWORDS, StandardField.AUTHOR);

    /*
     * Reason behind choice of these fields:
     * - DOI: give preference to DOIs manually inputted by users, instead of automatic ones
     * - PUBLISHER: ArXiv-issued DOIs give 'ArXiv' as entry publisher. While this can be true, prefer using one from external sources,
     *   if applicable
     * - KEY_FIELD: Usually, the KEY_FIELD retrieved from user-assigned DOIs are 'nicer' (instead of a DOI link, it's usually contains one author and the year)
     */
    private static final Set<Field> CHOSEN_MANUAL_DOI_FIELDS = Set.of(StandardField.DOI, StandardField.PUBLISHER, InternalField.KEY_FIELD);

    // Keywords whose literal comma would otherwise be split by the keyword separator; commas are replaced by slashes.
    private static final Map<String, String> ARXIV_KEYWORDS_WITH_COMMA_REPLACEMENTS = Collections.unmodifiableMap(Map.of(
            "Computational Engineering, Finance, and Science", "Computational Engineering / Finance / Science",
            "Distributed, Parallel, and Cluster Computing", "Distributed / Parallel / Cluster Computing"));

    private final ArXiv arXiv;
    private final DoiFetcher doiFetcher;
    private final ImportFormatPreferences importFormatPreferences;

    public ArXivFetcher(ImportFormatPreferences importFormatPreferences) {
        this(importFormatPreferences, new DoiFetcher(importFormatPreferences));
    }

    /**
     * @param doiFetcher The fetcher, maybe be NULL if no additional search is desired.
     */
    public ArXivFetcher(ImportFormatPreferences importFormatPreferences, DoiFetcher doiFetcher) {
        this.arXiv = new ArXiv(importFormatPreferences);
        this.doiFetcher = doiFetcher;
        this.importFormatPreferences = importFormatPreferences;
    }

    @Override
    public Optional<URL> findFullText(BibEntry entry) throws IOException {
        // Fulltext lookup is delegated unchanged to the plain ArXiv fetcher.
        return arXiv.findFullText(entry);
    }

    @Override
    public TrustLevel getTrustLevel() {
        return arXiv.getTrustLevel();
    }

    @Override
    public String getName() {
        return arXiv.getName();
    }

    @Override
    public Optional<HelpFile> getHelpPage() {
        return arXiv.getHelpPage();
    }

    /**
     * Remove duplicate values on "KEYWORD" field, if any, and adapt keywords that contain literal commas.
     *
     * @param bibEntry A BibEntry to modify
     */
    private void adaptKeywordsFrom(BibEntry bibEntry) {
        Optional<String> allKeywords = bibEntry.getField(StandardField.KEYWORDS);
        if (allKeywords.isPresent()) {
            // With the use of ArXiv-issued DOI's KEYWORDS field, some of those keywords might contain comma. As this is the
            // default keyword separator, replace the commas of these instances with some other character
            // (see ARXIV_KEYWORDS_WITH_COMMA_REPLACEMENTS variable)
            for (Map.Entry<String, String> entry : ARXIV_KEYWORDS_WITH_COMMA_REPLACEMENTS.entrySet()) {
                allKeywords = Optional.of(allKeywords.get().replaceAll(entry.getKey(), entry.getValue()));
            }
            // Merging the keyword list with an empty string de-duplicates repeated keywords.
            String filteredKeywords = KeywordList.merge(
                    allKeywords.get(),
                    "",
                    importFormatPreferences.bibEntryPreferences().getKeywordSeparator()).toString();
            bibEntry.setField(StandardField.KEYWORDS, filteredKeywords);
        }
    }

    /**
     * Get ArXiv-issued DOI from the entry's arXiv ID
     * <br/><br/>
     * ArXiv-issued DOIs are identifiers associated with every ArXiv entry. They are composed of a fixed
     * {@link #DOI_PREFIX} + the entry's ArXiv ID
     *
     * @param arXivId An ArXiv ID
     * @return ArXiv-issued DOI
     */
    private static String getAutomaticDoi(String arXivId) {
        return DOI_PREFIX + arXivId;
    }

    /**
     * Get ArXiv-issued DOI from the arXiv entry itself.
     * <br/><br/>
     * ArXiv-issued DOIs are identifiers associated with every ArXiv entry. They are composed of a fixed {@link #DOI_PREFIX} + the entry's ArXiv ID
     *
     * @param arXivBibEntry A Bibtex Entry, formatted as a ArXiv entry. Must contain an EPRINT field
     * @return ArXiv-issued DOI, or Empty, if method could not retrieve it
     */
    private static Optional<String> getAutomaticDoi(BibEntry arXivBibEntry) {
        // As the input should always contain a EPRINT if created from inner 'ArXiv' class, don't bother doing a check that might call
        // ArXiv's API again (method 'findIdentifier')
        Optional<String> entryEprint = arXivBibEntry.getField(StandardField.EPRINT);
        if (entryEprint.isEmpty()) {
            LOGGER.error("Cannot infer ArXiv-issued DOI from BibEntry: no 'EPRINT' field found");
            return Optional.empty();
        } else {
            return Optional.of(ArXivFetcher.getAutomaticDoi(entryEprint.get()));
        }
    }

    /**
     * Get ArXiv-issued DOI from ArXiv Identifier object
     * <br/><br/>
     * ArXiv-issued DOIs are identifiers associated with every ArXiv entry. They are composed of a fixed {@link #DOI_PREFIX} + the entry's ArXiv ID
     *
     * @param arXivId An ArXiv ID as internal object
     * @return ArXiv-issued DOI
     */
    private static String getAutomaticDoi(ArXivIdentifier arXivId) {
        return getAutomaticDoi(arXivId.getNormalizedWithoutVersion());
    }

    /**
     * Check if a specific DOI is user-assigned (i.e. does not carry the fixed arXiv prefix).
     */
    private static boolean isManualDoi(String doi) {
        return !doi.toLowerCase().contains(DOI_PREFIX.toLowerCase());
    }

    /**
     * Get user-issued DOI from ArXiv Bibtex entry, if any
     * <br/><br/>
     * User-issued DOIs are identifiers associated with some ArXiv entries that can associate an entry with an external service, like
     * <a href="https://link.springer.com/">Springer Link</a>.
     *
     * @param arXivBibEntry An ArXiv Bibtex entry from where the DOI is extracted
     * @return User-issued DOI, if any field exists and if it's not an automatic one (see {@link #getAutomaticDoi(ArXivIdentifier)})
     */
    private static Optional<String> getManualDoi(BibEntry arXivBibEntry) {
        return arXivBibEntry.getField(StandardField.DOI).filter(ArXivFetcher::isManualDoi);
    }

    /**
     * Get the Bibtex Entry from a Future API request (uses blocking) and treat exceptions.
     *
     * @param bibEntryFuture A CompletableFuture that parallelize the API fetching process
     * @return the fetch result
     * @throws FetcherException when the future failed with a FetcherException cause
     */
    private static Optional<BibEntry> waitForBibEntryRetrieval(CompletableFuture<Optional<BibEntry>> bibEntryFuture) throws FetcherException {
        try {
            return bibEntryFuture.join();
        } catch (CompletionException e) {
            if (!(e.getCause() instanceof FetcherException)) {
                // Unexpected cause: rethrow the CompletionException unchanged.
                LOGGER.error("The supplied future should only throw a FetcherException.", e);
                throw e;
            }
            throw (FetcherException) e.getCause();
        }
    }

    /**
     * Eventually merge the ArXiv Bibtex entry with a Future Bibtex entry (ArXiv/user-assigned DOIs)
     *
     * @param arXivEntry     The entry to merge into
     * @param bibEntryFuture A future result of the fetching process
     * @param priorityFields Which fields from "bibEntryFuture" to prioritize, replacing them on "arXivEntry"
     * @param id             Identifier used in initiating the "bibEntryFuture" future (for logging). This is usually a DOI, but can be anything.
     */
    private void mergeArXivEntryWithFutureDoiEntry(BibEntry arXivEntry, CompletableFuture<Optional<BibEntry>> bibEntryFuture, Set<Field> priorityFields, String id) {
        Optional<BibEntry> bibEntry;
        try {
            bibEntry = waitForBibEntryRetrieval(bibEntryFuture);
        } catch (FetcherException | CompletionException e) {
            // Best-effort enrichment: a failed auxiliary fetch is logged, not propagated.
            LOGGER.error("Failed to fetch future BibEntry with id '{}' (skipping merge).", id, e);
            return;
        }

        if (bibEntry.isPresent()) {
            adaptKeywordsFrom(bibEntry.get());
            arXivEntry.mergeWith(bibEntry.get(), priorityFields);
        } else {
            LOGGER.error("Future BibEntry for id '{}' was completed, but no entry was found (skipping merge).", id);
        }
    }

    /**
     * Infuse arXivBibEntryPromise with additional fields in an asynchronous way
     *
     * @param arXivBibEntry An existing entry to be updated with new/modified fields
     */
    private void inplaceAsyncInfuseArXivWithDoi(BibEntry arXivBibEntry) {
        CompletableFuture<Optional<BibEntry>> arXivBibEntryCompletedFuture = CompletableFuture.completedFuture(Optional.of(arXivBibEntry));
        Optional<ArXivIdentifier> arXivBibEntryId = arXivBibEntry.getField(StandardField.EPRINT).flatMap(ArXivIdentifier::parse);
        try {
            this.inplaceAsyncInfuseArXivWithDoi(arXivBibEntryCompletedFuture, arXivBibEntryId);
        } catch (FetcherException e) {
            LOGGER.error("FetcherException should not be found here, as main Bibtex Entry already exists " +
                    "(and failing additional fetches should be skipped)", e);
        }
    }

    /**
     * Infuse arXivBibEntryFuture with additional fields in an asynchronous way, accelerating the process by providing a valid ArXiv ID
     *
     * @param arXivBibEntryFuture A future entry that (if it exists) will be updated with new/modified fields
     * @param arXivId             An ArXiv ID for the main reference (from ArXiv), so that the retrieval of ArXiv-issued DOI metadata can be faster
     * @throws FetcherException when failed to fetch the main ArtXiv Bibtex entry ('arXivBibEntryFuture').
     */
    private void inplaceAsyncInfuseArXivWithDoi(CompletableFuture<Optional<BibEntry>> arXivBibEntryFuture, Optional<ArXivIdentifier> arXivId) throws FetcherException {
        Optional<CompletableFuture<Optional<BibEntry>>> automaticDoiBibEntryFuture;
        Optional<BibEntry> arXivBibEntry;
        Optional<String> automaticDoi;
        Optional<String> manualDoi;

        // We can accelerate the processing time by initiating a parallel request for DOIFetcher with an ArXiv-issued DOI alongside the ArXiv fetching itself,
        // BUT ONLY IF we have a valid arXivId. If not, the ArXiv entry must be retrieved before, which invalidates this optimization (although we can still speed
        // up the process by running both the ArXiv-assigned and user-assigned DOI fetching at the same time, if an entry has this last information)
        if (arXivId.isPresent()) {
            automaticDoi = Optional.of(ArXivFetcher.getAutomaticDoi(arXivId.get()));
            automaticDoiBibEntryFuture = Optional.of(doiFetcher.asyncPerformSearchById(automaticDoi.get()));

            arXivBibEntry = ArXivFetcher.waitForBibEntryRetrieval(arXivBibEntryFuture);
            if (arXivBibEntry.isEmpty()) {
                return;
            }
        } else {
            // If ArXiv fetch fails (FetcherException), exception must be passed onwards for the transparency of this class (original ArXiv fetcher does the same)
            arXivBibEntry = ArXivFetcher.waitForBibEntryRetrieval(arXivBibEntryFuture);
            if (arXivBibEntry.isEmpty()) {
                return;
            }

            automaticDoi = ArXivFetcher.getAutomaticDoi(arXivBibEntry.get());
            // NOTE(review): this queries the ArXiv-issued DOI against the inner ArXiv fetcher
            // ('arXiv::asyncPerformSearchById'), while the branch above uses 'doiFetcher'.
            // Looks like it should be 'doiFetcher::asyncPerformSearchById' — verify intent.
            automaticDoiBibEntryFuture = automaticDoi.map(arXiv::asyncPerformSearchById);
        }

        manualDoi = ArXivFetcher.getManualDoi(arXivBibEntry.get());
        Optional<CompletableFuture<Optional<BibEntry>>> manualDoiBibEntryFuture = manualDoi.map(doiFetcher::asyncPerformSearchById);

        // Merge order matters: automatic-DOI fields first, then user-DOI fields overwrite on top.
        automaticDoiBibEntryFuture.ifPresent(future -> mergeArXivEntryWithFutureDoiEntry(arXivBibEntry.get(), future, CHOSEN_AUTOMATIC_DOI_FIELDS, automaticDoi.get()));
        manualDoiBibEntryFuture.ifPresent(future -> mergeArXivEntryWithFutureDoiEntry(arXivBibEntry.get(), future, CHOSEN_MANUAL_DOI_FIELDS, manualDoi.get()));
    }

    /**
     * Constructs a complex query string using the field prefixes specified at https://arxiv.org/help/api/user-manual
     * and modify resulting BibEntries with additional info from the ArXiv-issued DOI
     *
     * @param luceneQuery the root node of the lucene query
     * @return A list of entries matching the complex query
     */
    @Override
    public Page<BibEntry> performSearchPaged(QueryNode luceneQuery, int pageNumber) throws FetcherException {
        Page<BibEntry> result = arXiv.performSearchPaged(luceneQuery, pageNumber);
        if (this.doiFetcher == null) {
            return result;
        }

        // NOTE(review): this executor is never shut down; consider shutdown()
        // after the joins to avoid leaking threads — verify lifecycle.
        ExecutorService executor = Executors.newFixedThreadPool(getPageSize() * 2);

        // Enrich every page entry in parallel; join() below blocks until all merges are done.
        Collection<CompletableFuture<BibEntry>> futureSearchResult = result.getContent()
                                                                          .stream()
                                                                          .map(bibEntry -> CompletableFuture.supplyAsync(() -> {
                                                                              this.inplaceAsyncInfuseArXivWithDoi(bibEntry);
                                                                              return bibEntry;
                                                                          }, executor))
                                                                          .toList();

        Collection<BibEntry> modifiedSearchResult = futureSearchResult.stream()
                                                                      .map(CompletableFuture::join)
                                                                      .collect(Collectors.toList());

        return new Page<>(result.getQuery(), result.getPageNumber(), modifiedSearchResult);
    }

    @Override
    public Optional<BibEntry> performSearchById(String identifier) throws FetcherException {
        CompletableFuture<Optional<BibEntry>> arXivBibEntryPromise = arXiv.asyncPerformSearchById(identifier);
        if (this.doiFetcher != null) {
            inplaceAsyncInfuseArXivWithDoi(arXivBibEntryPromise, ArXivIdentifier.parse(identifier));
        }
        return arXivBibEntryPromise.join();
    }

    @Override
    public Optional<ArXivIdentifier> findIdentifier(BibEntry entry) throws FetcherException {
        return arXiv.findIdentifier(entry);
    }

    @Override
    public String getIdentifierName() {
        return arXiv.getIdentifierName();
    }

    /**
     * Fetcher for the arXiv.
     *
     * @see <a href="https://arxiv.org/help/api/index">ArXiv API</a> for an overview of the API
     * @see <a href="https://arxiv.org/help/api/user-manual#_calling_the_api">ArXiv API User's Manual</a> for a detailed
     * description on how to use the API
     * <p>
     * Similar implementations:
     * <a href="https://github.com/nathangrigg/arxiv2bib">arxiv2bib</a> which is <a href="https://arxiv2bibtex.org/">live</a>
     * <a href="https://gitlab.c3sl.ufpr.br/portalmec/dspace-portalmec/blob/aa209d15082a9870f9daac42c78a35490ce77b52/dspace-api/src/main/java/org/dspace/submit/lookup/ArXivService.java">dspace-portalmec</a>
     */
    protected class ArXiv implements FulltextFetcher, PagedSearchBasedFetcher, IdBasedFetcher, IdFetcher<ArXivIdentifier> {
        private static final Logger LOGGER = LoggerFactory.getLogger(org.jabref.logic.importer.fetcher.ArXivFetcher.ArXiv.class);

        private static final String API_URL = "https://export.arxiv.org/api/query";

        private final ImportFormatPreferences importFormatPreferences;

        public ArXiv(ImportFormatPreferences importFormatPreferences) {
            this.importFormatPreferences = importFormatPreferences;
        }

        @Override
        public Optional<URL> findFullText(BibEntry entry) throws IOException {
            Objects.requireNonNull(entry);

            try {
                // First matching entry with a PDF link wins.
                Optional<URL> pdfUrl = searchForEntries(entry).stream()
                                                              .map(ArXivEntry::getPdfUrl)
                                                              .filter(Optional::isPresent)
                                                              .map(Optional::get)
                                                              .findFirst();
                pdfUrl.ifPresent(url -> LOGGER.info("Fulltext PDF found @ arXiv."));
                return pdfUrl;
            } catch (FetcherException e) {
                LOGGER.warn("arXiv API request failed", e);
            }

            return Optional.empty();
        }

        @Override
        public TrustLevel getTrustLevel() {
            return TrustLevel.PREPRINT;
        }

        // Returns the single best match for the query, or empty if zero or ambiguous results.
        private Optional<ArXivEntry> searchForEntry(String searchQuery) throws FetcherException {
            List<ArXivEntry> entries = queryApi(searchQuery, Collections.emptyList(), 0, 1);
            if (entries.size() == 1) {
                return Optional.of(entries.get(0));
            } else {
                return Optional.empty();
            }
        }

        // Looks up exactly one entry by its (parsed) arXiv identifier.
        private Optional<ArXivEntry> searchForEntryById(String id) throws FetcherException {
            Optional<ArXivIdentifier> identifier = ArXivIdentifier.parse(id);
            if (identifier.isEmpty()) {
                return Optional.empty();
            }

            List<ArXivEntry> entries = queryApi("", Collections.singletonList(identifier.get()), 0, 1);

            if (entries.size() >= 1) {
                return Optional.of(entries.get(0));
            } else {
                return Optional.empty();
            }
        }

        // Finds arXiv entries matching the given BibEntry: by eprint first, then DOI, then author/title.
        private List<ArXivEntry> searchForEntries(BibEntry originalEntry) throws FetcherException {
            // We need to clone the entry, because we modify it by a cleanup job.
            final BibEntry entry = (BibEntry) originalEntry.clone();

            // 1. Check for Eprint
            new EprintCleanup().cleanup(entry);
            Optional<String> identifier = entry.getField(StandardField.EPRINT);
            if (StringUtil.isNotBlank(identifier)) {
                try {
                    // Get pdf of entry with the specified id
                    return OptionalUtil.toList(searchForEntryById(identifier.get()));
                } catch (FetcherException e) {
                    LOGGER.warn("arXiv eprint API request failed", e);
                }
            }

            // 2. DOI and other fields
            String query;

            Optional<String> doiString = entry.getField(StandardField.DOI)
                                              .flatMap(DOI::parse)
                                              .map(DOI::getNormalized);

            // ArXiv-issued DOIs seem to be unsearchable from ArXiv API's "query string", so ignore it
            if (doiString.isPresent() && ArXivFetcher.isManualDoi(doiString.get())) {
                query = "doi:" + doiString.get();
            } else {
                Optional<String> authorQuery = entry.getField(StandardField.AUTHOR).map(author -> "au:" + author);
                Optional<String> titleQuery = entry.getField(StandardField.TITLE).map(title -> "ti:" + StringUtil.ignoreCurlyBracket(title));
                query = String.join("+AND+", OptionalUtil.toList(authorQuery, titleQuery));
            }

            Optional<ArXivEntry> arxivEntry = searchForEntry(query);

            if (arxivEntry.isPresent()) {
                // Check if entry is a match
                StringSimilarity match = new StringSimilarity();
                String arxivTitle = arxivEntry.get().title.orElse("");
                String entryTitle = StringUtil.ignoreCurlyBracket(entry.getField(StandardField.TITLE).orElse(""));

                if (match.isSimilar(arxivTitle, entryTitle)) {
                    return OptionalUtil.toList(arxivEntry);
                }
            }

            return Collections.emptyList();
        }

        // Fetches one result page for the query (page size comes from PagedSearchBasedFetcher).
        private List<ArXivEntry> searchForEntries(String searchQuery, int pageNumber) throws FetcherException {
            return queryApi(searchQuery, Collections.emptyList(), getPageSize() * pageNumber, getPageSize());
        }

        private List<ArXivEntry> queryApi(String searchQuery, List<ArXivIdentifier> ids, int start, int maxResults)
                throws FetcherException {
            Document result = callApi(searchQuery, ids, start, maxResults);
            List<Node> entries = XMLUtil.asList(result.getElementsByTagName("entry"));

            return entries.stream().map(ArXivEntry::new).collect(Collectors.toList());
        }

        /**
         * Queries the API.
         * <p>
         * If only {@code searchQuery} is given, then the API will return results for each article that matches the query.
         * If only {@code ids} is given, then the API will return results for each article in the list.
         * If both {@code searchQuery} and {@code ids} are given, then the API will return each article in
         * {@code ids} that matches {@code searchQuery}. This allows the API to act as a results filter.
         *
         * @param searchQuery the search query used to find articles;
         *                    <a href="http://arxiv.org/help/api/user-manual#query_details">details</a>
         * @param ids         a list of arXiv identifiers
         * @param start       the index of the first returned result (zero-based)
         * @param maxResults  the number of maximal results (has to be smaller than 2000)
         * @return the response from the API as a XML document (Atom 1.0)
         * @throws FetcherException if there was a problem while building the URL or the API was not accessible
         */
        private Document callApi(String searchQuery, List<ArXivIdentifier> ids, int start, int maxResults) throws FetcherException {
            if (maxResults > 2000) {
                throw new IllegalArgumentException("The arXiv API limits the number of maximal results to be 2000");
            }

            try {
                URIBuilder uriBuilder = new URIBuilder(API_URL);
                // The arXiv API has problems with accents, so we remove them (i.e. Fréchet -> Frechet)
                if (StringUtil.isNotBlank(searchQuery)) {
                    uriBuilder.addParameter("search_query", StringUtil.stripAccents(searchQuery));
                }
                if (!ids.isEmpty()) {
                    uriBuilder.addParameter("id_list",
                            ids.stream().map(ArXivIdentifier::getNormalized).collect(Collectors.joining(",")));
                }
                uriBuilder.addParameter("start", String.valueOf(start));
                uriBuilder.addParameter("max_results", String.valueOf(maxResults));
                URL url = uriBuilder.build().toURL();

                DocumentBuilderFactory factory = DocumentBuilderFactory.newInstance();
                DocumentBuilder builder = factory.newDocumentBuilder();
                HttpURLConnection connection = (HttpURLConnection) url.openConnection();

                if (connection.getResponseCode() == 400) {
                    // Bad request error from server, try to get more information
                    throw getException(builder.parse(connection.getErrorStream()));
                } else {
                    return builder.parse(connection.getInputStream());
                }
            } catch (SAXException | ParserConfigurationException | IOException | URISyntaxException exception) {
                throw new FetcherException("arXiv API request failed", exception);
            }
        }

        // Extracts the API's error message from a "Bad Request" response body, if present.
        private FetcherException getException(Document error) {
            List<Node> entries = XMLUtil.asList(error.getElementsByTagName("entry"));

            // Check if the API returned an error
            // In case of an error, only one entry will be returned with the error information. For example:
            // https://export.arxiv.org/api/query?id_list=0307015
            // <entry>
            //      <id>https://arxiv.org/api/errors#incorrect_id_format_for_0307015</id>
            //      <title>Error</title>
            //      <summary>incorrect id format for 0307015</summary>
            // </entry>
            if (entries.size() == 1) {
                Node node = entries.get(0);
                Optional<String> id = XMLUtil.getNodeContent(node, "id");
                // NOTE(review): comparison uses "http://" while the example above shows "https://" — verify against the live API.
                Boolean isError = id.map(idContent -> idContent.startsWith("http://arxiv.org/api/errors")).orElse(false);
                if (isError) {
                    String errorMessage = XMLUtil.getNodeContent(node, "summary").orElse("Unknown error");
                    return new FetcherException(errorMessage);
                }
            }
            return new FetcherException("arXiv API request failed");
        }

        @Override
        public String getName() {
            return "ArXiv";
        }

        @Override
        public Optional<HelpFile> getHelpPage() {
            return Optional.of(HelpFile.FETCHER_OAI2_ARXIV);
        }

        /**
         * Constructs a complex query string using the field prefixes specified at https://arxiv.org/help/api/user-manual
         *
         * @param luceneQuery the root node of the lucene query
         * @return A list of entries matching the complex query
         */
        @Override
        public Page<BibEntry> performSearchPaged(QueryNode luceneQuery, int pageNumber) throws FetcherException {
            ArXivQueryTransformer transformer = new ArXivQueryTransformer();
            String transformedQuery = transformer.transformLuceneQuery(luceneQuery).orElse("");
            List<BibEntry> searchResult = searchForEntries(transformedQuery, pageNumber)
                    .stream()
                    .map(arXivEntry -> arXivEntry.toBibEntry(importFormatPreferences.bibEntryPreferences().getKeywordSeparator()))
                    .collect(Collectors.toList());
            return new Page<>(transformedQuery, pageNumber, filterYears(searchResult, transformer));
        }

        // Applies the query's start/end-year bounds (the API itself cannot filter by year).
        private List<BibEntry> filterYears(List<BibEntry> searchResult, ArXivQueryTransformer transformer) {
            return searchResult.stream()
                               .filter(entry -> entry.getField(StandardField.DATE).isPresent())
                               // Filter the date field for year only
                               .filter(entry -> transformer.getEndYear().isEmpty() || (Integer.parseInt(entry.getField(StandardField.DATE).get().substring(0, 4)) <= transformer.getEndYear().get()))
                               .filter(entry -> transformer.getStartYear().isEmpty() || (Integer.parseInt(entry.getField(StandardField.DATE).get().substring(0, 4)) >= transformer.getStartYear().get()))
                               .collect(Collectors.toList());
        }

        // Wraps performSearchById in a future; a FetcherException surfaces as a CompletionException cause.
        protected CompletableFuture<Optional<BibEntry>> asyncPerformSearchById(String identifier) throws CompletionException {
            return CompletableFuture.supplyAsync(() -> {
                try {
                    return performSearchById(identifier);
                } catch (FetcherException e) {
                    throw new CompletionException(e);
                }
            });
        }

        @Override
        public Optional<BibEntry> performSearchById(String identifier) throws FetcherException {
            return searchForEntryById(identifier)
                    .map(arXivEntry -> arXivEntry.toBibEntry(importFormatPreferences.bibEntryPreferences().getKeywordSeparator()));
        }

        @Override
        public Optional<ArXivIdentifier> findIdentifier(BibEntry entry) throws FetcherException {
            return searchForEntries(entry).stream()
                                          .map(ArXivEntry::getId)
                                          .filter(Optional::isPresent)
                                          .map(Optional::get)
                                          .findFirst();
        }

        @Override
        public String getIdentifierName() {
            return "ArXiv";
        }

        /**
         * One parsed {@code <entry>} element of the Atom response returned by the arXiv API.
         */
        private static class ArXivEntry {

            private final Optional<String> title;
            private final Optional<String> urlAbstractPage;
            private final Optional<String> publishedDate;
            private final Optional<String> abstractText;
            private final List<String> authorNames;
            private final List<String> categories;
            private final Optional<URL> pdfUrl;
            private final Optional<String> doi;
            private final Optional<String> journalReferenceText;
            private final Optional<String> primaryCategory;

            public ArXivEntry(Node item) {
                // see https://arxiv.org/help/api/user-manual#_details_of_atom_results_returned

                // Title of the article
                // The result from the arXiv contains hard line breaks, try to remove them
                title = XMLUtil.getNodeContent(item, "title").map(ArXivEntry::correctLineBreaks);

                // The url leading to the abstract page
                urlAbstractPage = XMLUtil.getNodeContent(item, "id");

                // Date on which the first version was published
                publishedDate = XMLUtil.getNodeContent(item, "published");

                // Abstract of the article
                abstractText = XMLUtil.getNodeContent(item, "summary").map(ArXivEntry::correctLineBreaks)
                                      .map(String::trim);

                // Authors of the article
                authorNames = new ArrayList<>();
                for (Node authorNode : XMLUtil.getNodesByName(item, "author")) {
                    Optional<String> authorName = XMLUtil.getNodeContent(authorNode, "name").map(String::trim);
                    authorName.ifPresent(authorNames::add);
                }

                // Categories (arXiv, ACM, or MSC classification)
                categories = new ArrayList<>();
                for (Node categoryNode : XMLUtil.getNodesByName(item, "category")) {
                    Optional<String> category = XMLUtil.getAttributeContent(categoryNode, "term");
                    category.ifPresent(categories::add);
                }

                // Links
                Optional<URL> pdfUrlParsed = Optional.empty();
                for (Node linkNode : XMLUtil.getNodesByName(item, "link")) {
                    Optional<String> linkTitle = XMLUtil.getAttributeContent(linkNode, "title");
                    if (linkTitle.equals(Optional.of("pdf"))) {
                        pdfUrlParsed = XMLUtil.getAttributeContent(linkNode, "href").map(url -> {
                            try {
                                return new URL(url);
                            } catch (MalformedURLException e) {
                                // Malformed link: treated as "no PDF available".
                                return null;
                            }
                        });
                    }
                }
                pdfUrl = pdfUrlParsed;

                // Associated DOI
                doi = XMLUtil.getNodeContent(item, "arxiv:doi");

                // Journal reference (as provided by the author)
                journalReferenceText = XMLUtil.getNodeContent(item, "arxiv:journal_ref");

                // Primary category
                // Ex: <arxiv:primary_category xmlns:arxiv="https://arxiv.org/schemas/atom" term="math-ph" scheme="http://arxiv.org/schemas/atom"/>
                primaryCategory = XMLUtil.getNode(item, "arxiv:primary_category")
                                         .flatMap(node -> XMLUtil.getAttributeContent(node, "term"));
            }

            // Collapses single hard line breaks to spaces while keeping paragraph breaks, then trims excess whitespace.
            public static String correctLineBreaks(String s) {
                String result = s.replaceAll("\\n(?!\\s*\\n)", " ");
                result = result.replaceAll("\\s*\\n\\s*", "\n");
                return result.replaceAll(" {2,}", " ").replaceAll("(^\\s*|\\s+$)", "");
            }

            /**
             * Returns the url of the linked pdf
             */
            public Optional<URL> getPdfUrl() {
                return pdfUrl;
            }

            /**
             * Returns the arXiv identifier
             */
            public Optional<String> getIdString() {
                return urlAbstractPage.flatMap(ArXivIdentifier::parse).map(ArXivIdentifier::getNormalizedWithoutVersion);
            }

            public Optional<ArXivIdentifier> getId() {
                return getIdString().flatMap(ArXivIdentifier::parse);
            }

            /**
             * Returns the date when the first version was put on the arXiv
             */
            public Optional<String> getDate() {
                // Publication string also contains time, e.g. 2014-05-09T14:49:43Z
                return publishedDate.map(date -> {
                    if (date.length() < 10) {
                        return null;
                    } else {
                        return date.substring(0, 10);
                    }
                });
            }

            // Converts this Atom entry into a JabRef BibEntry of type Article.
            public BibEntry toBibEntry(Character keywordDelimiter) {
                BibEntry bibEntry = new BibEntry(StandardEntryType.Article);
                bibEntry.setField(StandardField.EPRINTTYPE, "arXiv");
                bibEntry.setField(StandardField.AUTHOR, String.join(" and ", authorNames));
                bibEntry.addKeywords(categories, keywordDelimiter);
                getIdString().ifPresent(id -> bibEntry.setField(StandardField.EPRINT, id));
                title.ifPresent(titleContent -> bibEntry.setField(StandardField.TITLE, titleContent));
                doi.ifPresent(doiContent -> bibEntry.setField(StandardField.DOI, doiContent));
                abstractText.ifPresent(abstractContent -> bibEntry.setField(StandardField.ABSTRACT, abstractContent));
                getDate().ifPresent(date -> bibEntry.setField(StandardField.DATE, date));
                primaryCategory.ifPresent(category -> bibEntry.setField(StandardField.EPRINTCLASS, category));
                journalReferenceText.ifPresent(journal -> bibEntry.setField(StandardField.JOURNAL, journal));
                getPdfUrl().ifPresent(url -> bibEntry.setFiles(Collections.singletonList(new LinkedFile(url, "PDF"))));
                return bibEntry;
            }
        }
    }
}
38,017
47.992268
206
java
null
jabref-main/src/main/java/org/jabref/logic/importer/fetcher/AstrophysicsDataSystem.java
package org.jabref.logic.importer.fetcher;

import java.io.IOException;
import java.net.MalformedURLException;
import java.net.URISyntaxException;
import java.net.URL;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.List;
import java.util.Objects;
import java.util.Optional;
import java.util.stream.Collectors;

import org.jabref.logic.cleanup.FieldFormatterCleanup;
import org.jabref.logic.cleanup.MoveFieldCleanup;
import org.jabref.logic.formatter.bibtexfields.ClearFormatter;
import org.jabref.logic.formatter.bibtexfields.NormalizeMonthFormatter;
import org.jabref.logic.formatter.bibtexfields.NormalizeNamesFormatter;
import org.jabref.logic.formatter.bibtexfields.RemoveBracesFormatter;
import org.jabref.logic.formatter.bibtexfields.RemoveNewlinesFormatter;
import org.jabref.logic.help.HelpFile;
import org.jabref.logic.importer.EntryBasedParserFetcher;
import org.jabref.logic.importer.FetcherException;
import org.jabref.logic.importer.IdBasedParserFetcher;
import org.jabref.logic.importer.ImportFormatPreferences;
import org.jabref.logic.importer.ImporterPreferences;
import org.jabref.logic.importer.PagedSearchBasedParserFetcher;
import org.jabref.logic.importer.ParseException;
import org.jabref.logic.importer.Parser;
import org.jabref.logic.importer.fetcher.transformers.DefaultQueryTransformer;
import org.jabref.logic.importer.fileformat.BibtexParser;
import org.jabref.logic.net.URLDownload;
import org.jabref.logic.preferences.FetcherApiKey;
import org.jabref.logic.util.BuildInfo;
import org.jabref.model.entry.BibEntry;
import org.jabref.model.entry.field.StandardField;
import org.jabref.model.entry.field.UnknownField;
import org.jabref.model.paging.Page;
import org.jabref.model.strings.StringUtil;

import kong.unirest.json.JSONArray;
import kong.unirest.json.JSONException;
import kong.unirest.json.JSONObject;
import org.apache.http.client.utils.URIBuilder;
import org.apache.lucene.queryparser.flexible.core.nodes.QueryNode;

/**
 * Fetches data from the SAO/NASA Astrophysics Data System (<a href="https://ui.adsabs.harvard.edu/">https://ui.adsabs.harvard.edu/</a>)
 * <p>
 * Search results are resolved in two steps: first the search endpoint returns matching bibcodes,
 * then the export endpoint is POSTed those bibcodes to obtain BibTeX data.
 */
public class AstrophysicsDataSystem
        implements IdBasedParserFetcher, PagedSearchBasedParserFetcher, EntryBasedParserFetcher, CustomizableKeyFetcher {

    private static final String API_SEARCH_URL = "https://api.adsabs.harvard.edu/v1/search/query";
    private static final String API_EXPORT_URL = "https://api.adsabs.harvard.edu/v1/export/bibtexabs";

    // Default key shipped with the build; can be overridden by a user-configured key (see getApiKey()).
    private static final String API_KEY = new BuildInfo().astrophysicsDataSystemAPIKey;

    private final ImportFormatPreferences preferences;
    private final ImporterPreferences importerPreferences;

    public AstrophysicsDataSystem(ImportFormatPreferences preferences, ImporterPreferences importerPreferences) {
        this.preferences = Objects.requireNonNull(preferences);
        this.importerPreferences = importerPreferences;
    }

    /**
     * @param bibcodes collection of bibcodes for which a JSON object should be created
     * @return JSON request body of the form {@code {"bibcode": [...]}} expected by the export endpoint
     */
    private static String buildPostData(Collection<String> bibcodes) {
        JSONObject obj = new JSONObject();
        obj.put("bibcode", bibcodes);
        return obj.toString();
    }

    /**
     * @return export URL endpoint
     */
    private static URL getURLforExport() throws URISyntaxException, MalformedURLException {
        return new URIBuilder(API_EXPORT_URL).build().toURL();
    }

    @Override
    public String getName() {
        return "SAO/NASA ADS";
    }

    /**
     * Returns the user-configured API key for this fetcher if one is set and enabled,
     * otherwise the bundled default key.
     */
    private String getApiKey() {
        return importerPreferences.getApiKeys()
                                  .stream()
                                  .filter(key -> key.getName().equalsIgnoreCase(this.getName()))
                                  .filter(FetcherApiKey::shouldUse)
                                  .findFirst()
                                  .map(FetcherApiKey::getKey)
                                  .orElse(API_KEY);
    }

    /**
     * @param luceneQuery query string, matching the apache solr format
     * @param pageNumber  zero-based page index used to compute the {@code start} offset
     * @return URL which points to a search request for given query
     */
    @Override
    public URL getURLForQuery(QueryNode luceneQuery, int pageNumber) throws URISyntaxException, MalformedURLException, FetcherException {
        URIBuilder builder = new URIBuilder(API_SEARCH_URL);
        builder.addParameter("q", new DefaultQueryTransformer().transformLuceneQuery(luceneQuery).orElse(""));
        // Only the bibcode is requested; full entries are fetched via the export endpoint afterwards
        builder.addParameter("fl", "bibcode");
        builder.addParameter("rows", String.valueOf(getPageSize()));
        builder.addParameter("start", String.valueOf(getPageSize() * pageNumber));
        return builder.build().toURL();
    }

    /**
     * @param entry BibEntry for which a search URL is created
     * @return URL which points to a search request for given entry
     */
    @Override
    public URL getURLForEntry(BibEntry entry) throws URISyntaxException, MalformedURLException {
        StringBuilder stringBuilder = new StringBuilder();

        Optional<String> title = entry.getFieldOrAlias(StandardField.TITLE).map(t -> "title:\"" + t + "\"");
        Optional<String> author = entry.getFieldOrAlias(StandardField.AUTHOR).map(a -> "author:\"" + a + "\"");

        // Combine with AND when both are present; fall back to whichever single term exists
        if (title.isPresent()) {
            stringBuilder.append(title.get())
                         .append(author.map(s -> " AND " + s)
                                       .orElse(""));
        } else {
            stringBuilder.append(author.orElse(""));
        }
        String query = stringBuilder.toString().trim();

        URIBuilder builder = new URIBuilder(API_SEARCH_URL);
        builder.addParameter("q", query);
        builder.addParameter("fl", "bibcode");
        builder.addParameter("rows", "20");
        return builder.build().toURL();
    }

    /**
     * @param identifier bibcode or doi for which a search URL is created
     * @return URL which points to a search URL for given identifier
     */
    @Override
    public URL getUrlForIdentifier(String identifier) throws FetcherException, URISyntaxException, MalformedURLException {
        String query = "doi:\"" + identifier + "\" OR " + "bibcode:\"" + identifier + "\"";
        URIBuilder builder = new URIBuilder(API_SEARCH_URL);
        builder.addParameter("q", query);
        builder.addParameter("fl", "bibcode");
        return builder.build().toURL();
    }

    @Override
    public Optional<HelpFile> getHelpPage() {
        return Optional.of(HelpFile.FETCHER_ADS);
    }

    @Override
    public Parser getParser() {
        return new BibtexParser(preferences);
    }

    /**
     * Normalizes entries returned by ADS: strips braces/newlines, normalizes names and months,
     * removes ADS-internal fields and cleans the abstract.
     */
    @Override
    public void doPostCleanup(BibEntry entry) {
        new FieldFormatterCleanup(StandardField.ABSTRACT, new RemoveBracesFormatter()).cleanup(entry);
        new FieldFormatterCleanup(StandardField.ABSTRACT, new RemoveNewlinesFormatter()).cleanup(entry);
        new FieldFormatterCleanup(StandardField.TITLE, new RemoveBracesFormatter()).cleanup(entry);
        new FieldFormatterCleanup(StandardField.AUTHOR, new NormalizeNamesFormatter()).cleanup(entry);
        new FieldFormatterCleanup(StandardField.MONTH, new NormalizeMonthFormatter()).cleanup(entry);

        // Remove ADS note
        new FieldFormatterCleanup(new UnknownField("adsnote"), new ClearFormatter()).cleanup(entry);
        // Move adsurl to url field
        new MoveFieldCleanup(new UnknownField("adsurl"), StandardField.URL).cleanup(entry);

        // ADS uses this placeholder text when no abstract exists; drop it entirely
        entry.getField(StandardField.ABSTRACT)
             .filter(abstractText -> "Not Available <P />".equals(abstractText))
             .ifPresent(abstractText -> entry.clearField(StandardField.ABSTRACT));

        // Strip ADS markup artifacts from the abstract
        entry.getField(StandardField.ABSTRACT)
             .map(abstractText -> abstractText.replace("<P />", ""))
             .map(abstractText -> abstractText.replace("\\textbackslash", ""))
             .map(String::trim)
             .ifPresent(abstractText -> entry.setField(StandardField.ABSTRACT, abstractText));

        // The fetcher adds some garbage (number of found entries etc before)
        entry.setCommentsBeforeEntry("");
    }

    @Override
    public List<BibEntry> performSearch(BibEntry entry) throws FetcherException {
        if (entry.getFieldOrAlias(StandardField.TITLE).isEmpty() && entry.getFieldOrAlias(StandardField.AUTHOR).isEmpty()) {
            return Collections.emptyList();
        }

        try {
            List<String> bibcodes = fetchBibcodes(getURLForEntry(entry));
            return performSearchByIds(bibcodes);
        } catch (URISyntaxException e) {
            throw new FetcherException("Search URI is malformed", e);
        } catch (IOException e) {
            throw new FetcherException("A network error occurred", e);
        }
    }

    /**
     * @param url search url for which bibcode will be returned
     * @return list of bibcodes matching the search request. May be empty
     */
    private List<String> fetchBibcodes(URL url) throws FetcherException {
        try {
            URLDownload download = getUrlDownload(url);
            String content = download.asString();
            JSONObject obj = new JSONObject(content);
            JSONArray codes = obj.getJSONObject("response").getJSONArray("docs");
            List<String> bibcodes = new ArrayList<>();
            for (int i = 0; i < codes.length(); i++) {
                bibcodes.add(codes.getJSONObject(i).getString("bibcode"));
            }
            return bibcodes;
        } catch (IOException e) {
            throw new FetcherException("A network error occurred", e);
        } catch (JSONException e) {
            // Response did not have the expected shape; treat as "no results"
            return Collections.emptyList();
        }
    }

    @Override
    public Optional<BibEntry> performSearchById(String identifier) throws FetcherException {
        if (StringUtil.isBlank(identifier)) {
            return Optional.empty();
        }

        try {
            List<String> bibcodes = fetchBibcodes(getUrlForIdentifier(identifier));
            List<BibEntry> fetchedEntries = performSearchByIds(bibcodes);

            if (fetchedEntries.isEmpty()) {
                return Optional.empty();
            }
            if (fetchedEntries.size() > 1) {
                // FIX: message previously read "...ADSfound more than one..." (missing space) and used string concatenation
                LOGGER.info("Fetcher {} found more than one result for identifier {}. We will use the first entry.",
                        getName(), identifier);
            }
            BibEntry entry = fetchedEntries.get(0);
            return Optional.of(entry);
        } catch (URISyntaxException e) {
            throw new FetcherException("Search URI is malformed", e);
        } catch (IOException e) {
            throw new FetcherException("A network error occurred", e);
        }
    }

    /**
     * @param identifiers bibcodes for which BibEntries should be fetched
     * @return list of BibEntries matching the bibcodes. Can be empty and differ in size to the size of requested bibcodes
     */
    private List<BibEntry> performSearchByIds(Collection<String> identifiers) throws FetcherException {
        List<String> ids = identifiers.stream().filter(identifier -> !StringUtil.isBlank(identifier)).collect(Collectors.toList());
        if (ids.isEmpty()) {
            return Collections.emptyList();
        }
        try {
            String postData = buildPostData(ids);
            URLDownload download = new URLDownload(getURLforExport());
            download.addHeader("Authorization", "Bearer " + this.getApiKey());
            // FIX: the header name was "ContentType"; the correct HTTP header is "Content-Type"
            download.addHeader("Content-Type", "application/json");
            download.setPostData(postData);
            String content = download.asString();
            JSONObject obj = new JSONObject(content);

            try {
                List<BibEntry> fetchedEntries = getParser().parseEntries(obj.optString("export"));
                if (fetchedEntries.isEmpty()) {
                    return Collections.emptyList();
                }
                // Post-cleanup
                fetchedEntries.forEach(this::doPostCleanup);
                return fetchedEntries;
            } catch (JSONException e) {
                return Collections.emptyList();
            }
        } catch (URISyntaxException e) {
            throw new FetcherException("Search URI is malformed", e);
        } catch (IOException e) {
            throw new FetcherException("A network error occurred", e);
        } catch (ParseException e) {
            throw new FetcherException("An internal parser error occurred", e);
        }
    }

    @Override
    public List<BibEntry> performSearch(QueryNode luceneQuery) throws FetcherException {
        URL urlForQuery;
        try {
            urlForQuery = getURLForQuery(luceneQuery);
        } catch (URISyntaxException e) {
            throw new FetcherException("Search URI is malformed", e);
        } catch (IOException e) {
            throw new FetcherException("A network error occurred", e);
        }
        List<String> bibCodes = fetchBibcodes(urlForQuery);
        return performSearchByIds(bibCodes);
    }

    @Override
    public Page<BibEntry> performSearchPaged(QueryNode luceneQuery, int pageNumber) throws FetcherException {
        URL urlForQuery;
        try {
            urlForQuery = getURLForQuery(luceneQuery, pageNumber);
        } catch (URISyntaxException e) {
            throw new FetcherException("Search URI is malformed", e);
        } catch (IOException e) {
            throw new FetcherException("A network error occurred", e);
        }
        // This is currently just interpreting the complex query as a default string query
        List<String> bibCodes = fetchBibcodes(urlForQuery);
        Collection<BibEntry> results = performSearchByIds(bibCodes);
        return new Page<>(luceneQuery.toString(), pageNumber, results);
    }

    /**
     * Returns a download for the given URL with the ADS Authorization header already set.
     */
    @Override
    public URLDownload getUrlDownload(URL url) {
        URLDownload urlDownload = new URLDownload(url);
        urlDownload.addHeader("Authorization", "Bearer " + this.getApiKey());
        return urlDownload;
    }
}
14,077
42.051988
137
java
null
jabref-main/src/main/java/org/jabref/logic/importer/fetcher/BibsonomyScraper.java
package org.jabref.logic.importer.fetcher; import java.io.IOException; import java.net.URL; import java.util.Optional; import org.jabref.logic.importer.ImportFormatPreferences; import org.jabref.logic.importer.ParseException; import org.jabref.logic.importer.fileformat.BibtexParser; import org.jabref.logic.net.URLDownload; import org.jabref.model.entry.BibEntry; import org.slf4j.Logger; import org.slf4j.LoggerFactory; /** * Convenience class for getting BibTeX entries from the BibSonomy scraper, from an URL pointing to an entry. */ public class BibsonomyScraper { private static final String BIBSONOMY_SCRAPER = "https://scraper.bibsonomy.org/service?url="; private static final String BIBSONOMY_SCRAPER_POST = "&format=bibtex"; private static final Logger LOGGER = LoggerFactory.getLogger(BibsonomyScraper.class); private BibsonomyScraper() { } /** * Return a BibEntry by looking up the given url from the BibSonomy scraper. */ public static Optional<BibEntry> getEntry(String entryUrl, ImportFormatPreferences importFormatPreferences) { try { // Replace special characters by corresponding sequences: String cleanURL = entryUrl.replace("%", "%25").replace(":", "%3A").replace("/", "%2F").replace("?", "%3F") .replace("&", "%26").replace("=", "%3D"); URL url = new URL(BibsonomyScraper.BIBSONOMY_SCRAPER + cleanURL + BibsonomyScraper.BIBSONOMY_SCRAPER_POST); String bibtex = new URLDownload(url).asString(); return BibtexParser.singleFromString(bibtex, importFormatPreferences); } catch (IOException ex) { LOGGER.warn("Could not download entry", ex); return Optional.empty(); } catch (ParseException ex) { LOGGER.warn("Could not parse entry", ex); return Optional.empty(); } catch (RuntimeException ex) { LOGGER.warn("Could not get entry", ex); return Optional.empty(); } } }
2,046
37.622642
119
java
null
jabref-main/src/main/java/org/jabref/logic/importer/fetcher/BiodiversityLibrary.java
package org.jabref.logic.importer.fetcher;

import java.io.IOException;
import java.net.MalformedURLException;
import java.net.URISyntaxException;
import java.net.URL;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.stream.IntStream;

import org.jabref.logic.importer.FetcherException;
import org.jabref.logic.importer.ImporterPreferences;
import org.jabref.logic.importer.ParseException;
import org.jabref.logic.importer.Parser;
import org.jabref.logic.importer.SearchBasedParserFetcher;
import org.jabref.logic.importer.fetcher.transformers.BiodiversityLibraryTransformer;
import org.jabref.logic.importer.util.JsonReader;
import org.jabref.logic.net.URLDownload;
import org.jabref.logic.preferences.FetcherApiKey;
import org.jabref.logic.util.BuildInfo;
import org.jabref.model.entry.Author;
import org.jabref.model.entry.AuthorList;
import org.jabref.model.entry.BibEntry;
import org.jabref.model.entry.field.StandardField;
import org.jabref.model.entry.types.StandardEntryType;

import kong.unirest.json.JSONArray;
import kong.unirest.json.JSONException;
import kong.unirest.json.JSONObject;
import org.apache.http.client.utils.URIBuilder;
import org.apache.lucene.queryparser.flexible.core.nodes.QueryNode;
import org.tinylog.Logger;

/**
 * Fetches data from the Biodiversity Heritage Library
 *
 * @see <a href="https://www.biodiversitylibrary.org/docs/api3.html">API documentation</a>
 */
public class BiodiversityLibrary implements SearchBasedParserFetcher, CustomizableKeyFetcher {

    private static final String API_KEY = new BuildInfo().biodiversityHeritageApiKey;
    private static final String BASE_URL = "https://www.biodiversitylibrary.org/api3";
    private static final String RESPONSE_FORMAT = "json";
    private static final String TEST_URL_WITHOUT_API_KEY = "https://www.biodiversitylibrary.org/api3?apikey=";
    private static final String FETCHER_NAME = "Biodiversity Heritage";

    private final ImporterPreferences importerPreferences;

    public BiodiversityLibrary(ImporterPreferences importerPreferences) {
        this.importerPreferences = importerPreferences;
    }

    @Override
    public String getName() {
        return FETCHER_NAME;
    }

    @Override
    public String getTestUrl() {
        return TEST_URL_WITHOUT_API_KEY;
    }

    /**
     * @return the API base URL with the api key and response format already applied
     */
    public URL getBaseURL() throws URISyntaxException, MalformedURLException {
        URIBuilder baseURI = new URIBuilder(BASE_URL);
        baseURI.addParameter("apikey", getApiKey());
        baseURI.addParameter("format", RESPONSE_FORMAT);

        return baseURI.build().toURL();
    }

    /**
     * Builds a GetItemMetadata request URL for the given item id (pages and OCR text excluded).
     */
    public URL getItemMetadataURL(String identifier) throws URISyntaxException, MalformedURLException {
        URIBuilder uriBuilder = new URIBuilder(getBaseURL().toURI());

        uriBuilder.addParameter("op", "GetItemMetadata");
        uriBuilder.addParameter("pages", "f");
        // FIX: the "ocr" parameter was previously added twice
        uriBuilder.addParameter("ocr", "f");
        uriBuilder.addParameter("id", identifier);

        return uriBuilder.build().toURL();
    }

    /**
     * Builds a GetPartMetadata request URL for the given part id (pages and names excluded).
     */
    public URL getPartMetadataURL(String identifier) throws URISyntaxException, MalformedURLException {
        URIBuilder uriBuilder = new URIBuilder(getBaseURL().toURI());

        uriBuilder.addParameter("op", "GetPartMetadata");
        uriBuilder.addParameter("pages", "f");
        uriBuilder.addParameter("names", "f");
        uriBuilder.addParameter("id", identifier);

        return uriBuilder.build().toURL();
    }

    /**
     * Downloads the given URL and returns the first element of the "Result" array of the JSON response.
     */
    public JSONObject getDetails(URL url) throws IOException {
        URLDownload download = new URLDownload(url);
        String response = download.asString();
        Logger.debug("Response {}", response);
        return new JSONObject(response).getJSONArray("Result").getJSONObject(0);
    }

    /**
     * Enriches the given entry with detail metadata fetched for the item or part referenced by the
     * search result {@code item}.
     *
     * @param item  a single search result object containing at least "BHLType" and the matching id field
     * @param entry the entry to enrich (also returned)
     */
    public BibEntry parseBibJSONtoBibtex(JSONObject item, BibEntry entry) throws IOException, URISyntaxException {
        if (item.has("BHLType")) {
            // Hoisted: the type was previously re-read for each comparison
            String bhlType = item.getString("BHLType");
            if (bhlType.equals("Part")) {
                URL url = getPartMetadataURL(item.getString("PartID"));
                JSONObject itemsDetails = getDetails(url);
                entry.setField(StandardField.LANGUAGE, itemsDetails.optString("Language", ""));
                entry.setField(StandardField.DOI, itemsDetails.optString("Doi", ""));
                entry.setField(StandardField.PUBLISHER, itemsDetails.optString("PublisherName", ""));
                entry.setField(StandardField.DATE, itemsDetails.optString("Date", ""));
                entry.setField(StandardField.VOLUME, itemsDetails.optString("Volume", ""));
                entry.setField(StandardField.URL, itemsDetails.optString("PartUrl", ""));
            }

            if (bhlType.equals("Item")) {
                URL url = getItemMetadataURL(item.getString("ItemID"));
                JSONObject itemsDetails = getDetails(url);
                entry.setField(StandardField.EDITOR, itemsDetails.optString("Sponsor", ""));
                entry.setField(StandardField.PUBLISHER, itemsDetails.optString("HoldingInstitution", ""));
                entry.setField(StandardField.LANGUAGE, itemsDetails.optString("Language", ""));
                entry.setField(StandardField.URL, itemsDetails.optString("ItemUrl", ""));
                // Only take the item date if the entry has no date/year information yet
                if (itemsDetails.has("Date")
                        && !entry.hasField(StandardField.DATE)
                        && !entry.hasField(StandardField.YEAR)) {
                    entry.setField(StandardField.DATE, itemsDetails.getString("Date"));
                }
            }
        }
        return entry;
    }

    /**
     * Maps a single JSON search result to a BibEntry (search-level fields only; see
     * {@link #parseBibJSONtoBibtex(JSONObject, BibEntry)} for the detail enrichment).
     */
    public BibEntry jsonResultToBibEntry(JSONObject item) {
        BibEntry entry = new BibEntry();

        if ("Book".equals(item.optString("Genre"))) {
            entry.setType(StandardEntryType.Book);
        } else {
            entry.setType(StandardEntryType.Article);
        }
        entry.setField(StandardField.TITLE, item.optString("Title", ""));

        entry.setField(StandardField.AUTHOR, toAuthors(item.optJSONArray("Authors")));

        entry.setField(StandardField.PAGES, item.optString("PageRange", ""));
        entry.setField(StandardField.LOCATION, item.optString("PublisherPlace", ""));
        entry.setField(StandardField.PUBLISHER, item.optString("PublisherName", ""));

        entry.setField(StandardField.DATE, item.optString("Date", ""));
        entry.setField(StandardField.YEAR, item.optString("PublicationDate", ""));
        entry.setField(StandardField.JOURNALTITLE, item.optString("ContainerTitle", ""));
        entry.setField(StandardField.VOLUME, item.optString("Volume", ""));

        return entry;
    }

    /**
     * Converts the "Authors" array (a list of objects with a "Name" key) into a single
     * "First Last and First Last" author string. Returns "" for a missing array.
     */
    private String toAuthors(JSONArray authors) {
        if (authors == null) {
            return "";
        }

        // input: list of { "Name": "..." }
        return IntStream.range(0, authors.length())
                        .mapToObj(authors::getJSONObject)
                        .map(author -> new Author(
                                author.optString("Name", ""), "", "", "", ""))
                        .collect(AuthorList.collect())
                        .getAsFirstLastNamesWithAnd();
    }

    @Override
    public Parser getParser() {
        return inputStream -> {
            JSONObject response = JsonReader.toJsonObject(inputStream);
            if (response.isEmpty()) {
                return Collections.emptyList();
            }

            // FIX: getString would throw a JSONException if "ErrorMessage" is absent from the response
            String errorMessage = response.optString("ErrorMessage", "");
            if (!errorMessage.isBlank()) {
                return Collections.emptyList();
            }

            JSONArray items = response.getJSONArray("Result");
            List<BibEntry> entries = new ArrayList<>(items.length());
            for (int i = 0; i < items.length(); i++) {
                JSONObject item = items.getJSONObject(i);
                BibEntry entry = jsonResultToBibEntry(item);
                try {
                    entry = parseBibJSONtoBibtex(item, entry);
                } catch (JSONException | IOException | URISyntaxException exception) {
                    throw new ParseException("Error when parsing entry", exception);
                }
                entries.add(entry);
            }

            return entries;
        };
    }

    @Override
    public URL getURLForQuery(QueryNode luceneQuery) throws URISyntaxException, MalformedURLException, FetcherException {
        URIBuilder uriBuilder = new URIBuilder(getBaseURL().toURI());
        BiodiversityLibraryTransformer transformer = new BiodiversityLibraryTransformer();
        uriBuilder.addParameter("op", "PublicationSearch");
        uriBuilder.addParameter("searchtype", "C");
        uriBuilder.addParameter("searchterm", transformer.transformLuceneQuery(luceneQuery).orElse(""));
        return uriBuilder.build().toURL();
    }

    /**
     * Returns the user-configured API key if one is set and enabled, otherwise the bundled default key.
     */
    private String getApiKey() {
        return importerPreferences.getApiKeys()
                                  .stream()
                                  .filter(key -> key.getName().equalsIgnoreCase(this.getName()))
                                  .filter(FetcherApiKey::shouldUse)
                                  .findFirst()
                                  .map(FetcherApiKey::getKey)
                                  .orElse(API_KEY);
    }
}
9,375
41.044843
125
java
null
jabref-main/src/main/java/org/jabref/logic/importer/fetcher/BvbFetcher.java
package org.jabref.logic.importer.fetcher; import java.net.MalformedURLException; import java.net.URISyntaxException; import java.net.URL; import java.util.Optional; import org.jabref.logic.help.HelpFile; import org.jabref.logic.importer.Parser; import org.jabref.logic.importer.SearchBasedParserFetcher; import org.jabref.logic.importer.fetcher.transformers.DefaultQueryTransformer; import org.jabref.logic.importer.fileformat.MarcXmlParser; import org.apache.http.client.utils.URIBuilder; import org.apache.lucene.queryparser.flexible.core.nodes.QueryNode; public class BvbFetcher implements SearchBasedParserFetcher { private static final String URL_PATTERN = "http://bvbr.bib-bvb.de:5661/bvb01sru?"; @Override public String getName() { return "Bibliotheksverbund Bayern (Experimental)"; } @Override public Optional<HelpFile> getHelpPage() { return Optional.empty(); } @Override public URL getURLForQuery(QueryNode query) throws URISyntaxException, MalformedURLException { URIBuilder uriBuilder = new URIBuilder(URL_PATTERN); uriBuilder.addParameter("version", "1.1"); uriBuilder.addParameter("recordSchema", "marcxml"); uriBuilder.addParameter("operation", "searchRetrieve"); uriBuilder.addParameter("query", new DefaultQueryTransformer().transformLuceneQuery(query).orElse("")); uriBuilder.addParameter("maximumRecords", "30"); return uriBuilder.build().toURL(); } @Override public Parser getParser() { return new MarcXmlParser(); } }
1,581
32.659574
111
java
null
jabref-main/src/main/java/org/jabref/logic/importer/fetcher/CiteSeer.java
package org.jabref.logic.importer.fetcher;

import java.io.IOException;
import java.net.URL;
import java.util.List;
import java.util.Objects;
import java.util.Optional;

import org.jabref.logic.help.HelpFile;
import org.jabref.logic.importer.FetcherException;
import org.jabref.logic.importer.FulltextFetcher;
import org.jabref.logic.importer.ParseException;
import org.jabref.logic.importer.SearchBasedFetcher;
import org.jabref.logic.importer.fetcher.transformers.CiteSeerQueryTransformer;
import org.jabref.logic.importer.fileformat.CiteSeerParser;
import org.jabref.model.entry.BibEntry;
import org.jabref.model.entry.field.StandardField;

import kong.unirest.JsonNode;
import kong.unirest.Unirest;
import kong.unirest.json.JSONArray;
import kong.unirest.json.JSONElement;
import org.apache.lucene.queryparser.flexible.core.nodes.QueryNode;

/**
 * Fetcher and full-text resolver for CiteSeerX (<a href="https://citeseerx.ist.psu.edu">citeseerx.ist.psu.edu</a>).
 * Searches are POSTed as JSON to the CiteSeerX API; PDFs are resolved via the document endpoint.
 */
public class CiteSeer implements SearchBasedFetcher, FulltextFetcher {

    private static final String BASE_URL = "citeseerx.ist.psu.edu";
    private static final String API_URL = "https://citeseerx.ist.psu.edu/api/search";

    private static final String PDF_URL = "https://" + BASE_URL + "/document?repid=rep1&type=pdf&doi=%s";

    // NOTE(review): keeping this field mutable makes the class stateful per search;
    // it only carries the JSON payload between getPayloadJSON() and its caller.
    private CiteSeerQueryTransformer transformer;

    public CiteSeer() {
    }

    @Override
    public String getName() {
        return "CiteSeerX";
    }

    @Override
    public Optional<HelpFile> getHelpPage() {
        return Optional.of(HelpFile.FETCHER_CITESEERX);
    }

    @Override
    public List<BibEntry> performSearch(QueryNode luceneQuery) throws FetcherException {
        // ADR-0014
        try {
            JSONElement payload = getPayloadJSON(luceneQuery);
            JsonNode requestResponse = Unirest.post(API_URL)
                                              .header("authority", BASE_URL)
                                              .header("accept", "application/json, text/plain, */*")
                                              .header("content-type", "application/json;charset=UTF-8")
                                              .header("origin", "https://" + BASE_URL)
                                              .body(payload)
                                              .asJson().getBody();

            Optional<JSONArray> jsonResponse = Optional.of(requestResponse)
                                                       .map(JsonNode::getObject)
                                                       .filter(Objects::nonNull)
                                                       .map(response -> response.optJSONArray("response"))
                                                       .filter(Objects::nonNull);

            // FIX: idiomatic Optional.isEmpty() instead of !isPresent()
            if (jsonResponse.isEmpty()) {
                return List.of();
            }

            CiteSeerParser parser = new CiteSeerParser();
            // Presence was checked above, so get() is safe here
            return parser.parseCiteSeerResponse(jsonResponse.get());
        } catch (ParseException ex) {
            throw new FetcherException("An internal parser error occurred while parsing CiteSeer entries, ", ex);
        }
    }

    /**
     * Builds the JSON request payload for the given query.
     * The transformer accumulates the payload internally while transforming the query.
     */
    private JSONElement getPayloadJSON(QueryNode luceneQuery) {
        transformer = new CiteSeerQueryTransformer();
        // FIX: the returned query string was assigned to an unused local; only the
        // transformer's side effect (building the payload) is needed here.
        transformer.transformLuceneQuery(luceneQuery);
        return transformer.getJSONPayload();
    }

    @Override
    public Optional<URL> findFullText(BibEntry entry) throws IOException, FetcherException {
        Objects.requireNonNull(entry);

        // does not use a valid DOI, but Cite Seer's id / hash available for each entry
        Optional<String> id = entry.getField(StandardField.DOI);
        if (id.isPresent()) {
            String source = String.format(PDF_URL, id.get());
            return Optional.of(new URL(source));
        }

        // if using id fails, we can try the source URL
        Optional<String> urlString = entry.getField(StandardField.URL);
        if (urlString.isPresent()) {
            return Optional.of(new URL(urlString.get()));
        }

        return Optional.empty();
    }
}
4,097
38.028571
113
java
null
jabref-main/src/main/java/org/jabref/logic/importer/fetcher/CollectionOfComputerScienceBibliographiesFetcher.java
package org.jabref.logic.importer.fetcher;

import java.net.MalformedURLException;
import java.net.URISyntaxException;
import java.net.URL;
import java.util.Arrays;

import org.jabref.logic.cleanup.FieldFormatterCleanup;
import org.jabref.logic.formatter.bibtexfields.RemoveDigitsFormatter;
import org.jabref.logic.formatter.bibtexfields.RemoveNewlinesFormatter;
import org.jabref.logic.formatter.bibtexfields.RemoveRedundantSpacesFormatter;
import org.jabref.logic.formatter.bibtexfields.ReplaceTabsBySpaceFormater;
import org.jabref.logic.importer.FetcherException;
import org.jabref.logic.importer.ImportFormatPreferences;
import org.jabref.logic.importer.Parser;
import org.jabref.logic.importer.SearchBasedParserFetcher;
import org.jabref.logic.importer.fetcher.transformers.CollectionOfComputerScienceBibliographiesQueryTransformer;
import org.jabref.model.entry.BibEntry;
import org.jabref.model.entry.field.Field;
import org.jabref.model.entry.field.FieldFactory;
import org.jabref.model.entry.field.StandardField;
import org.jabref.model.entry.field.UnknownField;

import org.apache.http.client.utils.URIBuilder;
import org.apache.lucene.queryparser.flexible.core.nodes.QueryNode;

/**
 * Fetcher for the "Collection of Computer Science Bibliographies" (liinwww.ira.uka.de).
 * Queries the RSS endpoint and delegates result parsing to
 * {@link CollectionOfComputerScienceBibliographiesParser}.
 */
public class CollectionOfComputerScienceBibliographiesFetcher implements SearchBasedParserFetcher {

    // RSS search endpoint; query parameters are appended via URIBuilder
    private static final String BASIC_SEARCH_URL = "http://liinwww.ira.uka.de/bibliography/rss?";

    private final CollectionOfComputerScienceBibliographiesParser parser;

    public CollectionOfComputerScienceBibliographiesFetcher(ImportFormatPreferences importFormatPreferences) {
        this.parser = new CollectionOfComputerScienceBibliographiesParser(importFormatPreferences);
    }

    /**
     * Builds the search URL for the given query, requesting results sorted by relevance score.
     */
    @Override
    public URL getURLForQuery(QueryNode luceneQuery) throws URISyntaxException, MalformedURLException, FetcherException {
        return new URIBuilder(BASIC_SEARCH_URL)
                .addParameter("query", new CollectionOfComputerScienceBibliographiesQueryTransformer().transformLuceneQuery(luceneQuery).orElse(""))
                .addParameter("sort", "score")
                .build()
                .toURL();
    }

    @Override
    public Parser getParser() {
        return parser;
    }

    @Override
    public String getName() {
        return "Collection of Computer Science Bibliographies";
    }

    /**
     * Normalizes fetched entries: cleans whitespace artifacts in the abstract, removes digits from
     * the editor field, and splits the provider's combined "identifier" field into individual
     * standard fields (e.g. isbn, doi).
     */
    @Override
    public void doPostCleanup(BibEntry entry) {
        new FieldFormatterCleanup(StandardField.ABSTRACT, new RemoveNewlinesFormatter()).cleanup(entry);
        new FieldFormatterCleanup(StandardField.ABSTRACT, new ReplaceTabsBySpaceFormater()).cleanup(entry);
        new FieldFormatterCleanup(StandardField.ABSTRACT, new RemoveRedundantSpacesFormatter()).cleanup(entry);
        new FieldFormatterCleanup(StandardField.EDITOR, new RemoveDigitsFormatter()).cleanup(entry);

        // identifier fields is a key-value field
        // example: "urn:isbn:978-1-4503-5217-8; doi:10.1145/3129790.3129810; ISI:000505046100032; Scopus 2-s2.0-85037741580"
        // thus, key can contain multiple ":"; sometimes value separated by " " instead of ":"
        UnknownField identifierField = new UnknownField("identifier");
        entry.getField(identifierField)
             .stream()
             // one key-value pair per "; "-separated segment
             .flatMap(value -> Arrays.stream(value.split("; ")))
             .forEach(identifierKeyValue -> {
                 // check for pattern "Scopus 2-..." (space-separated key and value)
                 String[] identifierKeyValueSplit = identifierKeyValue.split(" ");

                 if (identifierKeyValueSplit.length == 1) {
                     // check for pattern "doi:..." (colon-separated key and value)
                     identifierKeyValueSplit = identifierKeyValue.split(":");
                 }

                 int length = identifierKeyValueSplit.length;
                 if (length < 2) {
                     // segment has no recognizable key/value structure; skip it
                     return;
                 }

                 // in the case "urn:isbn:", just "isbn" is used
                 // (last-but-one token is the key, last token is the value)
                 String key = identifierKeyValueSplit[length - 2];
                 String value = identifierKeyValueSplit[length - 1];

                 Field field = FieldFactory.parseField(key);
                 // never overwrite a field the entry already carries
                 if (!entry.hasField(field)) {
                     entry.setField(field, value);
                 }
             });
        // the combined field is fully dissolved into individual fields above
        entry.clearField(identifierField);
    }
}
4,254
45.758242
148
java
null
jabref-main/src/main/java/org/jabref/logic/importer/fetcher/CollectionOfComputerScienceBibliographiesParser.java
package org.jabref.logic.importer.fetcher;

import java.io.IOException;
import java.io.InputStream;
import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.List;
import java.util.Scanner;
import java.util.regex.Pattern;
import java.util.stream.Collectors;

import org.jabref.logic.formatter.bibtexfields.HtmlToUnicodeFormatter;
import org.jabref.logic.importer.ImportFormatPreferences;
import org.jabref.logic.importer.ParseException;
import org.jabref.logic.importer.Parser;
import org.jabref.logic.importer.fileformat.BibtexParser;
import org.jabref.logic.net.URLDownload;
import org.jabref.model.entry.BibEntry;

/**
 * Parser for results of the "Collection of Computer Science Bibliographies" RSS search.
 * <p>
 * The RSS response only contains links to per-entry HTML pages; each linked page is
 * downloaded and the BibTeX inside its {@code <pre class="bibtex">} element is extracted
 * and parsed.
 */
public class CollectionOfComputerScienceBibliographiesParser implements Parser {

    final static Pattern REGEX_FOR_LINKS = Pattern.compile("<item>[\\s\\S]*?<link>([\\s\\S]*?)<\\/link>[\\s\\S]*?<\\/item>");
    final static Pattern REGEX_FOR_BIBTEX = Pattern.compile("<pre class=\"bibtex\">([\\s\\S]*?)<\\/pre>");

    final BibtexParser bibtexParser;
    final HtmlToUnicodeFormatter htmlToUnicodeFormatter;

    public CollectionOfComputerScienceBibliographiesParser(ImportFormatPreferences importFormatPreferences) {
        this.bibtexParser = new BibtexParser(importFormatPreferences);
        this.htmlToUnicodeFormatter = new HtmlToUnicodeFormatter();
    }

    /**
     * Extracts the detail-page links from the RSS stream, downloads each page,
     * and parses the concatenated BibTeX snippets found there.
     *
     * @param inputStream the raw RSS response
     * @return the parsed entries (may be empty)
     * @throws ParseException if downloading a linked page fails
     */
    @Override
    public List<BibEntry> parseEntries(InputStream inputStream) throws ParseException {
        try {
            List<String> links = matchRegexFromInputStreamHtml(inputStream, REGEX_FOR_LINKS);
            String bibtexDataString = String.join("", parseBibtexStringsFromLinks(links));
            return bibtexParser.parseEntries(bibtexDataString);
        } catch (IOException e) {
            throw new ParseException(e);
        }
    }

    /**
     * Returns every first capture group of {@code pattern} in the stream,
     * with HTML entities converted to Unicode.
     */
    private List<String> matchRegexFromInputStreamHtml(InputStream inputStream, Pattern pattern) {
        // Read as UTF-8 explicitly: the no-charset Scanner constructor would use the
        // platform default, making results machine-dependent. Web content served by the
        // site is presumed UTF-8 — TODO confirm against the server's Content-Type header.
        try (Scanner scanner = new Scanner(inputStream, StandardCharsets.UTF_8)) {
            return scanner.findAll(pattern)
                          .map(match -> htmlToUnicodeFormatter.format(match.group(1)))
                          .collect(Collectors.toList());
        }
    }

    /**
     * Downloads each link and collects all BibTeX snippets found on the pages.
     *
     * @throws IOException if any download fails
     */
    private List<String> parseBibtexStringsFromLinks(List<String> links) throws IOException {
        List<String> bibtexStringsFromAllLinks = new ArrayList<>();
        for (String link : links) {
            try (InputStream inputStream = new URLDownload(link).asInputStream()) {
                List<String> bibtexStringsFromLink = matchRegexFromInputStreamHtml(inputStream, REGEX_FOR_BIBTEX);
                bibtexStringsFromAllLinks.addAll(bibtexStringsFromLink);
            }
        }
        return bibtexStringsFromAllLinks;
    }
}
2,703
39.358209
125
java
null
jabref-main/src/main/java/org/jabref/logic/importer/fetcher/ComplexSearchQuery.java
package org.jabref.logic.importer.fetcher;

import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
import java.util.Objects;
import java.util.Optional;
import java.util.StringJoiner;

import org.jabref.model.strings.StringUtil;

import org.apache.lucene.index.Term;

/**
 * Immutable, fielded representation of a search query (authors, title phrases,
 * abstract phrases, year or year range, journal, DOI, plus non-fielded default terms).
 * Instances are created via {@link #builder()} or {@link #fromTerms(List)}.
 */
public class ComplexSearchQuery {
    // Field for non-fielded search
    private final List<String> defaultField;
    private final List<String> authors;
    private final List<String> titlePhrases;
    private final List<String> abstractPhrases;
    private final Integer fromYear;
    private final Integer toYear;
    private final Integer singleYear;
    private final String journal;
    private final String doi;

    private ComplexSearchQuery(List<String> defaultField, List<String> authors, List<String> titlePhrases,
                               List<String> abstractPhrases, Integer fromYear, Integer toYear, Integer singleYear,
                               String journal, String doi) {
        this.defaultField = defaultField;
        this.authors = authors;
        this.titlePhrases = titlePhrases;
        this.abstractPhrases = abstractPhrases;
        this.fromYear = fromYear;
        // Some APIs do not support, or not fully support, year based search. In these cases, the non applicable parameters are ignored.
        this.toYear = toYear;
        this.journal = journal;
        this.singleYear = singleYear;
        this.doi = doi;
    }

    /**
     * Builds a query from parsed Lucene terms; unknown field names are treated
     * as non-fielded (default) search terms.
     */
    public static ComplexSearchQuery fromTerms(List<Term> terms) {
        ComplexSearchQueryBuilder builder = ComplexSearchQuery.builder();
        terms.forEach(term -> {
            String termText = term.text();
            switch (term.field().toLowerCase()) {
                case "author" -> builder.author(termText);
                case "title" -> builder.titlePhrase(termText);
                case "abstract" -> builder.abstractPhrase(termText);
                case "journal" -> builder.journal(termText);
                case "year" -> builder.singleYear(Integer.valueOf(termText));
                case "year-range" -> builder.parseYearRange(termText);
                case "doi" -> builder.DOI(termText);
                case "default" -> builder.defaultFieldPhrase(termText);
                // add unknown field as default field
                default -> builder.defaultFieldPhrase(termText);
            }
        });
        return builder.build();
    }

    public List<String> getDefaultFieldPhrases() {
        return defaultField;
    }

    public List<String> getAuthors() {
        return authors;
    }

    public List<String> getTitlePhrases() {
        return titlePhrases;
    }

    public List<String> getAbstractPhrases() {
        return abstractPhrases;
    }

    public Optional<Integer> getFromYear() {
        return Optional.ofNullable(fromYear);
    }

    public Optional<Integer> getToYear() {
        return Optional.ofNullable(toYear);
    }

    public Optional<Integer> getSingleYear() {
        return Optional.ofNullable(singleYear);
    }

    public Optional<String> getJournal() {
        return Optional.ofNullable(journal);
    }

    public Optional<String> getDOI() {
        return Optional.ofNullable(doi);
    }

    public static ComplexSearchQueryBuilder builder() {
        return new ComplexSearchQueryBuilder();
    }

    @Override
    public boolean equals(Object o) {
        if (this == o) {
            return true;
        }
        if (o == null || getClass() != o.getClass()) {
            return false;
        }

        ComplexSearchQuery that = (ComplexSearchQuery) o;

        // Just check for set equality, order does not matter
        if (!(getDefaultFieldPhrases().containsAll(that.getDefaultFieldPhrases()) && that.getDefaultFieldPhrases().containsAll(getDefaultFieldPhrases()))) {
            return false;
        }
        if (!(getAuthors().containsAll(that.getAuthors()) && that.getAuthors().containsAll(getAuthors()))) {
            return false;
        }
        if (!(getTitlePhrases().containsAll(that.getTitlePhrases()) && that.getTitlePhrases().containsAll(getTitlePhrases()))) {
            return false;
        }
        if (!(getAbstractPhrases().containsAll(that.getAbstractPhrases()) && that.getAbstractPhrases().containsAll(getAbstractPhrases()))) {
            return false;
        }
        if (getFromYear().isPresent() ? !getFromYear().equals(that.getFromYear()) : that.getFromYear().isPresent()) {
            return false;
        }
        if (getToYear().isPresent() ? !getToYear().equals(that.getToYear()) : that.getToYear().isPresent()) {
            return false;
        }
        if (getSingleYear().isPresent() ? !getSingleYear().equals(that.getSingleYear()) : that.getSingleYear().isPresent()) {
            return false;
        }
        if (getDOI().isPresent() ? !getDOI().equals(that.getDOI()) : that.getDOI().isPresent()) {
            return false;
        }
        return getJournal().isPresent() ? getJournal().equals(that.getJournal()) : that.getJournal().isEmpty();
    }

    @Override
    public int hashCode() {
        return Objects.hash(defaultField, getAuthors(), getSingleYear(), getAbstractPhrases(), getFromYear(), getToYear(), getTitlePhrases(), getJournal(), getDOI());
    }

    @Override
    public String toString() {
        StringJoiner stringJoiner = new StringJoiner(" ");
        getSingleYear().ifPresent(singleYear -> stringJoiner.add(singleYear.toString()));
        getFromYear().ifPresent(fromYear -> stringJoiner.add(fromYear.toString()));
        getToYear().ifPresent(toYear -> stringJoiner.add(toYear.toString()));
        getJournal().ifPresent(stringJoiner::add);
        getDOI().ifPresent(newElement -> stringJoiner.add("doi:" + newElement));
        stringJoiner.add(String.join(" ", getTitlePhrases()))
                    .add(String.join(" ", getDefaultFieldPhrases()))
                    .add(String.join(" ", getAuthors()))
                    .add(String.join(" ", getAbstractPhrases()));
        return stringJoiner.toString();
    }

    /**
     * Builder for {@link ComplexSearchQuery}. Text parameters are validated
     * (non-blank), stripped of quotes, and re-wrapped in quotes where applicable.
     */
    public static class ComplexSearchQueryBuilder {
        private final List<String> defaultFieldPhrases = new ArrayList<>();
        private final List<String> authors = new ArrayList<>();
        private final List<String> titlePhrases = new ArrayList<>();
        private final List<String> abstractPhrases = new ArrayList<>();
        private String journal;
        private String doi;
        private Integer fromYear;
        private Integer toYear;
        private Integer singleYear;

        private ComplexSearchQueryBuilder() {
        }

        public ComplexSearchQueryBuilder defaultFieldPhrase(String defaultFieldPhrase) {
            if (Objects.requireNonNull(defaultFieldPhrase).isBlank()) {
                throw new IllegalArgumentException("Parameter must not be blank");
            }
            // Strip all quotes before wrapping
            this.defaultFieldPhrases.add(String.format("\"%s\"", defaultFieldPhrase.replace("\"", "")));
            return this;
        }

        /**
         * Adds author and wraps it in quotes
         */
        public ComplexSearchQueryBuilder author(String author) {
            if (Objects.requireNonNull(author).isBlank()) {
                throw new IllegalArgumentException("Parameter must not be blank");
            }
            // Strip all quotes before wrapping
            this.authors.add(String.format("\"%s\"", author.replace("\"", "")));
            return this;
        }

        /**
         * Adds title phrase and wraps it in quotes
         */
        public ComplexSearchQueryBuilder titlePhrase(String titlePhrase) {
            if (Objects.requireNonNull(titlePhrase).isBlank()) {
                throw new IllegalArgumentException("Parameter must not be blank");
            }
            // Strip all quotes before wrapping
            this.titlePhrases.add(String.format("\"%s\"", titlePhrase.replace("\"", "")));
            return this;
        }

        /**
         * Adds abstract phrase and wraps it in quotes
         */
        public ComplexSearchQueryBuilder abstractPhrase(String abstractPhrase) {
            if (Objects.requireNonNull(abstractPhrase).isBlank()) {
                throw new IllegalArgumentException("Parameter must not be blank");
            }
            // Strip all quotes before wrapping
            // BUGFIX: previously added to titlePhrases, misfiling abstract terms as title terms
            this.abstractPhrases.add(String.format("\"%s\"", abstractPhrase.replace("\"", "")));
            return this;
        }

        public ComplexSearchQueryBuilder fromYearAndToYear(Integer fromYear, Integer toYear) {
            if (Objects.nonNull(singleYear)) {
                throw new IllegalArgumentException("You can not use single year and year range search.");
            }
            this.fromYear = Objects.requireNonNull(fromYear);
            this.toYear = Objects.requireNonNull(toYear);
            return this;
        }

        public ComplexSearchQueryBuilder singleYear(Integer singleYear) {
            if (Objects.nonNull(fromYear) || Objects.nonNull(toYear)) {
                throw new IllegalArgumentException("You can not use single year and year range search.");
            }
            this.singleYear = Objects.requireNonNull(singleYear);
            return this;
        }

        public ComplexSearchQueryBuilder journal(String journal) {
            if (Objects.requireNonNull(journal).isBlank()) {
                throw new IllegalArgumentException("Parameter must not be blank");
            }
            this.journal = String.format("\"%s\"", journal.replace("\"", ""));
            return this;
        }

        public ComplexSearchQueryBuilder DOI(String doi) {
            if (Objects.requireNonNull(doi).isBlank()) {
                throw new IllegalArgumentException("Parameter must not be blank");
            }
            this.doi = doi.replace("\"", "");
            return this;
        }

        /**
         * Adds every recognized fielded term; unknown fields are silently ignored here
         * (unlike {@link ComplexSearchQuery#fromTerms(List)}, which maps them to the default field).
         */
        public ComplexSearchQueryBuilder terms(Collection<Term> terms) {
            terms.forEach(term -> {
                String termText = term.text();
                switch (term.field().toLowerCase()) {
                    case "author" -> this.author(termText);
                    case "title" -> this.titlePhrase(termText);
                    case "abstract" -> this.abstractPhrase(termText);
                    case "journal" -> this.journal(termText);
                    case "doi" -> this.DOI(termText);
                    case "year" -> this.singleYear(Integer.valueOf(termText));
                    case "year-range" -> this.parseYearRange(termText);
                    case "default" -> this.defaultFieldPhrase(termText);
                }
            });
            return this;
        }

        /**
         * Instantiates the AdvancesSearchConfig from the provided Builder parameters
         * If all text fields are empty an empty optional is returned
         *
         * @return ComplexSearchQuery instance with the fields set to the values defined in the building instance.
         * @throws IllegalStateException An IllegalStateException is thrown in case all text search fields are empty.
         *                               See: https://softwareengineering.stackexchange.com/questions/241309/builder-pattern-when-to-fail/241320#241320
         */
        public ComplexSearchQuery build() throws IllegalStateException {
            if (textSearchFieldsAndYearFieldsAreEmpty()) {
                throw new IllegalStateException("At least one text field has to be set");
            }
            return new ComplexSearchQuery(defaultFieldPhrases, authors, titlePhrases, abstractPhrases, fromYear, toYear, singleYear, journal, doi);
        }

        /**
         * Parses "from-to" (either side optional); missing bounds default to 0 and 9999.
         */
        void parseYearRange(String termText) {
            String[] split = termText.split("-");
            int fromYear = 0;
            int toYear = 9999;
            try {
                fromYear = Integer.parseInt(split[0]);
            } catch (NumberFormatException e) {
                // default value already set
            }
            if (split.length > 1) {
                try {
                    toYear = Integer.parseInt(split[1]);
                } catch (NumberFormatException e) {
                    // default value already set
                }
            }
            this.fromYearAndToYear(fromYear, toYear);
        }

        private boolean textSearchFieldsAndYearFieldsAreEmpty() {
            return this.stringListIsBlank(defaultFieldPhrases) && this.stringListIsBlank(titlePhrases) &&
                    this.stringListIsBlank(authors) && this.stringListIsBlank(abstractPhrases) &&
                    StringUtil.isBlank(journal) && StringUtil.isBlank(doi) && yearFieldsAreEmpty();
        }

        private boolean yearFieldsAreEmpty() {
            return Objects.isNull(singleYear) && Objects.isNull(fromYear) && Objects.isNull(toYear);
        }

        private boolean stringListIsBlank(List<String> stringList) {
            return Objects.isNull(stringList) || stringList.stream().allMatch(String::isBlank);
        }
    }
}
13,021
40.078864
220
java
null
jabref-main/src/main/java/org/jabref/logic/importer/fetcher/CompositeSearchBasedFetcher.java
package org.jabref.logic.importer.fetcher;

import java.util.List;
import java.util.Optional;
import java.util.Set;
import java.util.stream.Collectors;
import java.util.stream.Stream;

import org.jabref.logic.help.HelpFile;
import org.jabref.logic.importer.FetcherException;
import org.jabref.logic.importer.SearchBasedFetcher;
import org.jabref.model.entry.BibEntry;

import org.apache.lucene.queryparser.flexible.core.nodes.QueryNode;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/**
 * A meta-fetcher that forwards one search to several {@link SearchBasedFetcher}s in
 * parallel and merges their results. A fetcher whose request fails is logged and
 * skipped; it does not abort the whole search.
 */
public class CompositeSearchBasedFetcher implements SearchBasedFetcher {

    public static final String FETCHER_NAME = "SearchAll";

    private static final Logger LOGGER = LoggerFactory.getLogger(CompositeSearchBasedFetcher.class);

    private final Set<SearchBasedFetcher> fetchers;
    private final int maximumNumberOfReturnedResults;

    /**
     * @param searchBasedFetchers            the fetchers to delegate to (must not be null);
     *                                       any reference to this composite itself is removed
     * @param maximumNumberOfReturnedResults cap on the total number of merged results
     * @throws IllegalArgumentException if {@code searchBasedFetchers} is null
     */
    public CompositeSearchBasedFetcher(Set<SearchBasedFetcher> searchBasedFetchers, int maximumNumberOfReturnedResults)
            throws IllegalArgumentException {
        if (searchBasedFetchers == null) {
            throw new IllegalArgumentException("The set of searchBasedFetchers must not be null!");
        }
        // Remove the Composite Fetcher instance from its own fetcher set to prevent a StackOverflow
        this.fetchers = searchBasedFetchers.stream()
                                           .filter(searchBasedFetcher -> searchBasedFetcher != this)
                                           .collect(Collectors.toSet());
        this.maximumNumberOfReturnedResults = maximumNumberOfReturnedResults;
    }

    @Override
    public String getName() {
        return FETCHER_NAME;
    }

    @Override
    public Optional<HelpFile> getHelpPage() {
        return Optional.empty();
    }

    /**
     * Runs the query against all delegate fetchers in parallel and concatenates the
     * results up to {@code maximumNumberOfReturnedResults}. Failing fetchers contribute
     * nothing and are logged with their exception.
     */
    @Override
    public List<BibEntry> performSearch(QueryNode luceneQuery) throws FetcherException {
        // All entries have to be converted into one format, this is necessary for the format conversion
        return fetchers.parallelStream()
                       .flatMap(searchBasedFetcher -> {
                           try {
                               return searchBasedFetcher.performSearch(luceneQuery).stream();
                           } catch (FetcherException e) {
                               // Parameterized SLF4J logging instead of eager String.format;
                               // the throwable as last argument keeps the stack trace.
                               LOGGER.warn("{} API request failed", searchBasedFetcher.getName(), e);
                               return Stream.empty();
                           }
                       })
                       .limit(maximumNumberOfReturnedResults)
                       .collect(Collectors.toList());
    }
}
2,590
38.861538
119
java
null
jabref-main/src/main/java/org/jabref/logic/importer/fetcher/CrossRef.java
package org.jabref.logic.importer.fetcher;

import java.net.MalformedURLException;
import java.net.URISyntaxException;
import java.net.URL;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Optional;
import java.util.stream.IntStream;

import org.jabref.logic.cleanup.FieldFormatterCleanup;
import org.jabref.logic.formatter.bibtexfields.ClearFormatter;
import org.jabref.logic.formatter.bibtexfields.RemoveBracesFormatter;
import org.jabref.logic.importer.EntryBasedParserFetcher;
import org.jabref.logic.importer.FetcherException;
import org.jabref.logic.importer.IdBasedParserFetcher;
import org.jabref.logic.importer.IdParserFetcher;
import org.jabref.logic.importer.ParseException;
import org.jabref.logic.importer.Parser;
import org.jabref.logic.importer.SearchBasedParserFetcher;
import org.jabref.logic.importer.fetcher.transformers.DefaultQueryTransformer;
import org.jabref.logic.importer.util.JsonReader;
import org.jabref.logic.util.strings.StringSimilarity;
import org.jabref.model.entry.Author;
import org.jabref.model.entry.AuthorList;
import org.jabref.model.entry.BibEntry;
import org.jabref.model.entry.field.StandardField;
import org.jabref.model.entry.identifier.DOI;
import org.jabref.model.entry.types.EntryType;
import org.jabref.model.entry.types.StandardEntryType;
import org.jabref.model.util.OptionalUtil;

import kong.unirest.json.JSONArray;
import kong.unirest.json.JSONException;
import kong.unirest.json.JSONObject;
import org.apache.http.client.utils.URIBuilder;
import org.apache.lucene.queryparser.flexible.core.nodes.QueryNode;

/**
 * A class for fetching DOIs from CrossRef
 * <p>
 * See https://github.com/CrossRef/rest-api-doc
 */
public class CrossRef implements IdParserFetcher<DOI>, EntryBasedParserFetcher, SearchBasedParserFetcher, IdBasedParserFetcher {

    private static final String API_URL = "https://api.crossref.org/works";

    // cached: formatter is stateless and reused for every title comparison
    private static final RemoveBracesFormatter REMOVE_BRACES_FORMATTER = new RemoveBracesFormatter();

    @Override
    public String getName() {
        return "Crossref";
    }

    /**
     * Builds a bibliographic query URL from the entry's LaTeX-free title, author and year.
     */
    @Override
    public URL getURLForEntry(BibEntry entry) throws URISyntaxException, MalformedURLException, FetcherException {
        URIBuilder uriBuilder = new URIBuilder(API_URL);
        entry.getLatexFreeField(StandardField.TITLE).ifPresent(title -> uriBuilder.addParameter("query.bibliographic", title));
        entry.getLatexFreeField(StandardField.AUTHOR).ifPresent(author -> uriBuilder.addParameter("query.author", author));
        // only lower bound on publication date: the entry's year might be an early/online-first date
        entry.getLatexFreeField(StandardField.YEAR).ifPresent(year ->
                uriBuilder.addParameter("filter", "from-pub-date:" + year)
        );
        uriBuilder.addParameter("rows", "20"); // = API default
        uriBuilder.addParameter("offset", "0"); // start at beginning
        return uriBuilder.build().toURL();
    }

    /**
     * Builds a free-text search URL for the given Lucene query.
     */
    @Override
    public URL getURLForQuery(QueryNode luceneQuery) throws URISyntaxException, MalformedURLException, FetcherException {
        URIBuilder uriBuilder = new URIBuilder(API_URL);
        uriBuilder.addParameter("query", new DefaultQueryTransformer().transformLuceneQuery(luceneQuery).orElse(""));
        return uriBuilder.build().toURL();
    }

    /**
     * Builds a lookup URL for a single identifier (appended as path segment).
     */
    @Override
    public URL getUrlForIdentifier(String identifier) throws URISyntaxException, MalformedURLException, FetcherException {
        URIBuilder uriBuilder = new URIBuilder(API_URL + "/" + identifier);
        return uriBuilder.build().toURL();
    }

    /**
     * Parses the CrossRef JSON response. Handles both the single-work form
     * (no "items" array) and the list form ("message.items").
     */
    @Override
    public Parser getParser() {
        return inputStream -> {
            JSONObject response = JsonReader.toJsonObject(inputStream);
            if (response.isEmpty()) {
                return Collections.emptyList();
            }

            response = response.getJSONObject("message");
            if (response.isEmpty()) {
                return Collections.emptyList();
            }

            if (!response.has("items")) {
                // Singleton response
                BibEntry entry = jsonItemToBibEntry(response);
                return Collections.singletonList(entry);
            }

            // Response contains a list
            JSONArray items = response.getJSONArray("items");
            List<BibEntry> entries = new ArrayList<>(items.length());
            for (int i = 0; i < items.length(); i++) {
                JSONObject item = items.getJSONObject(i);
                BibEntry entry = jsonItemToBibEntry(item);
                entries.add(entry);
            }
            return entries;
        };
    }

    @Override
    public void doPostCleanup(BibEntry entry) {
        // Sometimes the fetched entry returns the title also in the subtitle field; in this case only keep the title field
        if (entry.getField(StandardField.TITLE).equals(entry.getField(StandardField.SUBTITLE))) {
            new FieldFormatterCleanup(StandardField.SUBTITLE, new ClearFormatter()).cleanup(entry);
        }
    }

    /**
     * Maps one CrossRef work object to a {@link BibEntry}.
     * Missing optional JSON members map to empty field values.
     *
     * @throws ParseException if a required member (e.g. "DOI", "type") is absent,
     *                        i.e. the API JSON format changed
     */
    private BibEntry jsonItemToBibEntry(JSONObject item) throws ParseException {
        try {
            BibEntry entry = new BibEntry();
            entry.setType(convertType(item.getString("type")));
            // title/subtitle arrive as arrays; only the first element is used
            entry.setField(StandardField.TITLE,
                    Optional.ofNullable(item.optJSONArray("title"))
                            .map(array -> array.optString(0)).orElse(""));
            entry.setField(StandardField.SUBTITLE,
                    Optional.ofNullable(item.optJSONArray("subtitle"))
                            .map(array -> array.optString(0)).orElse(""));
            entry.setField(StandardField.AUTHOR, toAuthors(item.optJSONArray("author")));
            // year is nested as published-print.date-parts[0][0]
            entry.setField(StandardField.YEAR,
                    Optional.ofNullable(item.optJSONObject("published-print"))
                            .map(array -> array.optJSONArray("date-parts"))
                            .map(array -> array.optJSONArray(0))
                            .map(array -> array.optInt(0))
                            .map(year -> Integer.toString(year)).orElse("")
            );
            entry.setField(StandardField.DOI, item.getString("DOI"));
            entry.setField(StandardField.PAGES, item.optString("page"));
            entry.setField(StandardField.VOLUME, item.optString("volume"));
            entry.setField(StandardField.ISSN, Optional.ofNullable(item.optJSONArray("ISSN")).map(array -> array.getString(0)).orElse(""));
            return entry;
        } catch (JSONException exception) {
            throw new ParseException("CrossRef API JSON format has changed", exception);
        }
    }

    /**
     * Joins the CrossRef author objects into a single "First Last and ..." string;
     * returns the empty string when no author array is present.
     */
    private String toAuthors(JSONArray authors) {
        if (authors == null) {
            return "";
        }

        // input: list of {"given":"A.","family":"Riel","affiliation":[]}
        return IntStream.range(0, authors.length())
                        .mapToObj(authors::getJSONObject)
                        .map(author -> new Author(
                                author.optString("given", ""), "", "",
                                author.optString("family", ""), ""))
                        .collect(AuthorList.collect())
                        .getAsFirstLastNamesWithAnd();
    }

    // every non-journal-article work type is mapped to Misc
    private EntryType convertType(String type) {
        return "journal-article".equals(type) ? StandardEntryType.Article : StandardEntryType.Misc;
    }

    /**
     * Picks the DOI of the fetched entry whose title (or title + subtitle) is
     * sufficiently similar to the input entry's title.
     */
    @Override
    public Optional<DOI> extractIdentifier(BibEntry inputEntry, List<BibEntry> fetchedEntries) throws FetcherException {
        final String entryTitle = REMOVE_BRACES_FORMATTER.format(inputEntry.getLatexFreeField(StandardField.TITLE).orElse(""));
        final StringSimilarity stringSimilarity = new StringSimilarity();

        for (BibEntry fetchedEntry : fetchedEntries) {
            // currently only title-based comparison
            // title
            Optional<String> dataTitle = fetchedEntry.getField(StandardField.TITLE);
            if (OptionalUtil.isPresentAnd(dataTitle, title -> stringSimilarity.isSimilar(entryTitle, title))) {
                return fetchedEntry.getDOI();
            }
            // subtitle
            // additional check, as sometimes subtitle is needed but sometimes only duplicates the title
            Optional<String> dataSubtitle = fetchedEntry.getField(StandardField.SUBTITLE);
            Optional<String> dataWithSubTitle = OptionalUtil.combine(dataTitle, dataSubtitle, (title, subtitle) -> title + " " + subtitle);
            if (OptionalUtil.isPresentAnd(dataWithSubTitle, titleWithSubtitle -> stringSimilarity.isSimilar(entryTitle, titleWithSubtitle))) {
                return fetchedEntry.getDOI();
            }
        }
        return Optional.empty();
    }

    @Override
    public String getIdentifierName() {
        return "DOI";
    }
}
8,796
42.985
142
java
null
jabref-main/src/main/java/org/jabref/logic/importer/fetcher/CustomizableKeyFetcher.java
package org.jabref.logic.importer.fetcher; import org.jabref.logic.importer.WebFetcher; public interface CustomizableKeyFetcher extends WebFetcher { default String getTestUrl() { return null; } }
214
20.5
60
java
null
jabref-main/src/main/java/org/jabref/logic/importer/fetcher/DBLPFetcher.java
package org.jabref.logic.importer.fetcher;

import java.net.MalformedURLException;
import java.net.URISyntaxException;
import java.net.URL;
import java.util.List;
import java.util.Objects;
import java.util.Optional;

import org.jabref.logic.cleanup.DoiCleanup;
import org.jabref.logic.cleanup.FieldFormatterCleanup;
import org.jabref.logic.cleanup.FieldFormatterCleanups;
import org.jabref.logic.formatter.bibtexfields.ClearFormatter;
import org.jabref.logic.help.HelpFile;
import org.jabref.logic.importer.FetcherException;
import org.jabref.logic.importer.ImportFormatPreferences;
import org.jabref.logic.importer.Parser;
import org.jabref.logic.importer.SearchBasedParserFetcher;
import org.jabref.logic.importer.fetcher.transformers.DBLPQueryTransformer;
import org.jabref.logic.importer.fileformat.BibtexParser;
import org.jabref.logic.layout.LayoutFormatterBasedFormatter;
import org.jabref.logic.layout.format.RemoveLatexCommandsFormatter;
import org.jabref.model.entry.BibEntry;
import org.jabref.model.entry.field.StandardField;

import org.apache.http.client.utils.URIBuilder;
import org.apache.lucene.queryparser.flexible.core.nodes.QueryNode;

/**
 * Fetches BibTeX data from DBLP (dblp.org)
 *
 * @see <a href="https://dblp.dagstuhl.de/faq/13501473">Basic API documentation</a>
 */
public class DBLPFetcher implements SearchBasedParserFetcher {

    public static final String FETCHER_NAME = "DBLP";

    private static final String BASIC_SEARCH_URL = "https://dblp.org/search/publ/api";

    private final ImportFormatPreferences importFormatPreferences;

    public DBLPFetcher(ImportFormatPreferences importFormatPreferences) {
        Objects.requireNonNull(importFormatPreferences);
        this.importFormatPreferences = importFormatPreferences;
    }

    /**
     * Builds the DBLP search URL; responses are requested in BibTeX format ("bib1").
     *
     * @param luceneQuery the parsed user query
     * @return the complete search URL
     */
    @Override
    public URL getURLForQuery(QueryNode luceneQuery) throws URISyntaxException, MalformedURLException, FetcherException {
        URIBuilder uriBuilder = new URIBuilder(BASIC_SEARCH_URL);
        uriBuilder.addParameter("q", new DBLPQueryTransformer().transformLuceneQuery(luceneQuery).orElse(""));
        uriBuilder.addParameter("h", String.valueOf(100)); // number of hits
        uriBuilder.addParameter("c", String.valueOf(0)); // no need for auto-completion
        uriBuilder.addParameter("f", String.valueOf(0)); // "from", index of first hit to download
        uriBuilder.addParameter("format", "bib1");

        return uriBuilder.build().toURL();
    }

    @Override
    public Parser getParser() {
        return new BibtexParser(importFormatPreferences);
    }

    /**
     * Normalizes DOIs and strips DBLP-specific artifacts from fetched entries.
     */
    @Override
    public void doPostCleanup(BibEntry entry) {
        DoiCleanup doiCleaner = new DoiCleanup();
        doiCleaner.cleanup(entry);

        FieldFormatterCleanups cleanups = new FieldFormatterCleanups(true, List.of(
                new FieldFormatterCleanup(StandardField.TIMESTAMP, new ClearFormatter()),
                // unescape the contents of the URL field, e.g., some\_url\_part becomes some_url_part
                new FieldFormatterCleanup(StandardField.URL, new LayoutFormatterBasedFormatter(new RemoveLatexCommandsFormatter()))
        ));
        cleanups.applySaveActions(entry);
    }

    @Override
    public String getName() {
        return FETCHER_NAME;
    }

    @Override
    public Optional<HelpFile> getHelpPage() {
        return Optional.of(HelpFile.FETCHER_DBLP);
    }
}
3,430
38.436782
139
java
null
jabref-main/src/main/java/org/jabref/logic/importer/fetcher/DOABFetcher.java
package org.jabref.logic.importer.fetcher; import java.net.MalformedURLException; import java.net.URISyntaxException; import java.net.URL; import java.util.ArrayList; import java.util.Collections; import java.util.List; import java.util.StringJoiner; import org.jabref.logic.importer.FetcherException; import org.jabref.logic.importer.Parser; import org.jabref.logic.importer.SearchBasedParserFetcher; import org.jabref.logic.importer.fetcher.transformers.DefaultQueryTransformer; import org.jabref.logic.importer.util.JsonReader; import org.jabref.model.entry.Author; import org.jabref.model.entry.AuthorList; import org.jabref.model.entry.BibEntry; import org.jabref.model.entry.field.StandardField; import org.jabref.model.entry.types.StandardEntryType; import org.jabref.model.strings.StringUtil; import kong.unirest.json.JSONArray; import kong.unirest.json.JSONException; import kong.unirest.json.JSONObject; import org.apache.http.client.utils.URIBuilder; import org.apache.lucene.queryparser.flexible.core.nodes.QueryNode; /** * fetches books from https://www.doabooks.org/ through * <a href="https://www.doabooks.org/en/resources/metadata-harvesting-and-content-dissemination">their API</a>. */ public class DOABFetcher implements SearchBasedParserFetcher { private static final String SEARCH_URL = "https://directory.doabooks.org/rest/search?"; @Override public String getName() { return "DOAB"; } @Override public URL getURLForQuery(QueryNode luceneQuery) throws URISyntaxException, MalformedURLException, FetcherException { URIBuilder builder = new URIBuilder(SEARCH_URL); String query = new DefaultQueryTransformer().transformLuceneQuery(luceneQuery).orElse(""); // adding quotations for the query for more specified results // without the quotation the results returned are not relevant to the query query = ("\"".concat(query)).concat("\""); builder.addParameter("query", query); // bitstreams included in URL building to acquire ISBN's. 
builder.addParameter("expand", "metadata,bitstreams"); return builder.build().toURL(); } @Override public Parser getParser() { return InputStream -> { // can't use this method JsonReader.toJsonObject(inputStream) because the results are sent in an array // like format resulting in an error when trying to convert them into a json object // created a similar method suitable for this case "toJsonArray" JSONArray response = JsonReader.toJsonArray(InputStream); if (response.isEmpty()) { return Collections.emptyList(); } if (response.length() == 1) { // the information used for bibtex entries are in an array inside the resulting jsonarray // see this query for reference https://directory.doabooks.org/rest/search?query="i open fire"&expand=metadata JSONArray metadataArray = response.getJSONObject(0).getJSONArray("metadata"); JSONArray bitstreamArray = response.getJSONObject(0).getJSONArray("bitstreams"); BibEntry entry = jsonToBibEntry(metadataArray, bitstreamArray); return Collections.singletonList(entry); } List<BibEntry> entries = new ArrayList<>(response.length()); for (int i = 0; i < response.length(); i++) { JSONArray metadataArray = response.getJSONObject(i).getJSONArray("metadata"); JSONArray bitstreamArray = response.getJSONObject(i).getJSONArray("bitstreams"); BibEntry entry = jsonToBibEntry(metadataArray, bitstreamArray); entries.add(entry); } return entries; }; } private BibEntry jsonToBibEntry(JSONArray metadataArray, JSONArray bitstreamArray) { BibEntry entry = new BibEntry(); List<Author> authorsList = new ArrayList<>(); List<Author> editorsList = new ArrayList<>(); StringJoiner keywordJoiner = new StringJoiner(", "); String publisherImprint = ""; // Get the ISBN within the BITSTREAM. See the link below: // https://directory.doabooks.org/rest/search?query=handle:%2220.500.12854/26303%22&expand=metadata,bitstreams // Note that in many cases, an ISBN cannot be obtained in the metadata, even in the BITSTREAM. 
See the link below: // https://directory.doabooks.org/rest/search?query=%22i%20open%20fire%22&expand=metadata,bitstreams for (int i = 0; i < bitstreamArray.length(); i++) { JSONObject bitstreamObject = bitstreamArray.getJSONObject(i); // Subcategorise each instance of the BITSTREAM by "metadata" key JSONArray array = bitstreamObject.getJSONArray("metadata"); for (int k = 0; k < array.length(); k++) { JSONObject metadataInBitstreamObject = array.getJSONObject(k); if (metadataInBitstreamObject.getString("key").equals("dc.identifier.isbn")) { entry.setField(StandardField.ISBN, metadataInBitstreamObject.getString("value")); } else if (metadataInBitstreamObject.getString("key").equals("oapen.relation.isbn")) { entry.setField(StandardField.ISBN, metadataInBitstreamObject.getString("value")); } } } for (int i = 0; i < metadataArray.length(); i++) { JSONObject dataObject = metadataArray.getJSONObject(i); switch (dataObject.getString("key")) { case "dc.contributor.author" -> { if (dataObject.getString("value").contains("(Ed.)")) { editorsList.add(toAuthor(namePreprocessing(dataObject.getString("value")))); } else { authorsList.add(toAuthor(dataObject.getString("value"))); } } case "dc.type" -> entry.setType(StandardEntryType.Book); case "dc.date.issued" -> entry.setField(StandardField.DATE, String.valueOf( dataObject.getString("value"))); case "oapen.identifier.doi" -> entry.setField(StandardField.DOI, dataObject.getString("value")); case "dc.title" -> entry.setField(StandardField.TITLE, dataObject.getString("value")); case "oapen.pages" -> { try { entry.setField(StandardField.PAGES, String.valueOf(dataObject.getInt("value"))); } catch (JSONException e) { entry.setField(StandardField.PAGES, dataObject.getString("value")); } } case "dc.description.abstract" -> entry.setField(StandardField.ABSTRACT, dataObject.getString("value")); case "dc.language" -> entry.setField(StandardField.LANGUAGE, dataObject.getString("value")); case "publisher.name" -> 
entry.setField(StandardField.PUBLISHER, dataObject.getString("value")); case "dc.identifier.uri" -> entry.setField(StandardField.URI, dataObject.getString("value")); case "dc.identifier" -> { if (dataObject.getString("value").contains("http")) { entry.setField(StandardField.URL, dataObject.getString("value")); } } case "dc.subject.other" -> keywordJoiner.add(dataObject.getString("value")); case "dc.contributor.editor" -> editorsList.add(toAuthor(dataObject.getString("value"))); case "oapen.volume" -> entry.setField(StandardField.VOLUME, dataObject.getString("value")); case "oapen.relation.isbn", "dc.identifier.isbn" -> entry.setField(StandardField.ISBN, dataObject.getString("value")); case "dc.title.alternative" -> entry.setField(StandardField.SUBTITLE, dataObject.getString("value")); case "oapen.imprint" -> publisherImprint = dataObject.getString("value"); } } entry.setField(StandardField.AUTHOR, AuthorList.of(authorsList).getAsFirstLastNamesWithAnd()); entry.setField(StandardField.EDITOR, AuthorList.of(editorsList).getAsFirstLastNamesWithAnd()); entry.setField(StandardField.KEYWORDS, String.valueOf(keywordJoiner)); // Special condition to check if publisher field is empty. If so, retrieve imprint (if available) if (entry.getField(StandardField.PUBLISHER).isEmpty()) { if (!StringUtil.isNullOrEmpty(publisherImprint)) { entry.setField(StandardField.PUBLISHER, publisherImprint); } } return entry; } private Author toAuthor(String author) { return AuthorList.parse(author).getAuthor(0); } private String namePreprocessing(String name) { return name.replace("(Ed.)", ""); } }
9,217
50.497207
126
java
null
jabref-main/src/main/java/org/jabref/logic/importer/fetcher/DOAJFetcher.java
package org.jabref.logic.importer.fetcher; import java.io.BufferedReader; import java.io.InputStreamReader; import java.net.MalformedURLException; import java.net.URISyntaxException; import java.net.URL; import java.util.ArrayList; import java.util.List; import java.util.Objects; import java.util.Optional; import java.util.stream.Collectors; import org.jabref.logic.help.HelpFile; import org.jabref.logic.importer.FetcherException; import org.jabref.logic.importer.ImportFormatPreferences; import org.jabref.logic.importer.Parser; import org.jabref.logic.importer.SearchBasedParserFetcher; import org.jabref.logic.importer.fetcher.transformers.DefaultLuceneQueryTransformer; import org.jabref.logic.util.OS; import org.jabref.model.entry.BibEntry; import org.jabref.model.entry.field.Field; import org.jabref.model.entry.field.StandardField; import org.jabref.model.entry.types.StandardEntryType; import org.jabref.model.strings.StringUtil; import kong.unirest.json.JSONArray; import kong.unirest.json.JSONObject; import org.apache.http.client.utils.URIBuilder; import org.apache.lucene.queryparser.flexible.core.nodes.QueryNode; import org.slf4j.Logger; import org.slf4j.LoggerFactory; /** * Fetches data from the Directory of Open Access Journals (DOAJ) * * @see <a href="https://doaj.org/api/v1/docs">API documentation</a> */ public class DOAJFetcher implements SearchBasedParserFetcher { private static final Logger LOGGER = LoggerFactory.getLogger(DOAJFetcher.class); private static final String SEARCH_URL = "https://doaj.org/api/v1/search/articles/"; private final ImportFormatPreferences preferences; public DOAJFetcher(ImportFormatPreferences preferences) { this.preferences = Objects.requireNonNull(preferences); } /** * Convert a JSONObject containing a bibJSON entry to a BibEntry * * @param bibJsonEntry The JSONObject to convert * @return the converted BibEntry */ public static BibEntry parseBibJSONtoBibtex(JSONObject bibJsonEntry, Character keywordSeparator) { // Fields that are 
directly accessible at the top level BibJson object Field[] singleFields = {StandardField.YEAR, StandardField.TITLE, StandardField.ABSTRACT, StandardField.MONTH}; // Fields that are accessible in the journal part of the BibJson object Field[] journalSingleFields = {StandardField.PUBLISHER, StandardField.NUMBER, StandardField.VOLUME}; BibEntry entry = new BibEntry(StandardEntryType.Article); // Authors if (bibJsonEntry.has("author")) { JSONArray authors = bibJsonEntry.getJSONArray("author"); List<String> authorList = new ArrayList<>(); for (int i = 0; i < authors.length(); i++) { if (authors.getJSONObject(i).has("name")) { authorList.add(authors.getJSONObject(i).getString("name")); } else { LOGGER.info("Empty author name."); } } entry.setField(StandardField.AUTHOR, String.join(" and ", authorList)); } else { LOGGER.info("No author found."); } // Direct accessible fields for (Field field : singleFields) { if (bibJsonEntry.has(field.getName())) { entry.setField(field, bibJsonEntry.getString(field.getName())); } } // Page numbers if (bibJsonEntry.has("start_page")) { if (bibJsonEntry.has("end_page")) { entry.setField(StandardField.PAGES, bibJsonEntry.getString("start_page") + "--" + bibJsonEntry.getString("end_page")); } else { entry.setField(StandardField.PAGES, bibJsonEntry.getString("start_page")); } } // Journal if (bibJsonEntry.has("journal")) { JSONObject journal = bibJsonEntry.getJSONObject("journal"); // Journal title if (journal.has("title")) { entry.setField(StandardField.JOURNAL, journal.getString("title").trim()); } else { LOGGER.info("No journal title found."); } // Other journal related fields for (Field field : journalSingleFields) { if (journal.has(field.getName())) { entry.setField(field, journal.getString(field.getName())); } } } else { LOGGER.info("No journal information found."); } // Keywords if (bibJsonEntry.has("keywords")) { JSONArray keywords = bibJsonEntry.getJSONArray("keywords"); for (int i = 0; i < keywords.length(); i++) { if 
(!keywords.isNull(i)) { entry.addKeyword(keywords.getString(i).trim(), keywordSeparator); } } } // Identifiers if (bibJsonEntry.has("identifier")) { JSONArray identifiers = bibJsonEntry.getJSONArray("identifier"); for (int i = 0; i < identifiers.length(); i++) { String type = identifiers.getJSONObject(i).getString("type"); if ("doi".equals(type)) { entry.setField(StandardField.DOI, identifiers.getJSONObject(i).getString("id")); } else if ("pissn".equals(type)) { entry.setField(StandardField.ISSN, identifiers.getJSONObject(i).getString("id")); } else if ("eissn".equals(type)) { entry.setField(StandardField.ISSN, identifiers.getJSONObject(i).getString("id")); } } } // Links if (bibJsonEntry.has("link")) { JSONArray links = bibJsonEntry.getJSONArray("link"); for (int i = 0; i < links.length(); i++) { if (links.getJSONObject(i).has("type")) { String type = links.getJSONObject(i).getString("type"); if ("fulltext".equals(type) && links.getJSONObject(i).has("url")) { entry.setField(StandardField.URL, links.getJSONObject(i).getString("url")); } } } } return entry; } public static URIBuilder addPath(URIBuilder base, String subPath) { // slightly altered version based on https://gist.github.com/enginer/230e2dc2f1d213a825d5 if (StringUtil.isBlank(subPath) || "/".equals(subPath)) { return base; } else { base.setPath(appendSegmentToPath(base.getPath(), subPath)); return base; } } private static String appendSegmentToPath(String path, String segment) { if (StringUtil.isBlank(path)) { path = "/"; } if (path.charAt(path.length() - 1) == '/' || segment.startsWith("/")) { return path + segment; } return path + "/" + segment; } @Override public String getName() { return "DOAJ"; } @Override public Optional<HelpFile> getHelpPage() { return Optional.of(HelpFile.FETCHER_DOAJ); } @Override public URL getURLForQuery(QueryNode luceneQuery) throws URISyntaxException, MalformedURLException, FetcherException { URIBuilder uriBuilder = new URIBuilder(SEARCH_URL); DOAJFetcher.addPath(uriBuilder, 
new DefaultLuceneQueryTransformer().transformLuceneQuery(luceneQuery).orElse("")); // Number of results uriBuilder.addParameter("pageSize", "30"); // Page (not needed so far) // uriBuilder.addParameter("page", "1"); return uriBuilder.build().toURL(); } @Override public Parser getParser() { return inputStream -> { String response = new BufferedReader(new InputStreamReader(inputStream)).lines().collect(Collectors.joining(OS.NEWLINE)); JSONObject jsonObject = new JSONObject(response); List<BibEntry> entries = new ArrayList<>(); if (jsonObject.has("results")) { JSONArray results = jsonObject.getJSONArray("results"); for (int i = 0; i < results.length(); i++) { JSONObject bibJsonEntry = results.getJSONObject(i).getJSONObject("bibjson"); BibEntry entry = parseBibJSONtoBibtex(bibJsonEntry, preferences.bibEntryPreferences().getKeywordSeparator()); entries.add(entry); } } return entries; }; } }
8,575
37.981818
133
java
null
jabref-main/src/main/java/org/jabref/logic/importer/fetcher/DiVA.java
package org.jabref.logic.importer.fetcher; import java.net.MalformedURLException; import java.net.URISyntaxException; import java.net.URL; import java.util.Optional; import org.jabref.logic.help.HelpFile; import org.jabref.logic.importer.FetcherException; import org.jabref.logic.importer.IdBasedParserFetcher; import org.jabref.logic.importer.ImportFormatPreferences; import org.jabref.logic.importer.Parser; import org.jabref.logic.importer.fileformat.BibtexParser; import org.apache.http.client.utils.URIBuilder; /* * http://www.diva-portal.org/smash/aboutdiva.jsf?dswid=-3222 * DiVA portal contains research publications and student theses from 40 Swedish universities and research institutions. */ public class DiVA implements IdBasedParserFetcher { private final ImportFormatPreferences importFormatPreferences; public DiVA(ImportFormatPreferences importFormatPreferences) { this.importFormatPreferences = importFormatPreferences; } @Override public String getName() { return "DiVA"; } @Override public Optional<HelpFile> getHelpPage() { return Optional.of(HelpFile.FETCHER_DIVA); } @Override public URL getUrlForIdentifier(String identifier) throws URISyntaxException, MalformedURLException, FetcherException { URIBuilder uriBuilder = new URIBuilder("http://www.diva-portal.org/smash/getreferences"); uriBuilder.addParameter("referenceFormat", "BibTex"); uriBuilder.addParameter("pids", identifier); return uriBuilder.build().toURL(); } @Override public Parser getParser() { return new BibtexParser(importFormatPreferences); } public boolean isValidId(String identifier) { return identifier.startsWith("diva2:"); } }
1,780
29.706897
122
java
null
jabref-main/src/main/java/org/jabref/logic/importer/fetcher/DoiFetcher.java
package org.jabref.logic.importer.fetcher;

import java.io.IOException;
import java.net.HttpURLConnection;
import java.net.URL;
import java.net.URLConnection;
import java.util.Collections;
import java.util.List;
import java.util.Optional;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CompletionException;
import java.util.regex.Pattern;

import org.jabref.logic.cleanup.FieldFormatterCleanup;
import org.jabref.logic.formatter.bibtexfields.ClearFormatter;
import org.jabref.logic.formatter.bibtexfields.NormalizePagesFormatter;
import org.jabref.logic.help.HelpFile;
import org.jabref.logic.importer.EntryBasedFetcher;
import org.jabref.logic.importer.FetcherException;
import org.jabref.logic.importer.IdBasedFetcher;
import org.jabref.logic.importer.ImportFormatPreferences;
import org.jabref.logic.importer.ParseException;
import org.jabref.logic.importer.fileformat.BibtexParser;
import org.jabref.logic.importer.util.MediaTypes;
import org.jabref.logic.l10n.Localization;
import org.jabref.logic.net.URLDownload;
import org.jabref.model.entry.BibEntry;
import org.jabref.model.entry.field.StandardField;
import org.jabref.model.entry.identifier.DOI;
import org.jabref.model.entry.types.StandardEntryType;
import org.jabref.model.util.OptionalUtil;

import com.google.common.util.concurrent.RateLimiter;
import kong.unirest.json.JSONArray;
import kong.unirest.json.JSONException;
import kong.unirest.json.JSONObject;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/**
 * Fetches a BibTeX entry for a DOI via DOI Content Negotiation.
 * Depending on the registration agency (Crossref, DataCite, mEDRA), different
 * rate limits and parsing strategies apply.
 */
public class DoiFetcher implements IdBasedFetcher, EntryBasedFetcher {

    public static final String NAME = "DOI";

    // Organization id in the DOI prefix identifying American Physical Society (APS) journals
    private static final String APS_JOURNAL_ORG_DOI_ID = "1103";
    // Shape of an APS DOI suffix, e.g. "PhysRevLett.123.456789"
    private static final String APS_SUFFIX = "([\\w]+\\.)([\\w]+\\.)([\\w]+)";
    private static final Pattern APS_SUFFIX_PATTERN = Pattern.compile(APS_SUFFIX);

    private static final Logger LOGGER = LoggerFactory.getLogger(DoiFetcher.class);

    // 1000 request per 5 minutes. See https://support.datacite.org/docs/is-there-a-rate-limit-for-making-requests-against-the-datacite-apis
    private static final RateLimiter DATA_CITE_DCN_RATE_LIMITER = RateLimiter.create(3.33);

    /*
     * By default, it seems that CrossRef DOI Content Negotiation responses are returned by their API pools, more specifically the public one
     * (by default). See https://www.crossref.org/documentation/retrieve-metadata/content-negotiation/
     * Experimentally, the rating applied to this pool is defined by response headers "X-Rate-Limit-Interval" and "X-Rate-Limit-Limit", which seems
     * to default to 50 request / second. However, because of its dynamic nature, this rate could change between API calls, so we need to update it
     * atomically when that happens (as multiple threads might access it at the same time)
     */
    private static final RateLimiter CROSSREF_DCN_RATE_LIMITER = RateLimiter.create(50.0);

    private final ImportFormatPreferences preferences;

    public DoiFetcher(ImportFormatPreferences preferences) {
        this.preferences = preferences;
    }

    @Override
    public String getName() {
        return DoiFetcher.NAME;
    }

    @Override
    public Optional<HelpFile> getHelpPage() {
        return Optional.of(HelpFile.FETCHER_DOI);
    }

    /**
     * Blocks (via the agency-specific rate limiter) until a request for the given
     * identifier may be sent. Rate-limiting failures are logged, never propagated.
     */
    private void doAPILimiting(String identifier) {
        // Without a generic API Rate Limiter implemented on the project, use Guava's RateLimiter for avoiding
        // API throttling when multiple threads are working, specially during DOI Content Negotiations
        Optional<DOI> doi = DOI.parse(identifier);

        try {
            Optional<String> agency;
            if (doi.isPresent() && (agency = getAgency(doi.get())).isPresent()) {
                double waitingTime = 0.0;
                if ("datacite".equalsIgnoreCase(agency.get())) {
                    waitingTime = DATA_CITE_DCN_RATE_LIMITER.acquire();
                } else if ("crossref".equalsIgnoreCase(agency.get())) {
                    waitingTime = CROSSREF_DCN_RATE_LIMITER.acquire();
                } // mEDRA does not explicit an API rating

                LOGGER.trace(String.format("Thread %s, searching for DOI '%s', waited %.2fs because of API rate limiter",
                        Thread.currentThread().threadId(), identifier, waitingTime));
            }
        } catch (IOException e) {
            LOGGER.warn("Could not limit DOI API access rate", e);
        }
    }

    /**
     * Asynchronous variant of {@link #performSearchById(String)}. Rate limiting happens
     * on the calling thread (before the future is created); a FetcherException is
     * surfaced as a {@link CompletionException}.
     */
    protected CompletableFuture<Optional<BibEntry>> asyncPerformSearchById(String identifier) {
        doAPILimiting(identifier);
        return CompletableFuture.supplyAsync(() -> {
            try {
                return performSearchById(identifier);
            } catch (FetcherException e) {
                throw new CompletionException(e);
            }
        });
    }

    /**
     * Resolves the DOI, negotiates BibTeX content, and parses the result.
     * mEDRA-registered DOIs are delegated to the {@code Medra} fetcher because
     * mEDRA does not serve parsable BibTeX.
     *
     * @throws FetcherException on invalid DOI, connection errors, or unparsable responses
     */
    @Override
    public Optional<BibEntry> performSearchById(String identifier) throws FetcherException {
        Optional<DOI> doi = DOI.parse(identifier);

        try {
            if (doi.isPresent()) {
                Optional<BibEntry> fetchedEntry;

                // mEDRA does not return a parsable bibtex string
                Optional<String> agency = getAgency(doi.get());
                if (agency.isPresent() && "medra".equalsIgnoreCase(agency.get())) {
                    return new Medra().performSearchById(identifier);
                }
                URL doiURL = new URL(doi.get().getURIAsASCIIString());

                // BibTeX data — request the BibTeX media type via content negotiation
                URLDownload download = getUrlDownload(doiURL);
                download.addHeader("Accept", MediaTypes.APPLICATION_BIBTEX);

                String bibtexString;
                URLConnection openConnection;
                try {
                    openConnection = download.openConnection();
                    bibtexString = URLDownload.asString(openConnection);
                } catch (IOException e) {
                    // an IOException with a nested FetcherException will be thrown when you encounter a 400x or 500x http status code
                    if (e.getCause() instanceof FetcherException fe) {
                        throw fe;
                    }
                    throw e;
                }

                // BibTeX entry
                fetchedEntry = BibtexParser.singleFromString(bibtexString, preferences);
                fetchedEntry.ifPresent(this::doPostCleanup);

                // Crossref has a dynamic API rate limit; pick up any change announced in the response headers
                if (agency.isPresent() && "crossref".equalsIgnoreCase(agency.get())) {
                    updateCrossrefAPIRate(openConnection);
                }

                // Check if the entry is an APS journal and add the article id as the page count if page field is missing
                if (fetchedEntry.isPresent() && fetchedEntry.get().hasField(StandardField.DOI)) {
                    BibEntry entry = fetchedEntry.get();
                    if (isAPSJournal(entry, entry.getField(StandardField.DOI).get()) && !entry.hasField(StandardField.PAGES)) {
                        setPageCountToArticleId(entry, entry.getField(StandardField.DOI).get());
                    }
                }

                if (openConnection instanceof HttpURLConnection connection) {
                    connection.disconnect();
                }
                return fetchedEntry;
            } else {
                throw new FetcherException(Localization.lang("Invalid DOI: '%0'.", identifier));
            }
        } catch (IOException e) {
            throw new FetcherException(Localization.lang("Connection error"), e);
        } catch (ParseException e) {
            throw new FetcherException("Could not parse BibTeX entry", e);
        } catch (JSONException e) {
            throw new FetcherException("Could not retrieve Registration Agency", e);
        }
    }

    // Normalizes pages and drops the URL field of a freshly fetched entry
    private void doPostCleanup(BibEntry entry) {
        new FieldFormatterCleanup(StandardField.PAGES, new NormalizePagesFormatter()).cleanup(entry);
        new FieldFormatterCleanup(StandardField.URL, new ClearFormatter()).cleanup(entry);
    }

    /**
     * Recomputes the Crossref rate limit from the "X-Rate-Limit-*" response headers of
     * an already-open connection. Missing or malformed headers only produce a warning.
     */
    private void updateCrossrefAPIRate(URLConnection existingConnection) {
        try {
            // Assuming this field is given in seconds
            String xRateLimitInterval = existingConnection.getHeaderField("X-Rate-Limit-Interval").replaceAll("[^\\.0123456789]", "");
            String xRateLimit = existingConnection.getHeaderField("X-Rate-Limit-Limit");

            double newRate = Double.parseDouble(xRateLimit) / Double.parseDouble(xRateLimitInterval);
            double oldRate = CROSSREF_DCN_RATE_LIMITER.getRate();

            // In theory, the actual update might rarely happen...
            if (Math.abs(newRate - oldRate) >= 1.0) {
                LOGGER.info(String.format("Updated Crossref API rate limit from %.2f to %.2f", oldRate, newRate));
                CROSSREF_DCN_RATE_LIMITER.setRate(newRate);
            }
        } catch (NullPointerException | IllegalArgumentException e) {
            LOGGER.warn("Could not deduce Crossref API's rate limit from response header. API might have changed");
        }
    }

    /**
     * EntryBasedFetcher entry point: searches by the entry's DOI field, if present.
     */
    @Override
    public List<BibEntry> performSearch(BibEntry entry) throws FetcherException {
        Optional<String> doi = entry.getField(StandardField.DOI);
        if (doi.isPresent()) {
            return OptionalUtil.toList(performSearchById(doi.get()));
        } else {
            return Collections.emptyList();
        }
    }

    /**
     * Returns registration agency. Optional.empty() if no agency is found.
     *
     * @param doi the DOI to be searched
     */
    public Optional<String> getAgency(DOI doi) throws IOException {
        Optional<String> agency = Optional.empty();
        try {
            URLDownload download = getUrlDownload(new URL(DOI.AGENCY_RESOLVER + "/" + doi.getDOI()));
            JSONObject response = new JSONArray(download.asString()).getJSONObject(0);
            if (response != null) {
                agency = Optional.ofNullable(response.optString("RA"));
            }
        } catch (JSONException e) {
            LOGGER.error("Cannot parse agency fetcher response to JSON");
            return Optional.empty();
        }

        return agency;
    }

    // Uses the last dot-separated token of the DOI (the APS article id) as the pages value
    private void setPageCountToArticleId(BibEntry entry, String doiAsString) {
        String articleId = doiAsString.substring(doiAsString.lastIndexOf('.') + 1);
        entry.setField(StandardField.PAGES, articleId);
    }

    // checks if the entry is an APS journal by comparing the organization id and the suffix format
    private boolean isAPSJournal(BibEntry entry, String doiAsString) {
        if (!entry.getType().equals(StandardEntryType.Article)) {
            return false;
        }
        String suffix = doiAsString.substring(doiAsString.lastIndexOf('/') + 1);
        String organizationId = doiAsString.substring(doiAsString.indexOf('.') + 1, doiAsString.indexOf('/'));
        return organizationId.equals(APS_JOURNAL_ORG_DOI_ID) && APS_SUFFIX_PATTERN.matcher(suffix).matches();
    }
}
11,077
43.850202
147
java
null
jabref-main/src/main/java/org/jabref/logic/importer/fetcher/DoiResolution.java
package org.jabref.logic.importer.fetcher;

import java.io.IOException;
import java.net.MalformedURLException;
import java.net.URI;
import java.net.URISyntaxException;
import java.net.URL;
import java.util.ArrayList;
import java.util.List;
import java.util.Locale;
import java.util.Objects;
import java.util.Optional;
import java.util.stream.Collectors;

import org.jabref.logic.importer.FulltextFetcher;
import org.jabref.logic.importer.ImportFormatPreferences;
import org.jabref.logic.importer.ImporterPreferences;
import org.jabref.logic.importer.WebFetchers;
import org.jabref.logic.net.URLDownload;
import org.jabref.logic.preferences.DOIPreferences;
import org.jabref.model.entry.BibEntry;
import org.jabref.model.entry.field.StandardField;
import org.jabref.model.entry.identifier.DOI;

import org.jsoup.Connection;
import org.jsoup.Jsoup;
import org.jsoup.UnsupportedMimeTypeException;
import org.jsoup.nodes.Document;
import org.jsoup.nodes.Element;
import org.jsoup.select.Elements;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/**
 * FulltextFetcher implementation that follows the DOI resolution redirects and scans for a full-text PDF URL.
 *
 * Note that we also have custom fetchers in place.
 * See {@link WebFetchers#getFullTextFetchers(ImportFormatPreferences, ImporterPreferences)}.
 */
public class DoiResolution implements FulltextFetcher {
    private static final Logger LOGGER = LoggerFactory.getLogger(DoiResolution.class);

    // preferences controlling whether a custom DOI resolver base URL is used
    private DOIPreferences doiPreferences;

    public DoiResolution(DOIPreferences doiPreferences) {
        super();
        this.doiPreferences = doiPreferences;
    }

    /**
     * Resolves the entry's DOI (via the default or a custom resolver base), follows all
     * redirects, and scans the landing page for a full-text PDF URL. Candidate sources
     * are tried in order: citation_pdf_url meta tag, embedded PDF viewer, then anchor
     * links mentioning "pdf".
     *
     * @return the PDF URL, or Optional.empty() if none could be determined
     */
    @Override
    public Optional<URL> findFullText(BibEntry entry) throws IOException {
        Objects.requireNonNull(entry);

        Optional<DOI> doi = entry.getField(StandardField.DOI).flatMap(DOI::parse);

        if (doi.isEmpty()) {
            return Optional.empty();
        }

        URL base;
        String doiLink;
        if (doiPreferences.isUseCustom()) {
            // resolve against the user-configured DOI base URI instead of doi.org
            base = new URL(doiPreferences.getDefaultBaseURI());
            doiLink = doi.get()
                         .getExternalURIWithCustomBase(base.toString())
                         .map(URI::toASCIIString)
                         .orElse("");
        } else {
            base = DOI.RESOLVER.toURL();
            doiLink = doi.get().getURIAsASCIIString();
        }
        if (doiLink.isEmpty()) {
            return Optional.empty();
        }

        // follow all redirects and scan for a single pdf link
        try {
            Connection connection = Jsoup.connect(doiLink);
            // pretend to be a browser (agent & referrer)
            connection.userAgent(URLDownload.USER_AGENT);
            connection.referrer("https://www.google.com");
            connection.followRedirects(true);
            connection.ignoreHttpErrors(true);
            // some publishers are quite slow (default is 3s)
            connection.timeout(30_000);

            Connection.Response response = connection.execute();
            Document html = response.parse();
            // citation pdf meta tag
            Optional<URL> citationMetaTag = citationMetaTag(html);
            if (citationMetaTag.isPresent()) {
                return citationMetaTag;
            }
            Optional<URL> embeddedLink = findEmbeddedLink(html, base);
            if (embeddedLink.isPresent()) {
                return embeddedLink;
            }

            // scan for PDF
            Elements hrefElements = html.body().select("a[href]");

            List<URL> links = new ArrayList<>();
            for (Element element : hrefElements) {
                String href = element.attr("abs:href").toLowerCase(Locale.ENGLISH);
                String hrefText = element.text().toLowerCase(Locale.ENGLISH);
                // Only check if pdf is included in the link or inside the text
                // ACM uses tokens without PDF inside the link
                // See https://github.com/lehner/LocalCopy for more scrape ideas
                // link with "PDF" in title tag
                if (element.attr("title").toLowerCase(Locale.ENGLISH).contains("pdf") && new URLDownload(href).isPdf()) {
                    return Optional.of(new URL(href));
                }

                // NOTE(review): '&&' binds tighter than '||', so any href containing "pdf" is
                // collected WITHOUT the isPdf() content check; only the hrefText branch is
                // gated by isPdf(). Confirm whether ((href || hrefText) && isPdf()) was intended.
                if (href.contains("pdf") || hrefText.contains("pdf") && new URLDownload(href).isPdf()) {
                    links.add(new URL(href));
                }
            }
            // return if only one link was found (high accuracy)
            if (links.size() == 1) {
                LOGGER.info("Fulltext PDF found @ {}", doiLink);
                return Optional.of(links.get(0));
            }
            // return if links are equal
            return findDistinctLinks(links);
        } catch (UnsupportedMimeTypeException type) {
            // this might be the PDF already as we follow redirects
            if (type.getMimeType().startsWith("application/pdf")) {
                return Optional.of(new URL(type.getUrl()));
            }
            LOGGER.warn("DoiResolution fetcher failed: ", type);
        } catch (IOException e) {
            LOGGER.warn("DoiResolution fetcher failed: ", e);
        }

        return Optional.empty();
    }

    /**
     * Scan for <meta name="citation_pdf_url">
     * See https://scholar.google.com/intl/de/scholar/inclusion.html#indexing
     */
    private Optional<URL> citationMetaTag(Document html) {
        Elements citationPdfUrlElement = html.head().select("meta[name='citation_pdf_url']");
        Optional<String> citationPdfUrl = citationPdfUrlElement.stream().map(e -> e.attr("content")).findFirst();

        if (citationPdfUrl.isPresent()) {
            try {
                return Optional.of(new URL(citationPdfUrl.get()));
            } catch (MalformedURLException e) {
                return Optional.empty();
            }
        }

        return Optional.empty();
    }

    // Looks for an <embed id='pdf'> viewer and resolves its src against the resolver base URL
    private Optional<URL> findEmbeddedLink(Document html, URL base) {
        Elements embedElement = html.body().select("embed[id='pdf']");
        Optional<String> pdfUrl = embedElement
                .stream()
                .map(e -> e.attr("src")).findFirst();

        if (pdfUrl.isPresent()) {
            try {
                URL url = base.toURI().resolve(pdfUrl.get()).toURL();
                return Optional.of(url);
            } catch (MalformedURLException | URISyntaxException e) {
                return Optional.empty();
            }
        }

        return Optional.empty();
    }

    // Returns the single candidate if all collected links point to the same URL, else empty
    private Optional<URL> findDistinctLinks(List<URL> urls) {
        List<URL> distinctLinks = urls.stream().distinct().collect(Collectors.toList());

        if (distinctLinks.isEmpty()) {
            return Optional.empty();
        }
        // equal
        if (distinctLinks.size() == 1) {
            return Optional.of(distinctLinks.get(0));
        }

        return Optional.empty();
    }

    @Override
    public TrustLevel getTrustLevel() {
        return TrustLevel.SOURCE;
    }
}
7,059
35.205128
121
java
null
jabref-main/src/main/java/org/jabref/logic/importer/fetcher/GoogleScholar.java
package org.jabref.logic.importer.fetcher; import java.io.IOException; import java.io.StringReader; import java.net.HttpCookie; import java.net.URISyntaxException; import java.net.URL; import java.util.ArrayList; import java.util.Collection; import java.util.List; import java.util.Objects; import java.util.Optional; import java.util.regex.Matcher; import java.util.regex.Pattern; import org.jabref.logic.help.HelpFile; import org.jabref.logic.importer.FetcherException; import org.jabref.logic.importer.FulltextFetcher; import org.jabref.logic.importer.ImportFormatPreferences; import org.jabref.logic.importer.PagedSearchBasedFetcher; import org.jabref.logic.importer.ParserResult; import org.jabref.logic.importer.fetcher.transformers.ScholarQueryTransformer; import org.jabref.logic.importer.fileformat.BibtexParser; import org.jabref.logic.l10n.Localization; import org.jabref.logic.net.URLDownload; import org.jabref.model.entry.BibEntry; import org.jabref.model.entry.field.StandardField; import org.jabref.model.paging.Page; import org.apache.http.client.utils.URIBuilder; import org.apache.lucene.queryparser.flexible.core.nodes.QueryNode; import org.jsoup.Jsoup; import org.jsoup.nodes.Document; import org.jsoup.select.Elements; import org.slf4j.Logger; import org.slf4j.LoggerFactory; /** * FulltextFetcher implementation that attempts to find a PDF URL at GoogleScholar. 
* <p> * Search String infos: https://scholar.google.com/intl/en/scholar/help.html#searching */ public class GoogleScholar implements FulltextFetcher, PagedSearchBasedFetcher { private static final Logger LOGGER = LoggerFactory.getLogger(GoogleScholar.class); private static final Pattern LINK_TO_BIB_PATTERN = Pattern.compile("(https:\\/\\/scholar.googleusercontent.com\\/scholar.bib[^\"]*)"); private static final String BASIC_SEARCH_URL = "https://scholar.google.ch/scholar?"; private static final int NUM_RESULTS = 10; private final ImportFormatPreferences importFormatPreferences; public GoogleScholar(ImportFormatPreferences importFormatPreferences) { Objects.requireNonNull(importFormatPreferences); this.importFormatPreferences = importFormatPreferences; } @Override public Optional<URL> findFullText(BibEntry entry) throws IOException, FetcherException { Objects.requireNonNull(entry); // Search in title if (!entry.hasField(StandardField.TITLE)) { return Optional.empty(); } try { // title search URIBuilder uriBuilder = new URIBuilder(BASIC_SEARCH_URL); uriBuilder.addParameter("as_q", ""); // as_epq as exact phrase uriBuilder.addParameter("as_epq", entry.getField(StandardField.TITLE).orElse("")); // as_occt field to search in uriBuilder.addParameter("as_occt", "title"); return search(uriBuilder.toString()); } catch (URISyntaxException e) { throw new FetcherException("Building URI failed.", e); } } @Override public TrustLevel getTrustLevel() { return TrustLevel.META_SEARCH; } private Optional<URL> search(String url) throws IOException { Optional<URL> pdfLink = Optional.empty(); Document doc = Jsoup.connect(url).userAgent(URLDownload.USER_AGENT).get(); if (needsCaptcha(doc.body().html())) { LOGGER.warn("Hit Google traffic limitation. Captcha prevents automatic fetching."); return Optional.empty(); } // Check results for PDF link // TODO: link always on first result or none? 
for (int i = 0; i < NUM_RESULTS; i++) { Elements link = doc.select(String.format("div[data-rp=%S] div.gs_or_ggsm a", i)); if (link.first() != null) { String target = link.first().attr("href"); // link present? if (!target.isEmpty() && new URLDownload(target).isPdf()) { // TODO: check title inside pdf + length? // TODO: report error function needed?! query -> result LOGGER.info("Fulltext PDF found @ Google: " + target); pdfLink = Optional.of(new URL(target)); break; } } } return pdfLink; } private boolean needsCaptcha(String body) { return body.contains("id=\"gs_captcha_ccl\""); } @Override public String getName() { return "Google Scholar"; } @Override public Optional<HelpFile> getHelpPage() { return Optional.of(HelpFile.FETCHER_GOOGLE_SCHOLAR); } private void addHitsFromQuery(List<BibEntry> entryList, String queryURL) throws IOException, FetcherException { String content = new URLDownload(queryURL).asString(); if (needsCaptcha(content)) { throw new FetcherException("Fetching from Google Scholar failed: Captacha hit at " + queryURL + ".", Localization.lang("This might be caused by reaching the traffic limitation of Google Scholar (see 'Help' for details)."), null); } Matcher matcher = LINK_TO_BIB_PATTERN.matcher(content); while (matcher.find()) { String citationsPageURL = matcher.group().replace("&amp;", "&"); BibEntry newEntry = downloadEntry(citationsPageURL); entryList.add(newEntry); } } private BibEntry downloadEntry(String link) throws IOException, FetcherException { String downloadedContent = new URLDownload(link).asString(); BibtexParser parser = new BibtexParser(importFormatPreferences); ParserResult result = parser.parse(new StringReader(downloadedContent)); if ((result == null) || (result.getDatabase() == null)) { throw new FetcherException("Parsing entries from Google Scholar bib file failed."); } else { Collection<BibEntry> entries = result.getDatabase().getEntries(); if (entries.size() != 1) { LOGGER.debug(entries.size() + " entries found! 
(" + link + ")"); throw new FetcherException("Parsing entries from Google Scholar bib file failed."); } else { BibEntry entry = entries.iterator().next(); return entry; } } } private void obtainAndModifyCookie() throws FetcherException { try { URLDownload downloader = new URLDownload("https://scholar.google.com"); List<HttpCookie> cookies = downloader.getCookieFromUrl(); for (HttpCookie cookie : cookies) { // append "CF=4" which represents "Citation format bibtex" cookie.setValue(cookie.getValue() + ":CF=4"); } } catch (IOException e) { throw new FetcherException("Cookie configuration for Google Scholar failed.", e); } } @Override public Page<BibEntry> performSearchPaged(QueryNode luceneQuery, int pageNumber) throws FetcherException { ScholarQueryTransformer queryTransformer = new ScholarQueryTransformer(); String transformedQuery = queryTransformer.transformLuceneQuery(luceneQuery).orElse(""); try { obtainAndModifyCookie(); List<BibEntry> foundEntries = new ArrayList<>(10); URIBuilder uriBuilder = new URIBuilder(BASIC_SEARCH_URL); uriBuilder.addParameter("hl", "en"); uriBuilder.addParameter("btnG", "Search"); uriBuilder.addParameter("q", transformedQuery); uriBuilder.addParameter("start", String.valueOf(pageNumber * getPageSize())); uriBuilder.addParameter("num", String.valueOf(getPageSize())); uriBuilder.addParameter("as_ylo", String.valueOf(queryTransformer.getStartYear())); uriBuilder.addParameter("as_yhi", String.valueOf(queryTransformer.getEndYear())); try { addHitsFromQuery(foundEntries, uriBuilder.toString()); if (foundEntries.size() == 10) { uriBuilder.addParameter("start", "10"); addHitsFromQuery(foundEntries, uriBuilder.toString()); } } catch (IOException e) { LOGGER.info("IOException for URL {}", uriBuilder.toString()); // if there are too much requests from the same IP adress google is answering with a 503 and redirecting to a captcha challenge // The caught IOException looks for example like this: // java.io.IOException: Server returned HTTP response 
code: 503 for URL: https://ipv4.google.com/sorry/index?continue=https://scholar.google.com/scholar%3Fhl%3Den%26btnG%3DSearch%26q%3Dbpmn&hl=en&q=CGMSBI0NBDkYuqy9wAUiGQDxp4NLQCWbIEY1HjpH5zFJhv4ANPGdWj0 if (e.getMessage().contains("Server returned HTTP response code: 503 for URL")) { throw new FetcherException("Fetching from Google Scholar failed.", Localization.lang("This might be caused by reaching the traffic limitation of Google Scholar (see 'Help' for details)."), e); } else { throw new FetcherException("Error while fetching from " + getName(), e); } } return new Page<>(transformedQuery, pageNumber, foundEntries); } catch (URISyntaxException e) { throw new FetcherException("Error while fetching from " + getName(), e); } } }
9,578
42.739726
269
java
null
jabref-main/src/main/java/org/jabref/logic/importer/fetcher/GrobidCitationFetcher.java
package org.jabref.logic.importer.fetcher; import java.io.IOException; import java.net.SocketTimeoutException; import java.util.Arrays; import java.util.Collections; import java.util.List; import java.util.Optional; import java.util.stream.Collectors; import org.jabref.logic.importer.FetcherException; import org.jabref.logic.importer.ImportFormatPreferences; import org.jabref.logic.importer.ParseException; import org.jabref.logic.importer.SearchBasedFetcher; import org.jabref.logic.importer.util.GrobidService; import org.jabref.model.entry.BibEntry; import org.apache.lucene.queryparser.flexible.core.nodes.QueryNode; import org.jooq.lambda.Unchecked; import org.jsoup.HttpStatusException; import org.slf4j.Logger; import org.slf4j.LoggerFactory; public class GrobidCitationFetcher implements SearchBasedFetcher { private static final Logger LOGGER = LoggerFactory.getLogger(GrobidCitationFetcher.class); private final ImportFormatPreferences importFormatPreferences; private final GrobidService grobidService; public GrobidCitationFetcher(GrobidPreferences grobidPreferences, ImportFormatPreferences importFormatPreferences) { this(importFormatPreferences, new GrobidService(grobidPreferences)); } GrobidCitationFetcher(ImportFormatPreferences importFormatPreferences, GrobidService grobidService) { this.importFormatPreferences = importFormatPreferences; this.grobidService = grobidService; } /** * Passes request to grobid server, using consolidateCitations option to improve result. Takes a while, since the * server has to look up the entry. 
* * @return A BibTeX string if extraction is successful * @throws FetcherException */ private Optional<BibEntry> parseUsingGrobid(String plainText) throws FetcherException { try { return grobidService.processCitation(plainText, importFormatPreferences, GrobidService.ConsolidateCitations.WITH_METADATA); } catch (HttpStatusException e) { String msg = "Connection failure."; LOGGER.debug(msg, e); throw new FetcherException(msg, e.getCause()); } catch (SocketTimeoutException e) { String msg = "Connection timed out."; LOGGER.debug(msg, e); throw new FetcherException(msg, e.getCause()); } catch (IOException | ParseException e) { String msg = "Could not process citation. " + e.getMessage(); LOGGER.debug(msg, e); return Optional.empty(); } } @Override public String getName() { return "GROBID"; } @Override public List<BibEntry> performSearch(String searchQuery) throws FetcherException { List<BibEntry> collect; collect = Arrays.stream(searchQuery.split("\\r\\r+|\\n\\n+|\\r\\n(\\r\\n)+")) .map(String::trim) .filter(str -> !str.isBlank()) .map(Unchecked.function(this::parseUsingGrobid)) .flatMap(Optional::stream) .collect(Collectors.toList()); return collect; } /** * Not used */ @Override public List<BibEntry> performSearch(QueryNode luceneQuery) throws FetcherException { return Collections.emptyList(); } }
3,345
36.177778
135
java
null
jabref-main/src/main/java/org/jabref/logic/importer/fetcher/GrobidPreferences.java
package org.jabref.logic.importer.fetcher;

import javafx.beans.property.BooleanProperty;
import javafx.beans.property.SimpleBooleanProperty;
import javafx.beans.property.SimpleStringProperty;
import javafx.beans.property.StringProperty;

/**
 * Holds the user's GROBID settings: whether the integration is enabled, whether
 * the user opted out of being asked about it, and the service URL.
 * State is kept in JavaFX properties so that UI bindings observe changes.
 */
public class GrobidPreferences {

    private final BooleanProperty grobidEnabled;
    private final BooleanProperty grobidOptOut;
    private final StringProperty grobidURL;

    /**
     * @param grobidEnabled whether the GROBID integration is switched on
     * @param grobidOptOut  whether the user declined to be asked about GROBID again
     * @param grobidURL     base URL of the GROBID service
     */
    public GrobidPreferences(boolean grobidEnabled, boolean grobidOptOut, String grobidURL) {
        this.grobidEnabled = new SimpleBooleanProperty(grobidEnabled);
        this.grobidOptOut = new SimpleBooleanProperty(grobidOptOut);
        this.grobidURL = new SimpleStringProperty(grobidURL);
    }

    // --- grobidEnabled ---

    public BooleanProperty grobidEnabledProperty() {
        return this.grobidEnabled;
    }

    public boolean isGrobidEnabled() {
        return this.grobidEnabled.get();
    }

    public void setGrobidEnabled(boolean grobidEnabled) {
        this.grobidEnabled.set(grobidEnabled);
    }

    // --- grobidOptOut ---

    public BooleanProperty grobidOptOutProperty() {
        return this.grobidOptOut;
    }

    public boolean isGrobidOptOut() {
        return this.grobidOptOut.get();
    }

    public void setGrobidOptOut(boolean grobidOptOut) {
        this.grobidOptOut.set(grobidOptOut);
    }

    // --- grobidURL ---

    public StringProperty grobidURLProperty() {
        return this.grobidURL;
    }

    public String getGrobidURL() {
        return this.grobidURL.get();
    }

    public void setGrobidURL(String grobidURL) {
        this.grobidURL.set(grobidURL);
    }
}
1,586
26.842105
70
java