repo
stringlengths 1
191
⌀ | file
stringlengths 23
351
| code
stringlengths 0
5.32M
| file_length
int64 0
5.32M
| avg_line_length
float64 0
2.9k
| max_line_length
int64 0
288k
| extension_type
stringclasses 1
value |
|---|---|---|---|---|---|---|
null |
jabref-main/src/main/java/org/jabref/logic/autosaveandbackup/AutosaveManager.java
|
package org.jabref.logic.autosaveandbackup;
import java.util.HashSet;
import java.util.Set;
import java.util.concurrent.ScheduledThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
import org.jabref.logic.util.CoarseChangeFilter;
import org.jabref.model.database.BibDatabaseContext;
import org.jabref.model.database.event.AutosaveEvent;
import org.jabref.model.database.event.BibDatabaseContextChangedEvent;
import com.google.common.eventbus.EventBus;
import com.google.common.eventbus.Subscribe;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* Saves the given {@link BibDatabaseContext} on every {@link BibDatabaseContextChangedEvent} by posting a new {@link AutosaveEvent}.
* An intelligent {@link ScheduledThreadPoolExecutor} prevents a high load while saving and rejects all redundant save tasks.
* The scheduled action is stored and canceled if a newer save action is proposed.
*/
public class AutosaveManager {

    private static final Logger LOGGER = LoggerFactory.getLogger(AutosaveManager.class);

    private static final int DELAY_BETWEEN_AUTOSAVE_ATTEMPTS_IN_SECONDS = 31;

    private static Set<AutosaveManager> runningInstances = new HashSet<>();

    private final BibDatabaseContext bibDatabaseContext;
    private final EventBus eventBus;
    private final CoarseChangeFilter changeFilter;
    private final ScheduledThreadPoolExecutor executor;

    // Set by the EventBus thread in listen() and read/cleared by the scheduled executor
    // thread; volatile makes the flag update visible across these threads.
    private volatile boolean needsSave = false;

    private AutosaveManager(BibDatabaseContext bibDatabaseContext) {
        this.bibDatabaseContext = bibDatabaseContext;
        this.eventBus = new EventBus();
        this.changeFilter = new CoarseChangeFilter(bibDatabaseContext);
        changeFilter.registerListener(this);
        this.executor = new ScheduledThreadPoolExecutor(2);
        // Periodically post an AutosaveEvent, but only if a change was recorded since the
        // last run. This coalesces bursts of change events into at most one save per period.
        this.executor.scheduleAtFixedRate(
                () -> {
                    if (needsSave) {
                        eventBus.post(new AutosaveEvent());
                        needsSave = false;
                    }
                },
                DELAY_BETWEEN_AUTOSAVE_ATTEMPTS_IN_SECONDS,
                DELAY_BETWEEN_AUTOSAVE_ATTEMPTS_IN_SECONDS,
                TimeUnit.SECONDS);
    }

    /**
     * Marks the database as changed so that the next scheduled run posts an {@link AutosaveEvent}.
     */
    @Subscribe
    public void listen(@SuppressWarnings("unused") BibDatabaseContextChangedEvent event) {
        if (!event.isFilteredOut()) {
            this.needsSave = true;
        }
    }

    private void shutdown() {
        changeFilter.unregisterListener(this);
        changeFilter.shutdown();
        executor.shutdown();
    }

    /**
     * Starts the Autosaver which is associated with the given {@link BibDatabaseContext}.
     *
     * @param bibDatabaseContext Associated {@link BibDatabaseContext}
     */
    public static AutosaveManager start(BibDatabaseContext bibDatabaseContext) {
        AutosaveManager autosaveManager = new AutosaveManager(bibDatabaseContext);
        runningInstances.add(autosaveManager);
        return autosaveManager;
    }

    /**
     * Shuts down the Autosaver which is associated with the given {@link BibDatabaseContext}.
     *
     * @param bibDatabaseContext Associated {@link BibDatabaseContext}
     */
    public static void shutdown(BibDatabaseContext bibDatabaseContext) {
        runningInstances.stream().filter(instance -> instance.bibDatabaseContext == bibDatabaseContext).findAny()
                        .ifPresent(instance -> {
                            instance.shutdown();
                            runningInstances.remove(instance);
                        });
    }

    public void registerListener(Object listener) {
        eventBus.register(listener);
    }

    public void unregisterListener(Object listener) {
        try {
            eventBus.unregister(listener);
        } catch (IllegalArgumentException e) {
            // occurs if the event source has not been registered, should not prevent shutdown
            LOGGER.debug("Problem unregistering", e);
        }
    }
}
| 3,954
| 35.962617
| 133
|
java
|
null |
jabref-main/src/main/java/org/jabref/logic/autosaveandbackup/BackupManager.java
|
package org.jabref.logic.autosaveandbackup;
import java.io.IOException;
import java.io.Writer;
import java.nio.charset.Charset;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.StandardCopyOption;
import java.nio.file.attribute.FileTime;
import java.util.HashSet;
import java.util.List;
import java.util.Optional;
import java.util.Queue;
import java.util.Set;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ScheduledThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
import org.jabref.logic.bibtex.InvalidFieldValueException;
import org.jabref.logic.exporter.AtomicFileWriter;
import org.jabref.logic.exporter.BibWriter;
import org.jabref.logic.exporter.BibtexDatabaseWriter;
import org.jabref.logic.exporter.SaveConfiguration;
import org.jabref.logic.util.BackupFileType;
import org.jabref.logic.util.CoarseChangeFilter;
import org.jabref.logic.util.io.BackupFileUtil;
import org.jabref.model.database.BibDatabaseContext;
import org.jabref.model.database.event.BibDatabaseContextChangedEvent;
import org.jabref.model.entry.BibEntryTypesManager;
import org.jabref.preferences.PreferencesService;
import com.google.common.eventbus.Subscribe;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* Backups the given bib database file from {@link BibDatabaseContext} on every {@link BibDatabaseContextChangedEvent}.
* An intelligent {@link ExecutorService} with a {@link BlockingQueue} prevents a high load while making backups and
* rejects all redundant backup tasks. This class does not manage the .bak file which is created when opening a
* database.
*/
public class BackupManager {

    private static final Logger LOGGER = LoggerFactory.getLogger(BackupManager.class);

    private static final int MAXIMUM_BACKUP_FILE_COUNT = 10;
    private static final int DELAY_BETWEEN_BACKUP_ATTEMPTS_IN_SECONDS = 19;

    private static Set<BackupManager> runningInstances = new HashSet<>();

    private final BibDatabaseContext bibDatabaseContext;
    private final PreferencesService preferences;
    private final ScheduledThreadPoolExecutor executor;
    private final CoarseChangeFilter changeFilter;
    private final BibEntryTypesManager entryTypesManager;

    // Contains a list of all backup paths
    // During a write, the least recent backup file is deleted
    private final Queue<Path> backupFilesQueue = new LinkedBlockingQueue<>();

    // Set by the EventBus thread in listen() and read/cleared by the scheduled executor
    // thread in performBackup(); volatile makes the flag update visible across threads.
    private volatile boolean needsBackup = false;

    BackupManager(BibDatabaseContext bibDatabaseContext, BibEntryTypesManager entryTypesManager, PreferencesService preferences) {
        this.bibDatabaseContext = bibDatabaseContext;
        this.entryTypesManager = entryTypesManager;
        this.preferences = preferences;
        this.executor = new ScheduledThreadPoolExecutor(2);
        changeFilter = new CoarseChangeFilter(bibDatabaseContext);
        changeFilter.registerListener(this);
    }

    /**
     * Determines the most recent backup file name
     */
    static Path getBackupPathForNewBackup(Path originalPath, Path backupDir) {
        return BackupFileUtil.getPathForNewBackupFileAndCreateDirectory(originalPath, BackupFileType.BACKUP, backupDir);
    }

    /**
     * Determines the most recent existing backup file name
     */
    static Optional<Path> getLatestBackupPath(Path originalPath, Path backupDir) {
        return BackupFileUtil.getPathOfLatestExistingBackupFile(originalPath, BackupFileType.BACKUP, backupDir);
    }

    /**
     * Starts the BackupManager which is associated with the given {@link BibDatabaseContext}. As long as no database
     * file is present in {@link BibDatabaseContext}, the {@link BackupManager} will do nothing.
     *
     * This method is not thread-safe. The caller has to ensure that this method is not called in parallel.
     *
     * @param bibDatabaseContext Associated {@link BibDatabaseContext}
     */
    public static BackupManager start(BibDatabaseContext bibDatabaseContext, BibEntryTypesManager entryTypesManager, PreferencesService preferences) {
        BackupManager backupManager = new BackupManager(bibDatabaseContext, entryTypesManager, preferences);
        backupManager.startBackupTask(preferences.getFilePreferences().getBackupDirectory());
        runningInstances.add(backupManager);
        return backupManager;
    }

    /**
     * Marks the backup as discarded at the library which is associated with the given {@link BibDatabaseContext}.
     *
     * @param bibDatabaseContext Associated {@link BibDatabaseContext}
     */
    public static void discardBackup(BibDatabaseContext bibDatabaseContext, Path backupDir) {
        runningInstances.stream().filter(instance -> instance.bibDatabaseContext == bibDatabaseContext).forEach(backupManager -> backupManager.discardBackup(backupDir));
    }

    /**
     * Shuts down the BackupManager which is associated with the given {@link BibDatabaseContext}.
     *
     * @param bibDatabaseContext Associated {@link BibDatabaseContext}
     * @param createBackup True, if a backup should be created
     * @param backupDir The path to the backup directory
     */
    public static void shutdown(BibDatabaseContext bibDatabaseContext, Path backupDir, boolean createBackup) {
        runningInstances.stream().filter(instance -> instance.bibDatabaseContext == bibDatabaseContext).forEach(backupManager -> backupManager.shutdown(backupDir, createBackup));
        runningInstances.removeIf(instance -> instance.bibDatabaseContext == bibDatabaseContext);
    }

    /**
     * Checks whether a backup file exists for the given database file. If it exists, it is checked whether it is
     * newer and different from the original.
     *
     * In case a discarded file is present, the method also returns <code>false</code>, See also {@link #discardBackup(Path)}.
     *
     * @param originalPath Path to the file a backup should be checked for. Example: jabref.bib.
     *
     * @return <code>true</code> if backup file exists AND differs from originalPath. <code>false</code> is the
     * "default" return value in the good case. In case a discarded file exists, <code>false</code> is returned, too.
     * In the case of an exception <code>true</code> is returned to ensure that the user checks the output.
     */
    public static boolean backupFileDiffers(Path originalPath, Path backupDir) {
        Path discardedFile = determineDiscardedFile(originalPath, backupDir);
        if (Files.exists(discardedFile)) {
            try {
                Files.delete(discardedFile);
            } catch (IOException e) {
                LOGGER.error("Could not remove discarded file {}", discardedFile, e);
                return true;
            }
            return false;
        }
        return getLatestBackupPath(originalPath, backupDir).map(latestBackupPath -> {
            FileTime latestBackupFileLastModifiedTime;
            try {
                latestBackupFileLastModifiedTime = Files.getLastModifiedTime(latestBackupPath);
            } catch (IOException e) {
                LOGGER.debug("Could not get timestamp of backup file {}", latestBackupPath, e);
                // If we cannot get the timestamp, we do not show any warning
                return false;
            }
            FileTime currentFileLastModifiedTime;
            try {
                currentFileLastModifiedTime = Files.getLastModifiedTime(originalPath);
            } catch (IOException e) {
                LOGGER.debug("Could not get timestamp of current file {}", originalPath, e);
                // If we cannot get the timestamp, we do not show any warning
                return false;
            }
            if (latestBackupFileLastModifiedTime.compareTo(currentFileLastModifiedTime) <= 0) {
                // Backup is older than current file
                // We treat the backup as non-different (even if it could differ)
                return false;
            }
            try {
                return Files.mismatch(originalPath, latestBackupPath) != -1L;
            } catch (IOException e) {
                LOGGER.debug("Could not compare original file and backup file.", e);
                // User has to investigate in this case
                return true;
            }
        }).orElse(false);
    }

    /**
     * Restores the backup file by copying and overwriting the original one.
     *
     * @param originalPath Path to the file which should be equalized to the backup file.
     */
    public static void restoreBackup(Path originalPath, Path backupDir) {
        Optional<Path> backupPath = getLatestBackupPath(originalPath, backupDir);
        if (backupPath.isEmpty()) {
            LOGGER.error("There is no backup file");
            return;
        }
        try {
            Files.copy(backupPath.get(), originalPath, StandardCopyOption.REPLACE_EXISTING);
        } catch (IOException e) {
            LOGGER.error("Error while restoring the backup file.", e);
        }
    }

    Optional<Path> determineBackupPathForNewBackup(Path backupDir) {
        return bibDatabaseContext.getDatabasePath().map(path -> BackupManager.getBackupPathForNewBackup(path, backupDir));
    }

    /**
     * This method is called as soon as the scheduler says: "Do the backup"
     *
     * <em>SIDE EFFECT: Deletes oldest backup file</em>
     *
     * @param backupPath the full path to the file where the library should be backed up to
     */
    void performBackup(Path backupPath) {
        if (!needsBackup) {
            return;
        }
        // We opted for "while" to delete backups in case there are more than 10
        while (backupFilesQueue.size() >= MAXIMUM_BACKUP_FILE_COUNT) {
            Path lessRecentBackupFile = backupFilesQueue.poll();
            try {
                Files.delete(lessRecentBackupFile);
            } catch (IOException e) {
                LOGGER.error("Could not delete backup file {}", lessRecentBackupFile, e);
            }
        }
        // code similar to org.jabref.gui.exporter.SaveDatabaseAction.saveDatabase
        SaveConfiguration saveConfiguration = new SaveConfiguration()
                .withMakeBackup(false)
                .withMetadataSaveOrder(true)
                .withReformatOnSave(preferences.getLibraryPreferences().shouldAlwaysReformatOnSave());
        Charset encoding = bibDatabaseContext.getMetaData().getEncoding().orElse(StandardCharsets.UTF_8);
        // We want to have successful backups only
        // Thus, we do not use a plain "FileWriter", but the "AtomicFileWriter"
        // Example: What happens if one hard powers off the machine (or kills the jabref process) during the write of the backup?
        // This MUST NOT create a broken backup file that then jabref wants to "restore" from.
        try (Writer writer = new AtomicFileWriter(backupPath, encoding, false)) {
            BibWriter bibWriter = new BibWriter(writer, bibDatabaseContext.getDatabase().getNewLineSeparator());
            new BibtexDatabaseWriter(
                    bibWriter,
                    saveConfiguration,
                    preferences.getFieldPreferences(),
                    preferences.getCitationKeyPatternPreferences(),
                    entryTypesManager)
                    .saveDatabase(bibDatabaseContext);
            backupFilesQueue.add(backupPath);
            // We wrote the file successfully
            // Thus, we currently do not need any new backup
            this.needsBackup = false;
        } catch (IOException e) {
            logIfCritical(backupPath, e);
        }
    }

    private static Path determineDiscardedFile(Path file, Path backupDir) {
        return backupDir.resolve(BackupFileUtil.getUniqueFilePrefix(file) + "--" + file.getFileName() + "--discarded");
    }

    /**
     * Marks the backups as discarded.
     *
     * We do not delete any files, because the user might want to recover old backup files.
     * Therefore, we mark discarded backups by a --discarded file.
     */
    public void discardBackup(Path backupDir) {
        Path path = determineDiscardedFile(bibDatabaseContext.getDatabasePath().get(), backupDir);
        try {
            Files.createFile(path);
        } catch (IOException e) {
            LOGGER.info("Could not create backup file {}", path, e);
        }
    }

    private void logIfCritical(Path backupPath, IOException e) {
        Throwable innermostCause = e;
        while (innermostCause.getCause() != null) {
            innermostCause = innermostCause.getCause();
        }
        boolean isErrorInField = innermostCause instanceof InvalidFieldValueException;
        // do not print errors in field values into the log during autosave
        if (!isErrorInField) {
            LOGGER.error("Error while saving to file {}", backupPath, e);
        }
    }

    /**
     * Marks the library as changed so that the next scheduled run performs a backup.
     */
    @Subscribe
    public synchronized void listen(@SuppressWarnings("unused") BibDatabaseContextChangedEvent event) {
        if (!event.isFilteredOut()) {
            this.needsBackup = true;
        }
    }

    private void startBackupTask(Path backupDir) {
        fillQueue(backupDir);
        executor.scheduleAtFixedRate(
                // We need to determine the backup path on each action, because we use the timestamp in the filename
                () -> determineBackupPathForNewBackup(backupDir).ifPresent(path -> this.performBackup(path)),
                DELAY_BETWEEN_BACKUP_ATTEMPTS_IN_SECONDS,
                DELAY_BETWEEN_BACKUP_ATTEMPTS_IN_SECONDS,
                TimeUnit.SECONDS);
    }

    /**
     * Pre-populates {@link #backupFilesQueue} with the backup files already on disk for this
     * library, so the MAXIMUM_BACKUP_FILE_COUNT cap also covers backups from earlier sessions.
     */
    private void fillQueue(Path backupDir) {
        if (!Files.exists(backupDir)) {
            return;
        }
        bibDatabaseContext.getDatabasePath().ifPresent(databasePath -> {
            // code similar to {@link org.jabref.logic.util.io.BackupFileUtil.getPathOfLatestExisingBackupFile}
            final String prefix = BackupFileUtil.getUniqueFilePrefix(databasePath) + "--" + databasePath.getFileName();
            try {
                List<Path> allSavFiles = Files.list(backupDir)
                                              // just list the .sav belonging to the given targetFile
                                              .filter(p -> p.getFileName().toString().startsWith(prefix))
                                              .sorted().toList();
                backupFilesQueue.addAll(allSavFiles);
            } catch (IOException e) {
                LOGGER.error("Could not determine most recent file", e);
            }
        });
    }

    /**
     * Unregisters the BackupManager from the eventBus of {@link BibDatabaseContext}.
     * This method should only be used when closing a database/JabRef in a normal way.
     *
     * @param backupDir The backup directory
     * @param createBackup If the backup manager should still perform a backup
     */
    private void shutdown(Path backupDir, boolean createBackup) {
        changeFilter.unregisterListener(this);
        changeFilter.shutdown();
        executor.shutdown();
        if (createBackup) {
            // Ensure that backup is a recent one
            determineBackupPathForNewBackup(backupDir).ifPresent(this::performBackup);
        }
    }
}
| 15,445
| 44.163743
| 178
|
java
|
null |
jabref-main/src/main/java/org/jabref/logic/auxparser/AuxParser.java
|
package org.jabref.logic.auxparser;
import java.nio.file.Path;
/**
 * Parses a LaTeX AUX file into an {@link AuxParserResult}, which contains the generated
 * BibDatabase together with parsing statistics.
 */
public interface AuxParser {
    /**
     * Executes the parsing logic and returns a result containing all information and the generated BibDatabase.
     *
     * @param auxFile Path to the LaTeX AUX file
     * @return an AuxParserResult containing the generated BibDatabase and parsing statistics
     */
    AuxParserResult parse(Path auxFile);
}
| 416
| 28.785714
| 112
|
java
|
null |
jabref-main/src/main/java/org/jabref/logic/auxparser/AuxParserResult.java
|
package org.jabref.logic.auxparser;
import java.util.ArrayList;
import java.util.Collection;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import org.jabref.model.database.BibDatabase;
import org.jabref.model.entry.BibtexString;
public class AuxParserResult {

    private final BibDatabase masterDatabase;
    private final Set<String> uniqueKeys = new HashSet<>();
    private final List<String> unresolvedKeys = new ArrayList<>();
    private final BibDatabase auxDatabase = new BibDatabase();

    private int nestedAuxCount;
    private int crossRefEntriesCount;
    private int insertedStrings;

    public AuxParserResult(BibDatabase masterDatabase) {
        this.masterDatabase = masterDatabase;
    }

    /** The database built from the entries referenced in the AUX file. */
    public BibDatabase getGeneratedBibDatabase() {
        return auxDatabase;
    }

    /** The reference database the AUX citations were resolved against. */
    public BibDatabase getMasterDatabase() {
        return masterDatabase;
    }

    /** Mutable set of distinct citation keys found in the AUX file; the parser adds to it directly. */
    public Set<String> getUniqueKeys() {
        return uniqueKeys;
    }

    /** Mutable list of citation keys that could not be resolved; the parser adds to it directly. */
    public List<String> getUnresolvedKeys() {
        return unresolvedKeys;
    }

    public int getFoundKeysInAux() {
        return uniqueKeys.size();
    }

    // Entries pulled in only via crossref are not counted as "resolved" keys.
    public int getResolvedKeysCount() {
        return auxDatabase.getEntryCount() - crossRefEntriesCount;
    }

    public int getUnresolvedKeysCount() {
        return unresolvedKeys.size();
    }

    /**
     * Query the number of extra entries pulled in due to crossrefs from other entries.
     *
     * @return The number of additional entries pulled in due to crossref
     */
    public int getCrossRefEntriesCount() {
        return crossRefEntriesCount;
    }

    public int getNestedAuxCount() {
        return nestedAuxCount;
    }

    public int getInsertedStrings() {
        return insertedStrings;
    }

    public void increaseCrossRefEntriesCounter() {
        crossRefEntriesCount++;
    }

    public void increaseNestedAuxFilesCounter() {
        nestedAuxCount++;
    }

    /** Copies the given BibTeX strings into the generated database, counting each insertion. */
    public void insertStrings(Collection<BibtexString> usedStrings) {
        for (BibtexString usedString : usedStrings) {
            auxDatabase.addString(usedString);
            insertedStrings++;
        }
    }
}
| 2,156
| 23.793103
| 87
|
java
|
null |
jabref-main/src/main/java/org/jabref/logic/auxparser/DefaultAuxParser.java
|
package org.jabref.logic.auxparser;
import java.io.BufferedReader;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.ArrayList;
import java.util.List;
import java.util.Optional;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import org.jabref.model.database.BibDatabase;
import org.jabref.model.entry.BibEntry;
import org.jabref.model.entry.field.StandardField;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* LaTeX Aux to BibTeX Parser
* <p>
* Extracts a subset of BibTeX entries from a BibDatabase that are included in an AUX file. Also supports nested AUX
* files (latex \\include).
*
* There exists no specification of the AUX file. Every package, class or document can write to the AUX file. The AUX
* file consists of LaTeX macros and is read at the \begin{document} and again at the \end{document}.
*
* BibTeX citation: \citation{x,y,z} Biblatex citation: \abx@aux@cite{x,y,z} Nested AUX files: \@input{x}
*/
public class DefaultAuxParser implements AuxParser {

    private static final Logger LOGGER = LoggerFactory.getLogger(DefaultAuxParser.class);

    // BibTeX: \citation{x,y,z} — biblatex: \abx@aux@cite{x,y,z}, optionally with a numeric group
    private static final Pattern CITE_PATTERN = Pattern.compile("\\\\(citation|abx@aux@cite)(\\{\\d+\\})?\\{(?<citationkey>.+)\\}");
    // Nested AUX files: \@input{x}
    private static final Pattern INPUT_PATTERN = Pattern.compile("\\\\@input\\{(.+)\\}");

    private final BibDatabase masterDatabase;

    /**
     * Generates a database based on the given AUX file and BibTeX database
     *
     * @param database BibTeX database
     */
    public DefaultAuxParser(BibDatabase database) {
        masterDatabase = database;
    }

    @Override
    public AuxParserResult parse(Path auxFile) {
        return parseAuxFile(auxFile);
    }

    /**
     * Scans the AUX file (plus any nested AUX files discovered on the way) line by line for
     * citations and nested \@input directives, then resolves the collected keys against the
     * master database.
     */
    private AuxParserResult parseAuxFile(Path auxFile) {
        AuxParserResult result = new AuxParserResult(masterDatabase);
        // Worklist of AUX files; matchNestedAux appends newly discovered nested files,
        // so we iterate by index rather than with an Iterator.
        List<Path> fileList = new ArrayList<>(1);
        fileList.add(auxFile);
        int fileIndex = 0;
        while (fileIndex < fileList.size()) {
            Path file = fileList.get(fileIndex);
            try (BufferedReader br = Files.newBufferedReader(file)) {
                String line;
                while ((line = br.readLine()) != null) {
                    matchCitation(result, line);
                    matchNestedAux(auxFile, result, fileList, line);
                }
            } catch (FileNotFoundException e) {
                LOGGER.warn("Cannot locate input file", e);
            } catch (IOException e) {
                LOGGER.warn("Problem opening file", e);
            }
            fileIndex++;
        }
        resolveTags(result);
        return result;
    }

    /**
     * Adds any AUX files referenced via \@input on the given line to the worklist.
     * Relative paths are resolved against the parent of the root AUX file.
     */
    private void matchNestedAux(Path baseAuxFile, AuxParserResult result, List<Path> fileList, String line) {
        Matcher inputMatch = INPUT_PATTERN.matcher(line);
        while (inputMatch.find()) {
            String inputString = inputMatch.group(1);
            Path inputFile;
            Path rootPath = baseAuxFile.getParent();
            if (rootPath != null) {
                inputFile = rootPath.resolve(inputString);
            } else {
                inputFile = Path.of(inputString);
            }
            if (!fileList.contains(inputFile)) {
                fileList.add(inputFile);
                result.increaseNestedAuxFilesCounter();
            }
        }
    }

    /**
     * Collects all citation keys found on the given line into the result's unique-key set.
     */
    private void matchCitation(AuxParserResult result, String line) {
        Matcher citeMatch = CITE_PATTERN.matcher(line);
        while (citeMatch.find()) {
            String keyString = citeMatch.group("citationkey");
            String[] keys = keyString.split(",");
            for (String key : keys) {
                result.getUniqueKeys().add(key.trim());
            }
        }
    }

    /**
     * Try to find an equivalent BibTeX entry inside the reference database for all keys inside the AUX file.
     *
     * @param result AUX file
     */
    private void resolveTags(AuxParserResult result) {
        List<BibEntry> entriesToInsert = new ArrayList<>();
        for (String key : result.getUniqueKeys()) {
            if (result.getGeneratedBibDatabase().getEntryByCitationKey(key).isEmpty()) {
                Optional<BibEntry> entry = masterDatabase.getEntryByCitationKey(key);
                if (entry.isPresent()) {
                    entriesToInsert.add(entry.get());
                } else {
                    result.getUnresolvedKeys().add(key);
                }
            }
        }
        insertEntries(entriesToInsert, result);
        resolveCrossReferences(entriesToInsert, result);
        // Copy database definitions
        if (result.getGeneratedBibDatabase().hasEntries()) {
            result.getGeneratedBibDatabase().copyPreamble(masterDatabase);
            result.insertStrings(masterDatabase.getUsedStrings(result.getGeneratedBibDatabase().getEntries()));
        }
    }

    /**
     * Resolves and adds CrossRef entries to insert them in addition to the original entries
     *
     * @param entries Entries to check for CrossRefs
     * @param result AUX file
     */
    private void resolveCrossReferences(List<BibEntry> entries, AuxParserResult result) {
        List<BibEntry> entriesToInsert = new ArrayList<>();
        for (BibEntry entry : entries) {
            entry.getField(StandardField.CROSSREF).ifPresent(crossref -> {
                if (result.getGeneratedBibDatabase().getEntryByCitationKey(crossref).isEmpty()) {
                    Optional<BibEntry> refEntry = masterDatabase.getEntryByCitationKey(crossref);
                    if (refEntry.isPresent()) {
                        if (!entriesToInsert.contains(refEntry.get())) {
                            entriesToInsert.add(refEntry.get());
                            result.increaseCrossRefEntriesCounter();
                        }
                    } else {
                        result.getUnresolvedKeys().add(crossref);
                    }
                }
            });
        }
        insertEntries(entriesToInsert, result);
    }

    /**
     * Insert a clone of each given entry. The clones are each given a new unique ID.
     *
     * @param entries Entries to be cloned
     * @param result the parser result (representing the AUX file)
     */
    private void insertEntries(List<BibEntry> entries, AuxParserResult result) {
        List<BibEntry> clonedEntries = new ArrayList<>();
        for (BibEntry entry : entries) {
            BibEntry bibEntryToAdd = (BibEntry) entry.clone();
            // ensure proper "rendering" of the BibTeX code
            bibEntryToAdd.setChanged(true);
            clonedEntries.add(bibEntryToAdd);
        }
        result.getGeneratedBibDatabase().insertEntries(clonedEntries);
    }
}
| 6,871
| 34.791667
| 132
|
java
|
null |
jabref-main/src/main/java/org/jabref/logic/bibtex/BibEntryWriter.java
|
package org.jabref.logic.bibtex;
import java.io.IOException;
import java.io.StringWriter;
import java.util.Collection;
import java.util.Comparator;
import java.util.HashSet;
import java.util.List;
import java.util.Locale;
import java.util.Optional;
import java.util.Set;
import java.util.SortedSet;
import java.util.TreeSet;
import java.util.function.Predicate;
import java.util.stream.Collectors;
import org.jabref.logic.TypedBibEntry;
import org.jabref.logic.exporter.BibWriter;
import org.jabref.logic.util.OS;
import org.jabref.model.database.BibDatabaseMode;
import org.jabref.model.entry.BibEntry;
import org.jabref.model.entry.BibEntryType;
import org.jabref.model.entry.BibEntryTypesManager;
import org.jabref.model.entry.field.BibField;
import org.jabref.model.entry.field.Field;
import org.jabref.model.entry.field.InternalField;
import org.jabref.model.strings.StringUtil;
import org.slf4j.LoggerFactory;
/**
 * Serializes {@link BibEntry} objects to BibTeX/biblatex source, writing required fields first,
 * then optional fields, then all remaining fields in alphabetical order.
 */
public class BibEntryWriter {

    private static final org.slf4j.Logger LOGGER = LoggerFactory.getLogger(BibEntryWriter.class);

    private final BibEntryTypesManager entryTypesManager;
    private final FieldWriter fieldWriter;

    public BibEntryWriter(FieldWriter fieldWriter, BibEntryTypesManager entryTypesManager) {
        this.fieldWriter = fieldWriter;
        this.entryTypesManager = entryTypesManager;
    }

    /**
     * Serializes all given entries into a single string using the OS newline separator.
     */
    public String serializeAll(List<BibEntry> entries, BibDatabaseMode databaseMode) throws IOException {
        StringWriter writer = new StringWriter();
        BibWriter bibWriter = new BibWriter(writer, OS.NEWLINE);
        for (BibEntry entry : entries) {
            write(entry, bibWriter, databaseMode);
        }
        return writer.toString();
    }

    public void write(BibEntry entry, BibWriter out, BibDatabaseMode bibDatabaseMode) throws IOException {
        write(entry, out, bibDatabaseMode, false);
    }

    /**
     * Writes the given BibEntry using the given writer
     *
     * @param entry The entry to write
     * @param out The writer to use
     * @param bibDatabaseMode The database mode (bibtex or biblatex)
     * @param reformat Should the entry be rewritten in any case, even if no change occurred?
     */
    public void write(BibEntry entry, BibWriter out, BibDatabaseMode bibDatabaseMode, Boolean reformat) throws IOException {
        // if the entry has not been modified, write it as it was
        if (!reformat && !entry.hasChanged()) {
            out.write(entry.getParsedSerialization());
            out.finishBlock();
            return;
        }
        writeUserComments(entry, out);
        writeRequiredFieldsFirstRemainingFieldsSecond(entry, out, bibDatabaseMode);
        out.finishBlock();
    }

    private void writeUserComments(BibEntry entry, BibWriter out) throws IOException {
        String userComments = entry.getUserComments();
        if (!userComments.isEmpty()) {
            out.write(userComments);
            // ensure that a line break appears after the comment
            out.finishLine();
        }
    }

    /**
     * Writes fields in the order of requiredFields, optionalFields and other fields, but does not sort the fields.
     */
    private void writeRequiredFieldsFirstRemainingFieldsSecond(BibEntry entry, BibWriter out,
                                                               BibDatabaseMode bibDatabaseMode) throws IOException {
        // Write header with type and bibtex-key
        TypedBibEntry typedEntry = new TypedBibEntry(entry, bibDatabaseMode);
        out.write('@' + typedEntry.getTypeForDisplay() + '{');
        writeKeyField(entry, out);
        Set<Field> written = new HashSet<>();
        written.add(InternalField.KEY_FIELD);
        final int indent = getLengthOfLongestFieldName(entry);
        Optional<BibEntryType> type = entryTypesManager.enrich(entry.getType(), bibDatabaseMode);
        if (type.isPresent()) {
            // Write required fields first
            List<Field> requiredFields = type.get()
                                             .getRequiredFields()
                                             .stream()
                                             .flatMap(Collection::stream)
                                             .sorted(Comparator.comparing(Field::getName))
                                             .toList();
            for (Field field : requiredFields) {
                writeField(entry, out, field, indent);
            }
            // Then optional fields
            List<Field> optionalFields = type.get()
                                             .getOptionalFields()
                                             .stream()
                                             .map(BibField::field)
                                             .sorted(Comparator.comparing(Field::getName))
                                             .toList();
            for (Field field : optionalFields) {
                writeField(entry, out, field, indent);
            }
            written.addAll(requiredFields);
            written.addAll(optionalFields);
        }
        // Then write remaining fields in alphabetic order.
        SortedSet<Field> remainingFields = entry.getFields()
                                                .stream()
                                                .filter(key -> !written.contains(key))
                                                .collect(Collectors.toCollection(() -> new TreeSet<>(Comparator.comparing(Field::getName))));
        for (Field field : remainingFields) {
            writeField(entry, out, field, indent);
        }
        // Finally, end the entry.
        out.writeLine("}");
    }

    private void writeKeyField(BibEntry entry, BibWriter out) throws IOException {
        String keyField = StringUtil.shaveString(entry.getCitationKey().orElse(""));
        out.writeLine(keyField + ',');
    }

    /**
     * Write a single field, if it has any content.
     *
     * @param entry the entry to write
     * @param out the target of the write
     * @param field the field
     * @throws IOException In case of an IO error
     */
    private void writeField(BibEntry entry, BibWriter out, Field field, int indent) throws IOException {
        Optional<String> value = entry.getField(field);
        // only write field if it is not empty
        // field.ifPresent does not work as an IOException may be thrown
        if (value.isPresent() && !value.get().trim().isEmpty()) {
            out.write("  ");
            out.write(getFormattedFieldName(field, indent));
            try {
                out.write(fieldWriter.write(field, value.get()));
            } catch (InvalidFieldValueException ex) {
                // "{]" was a broken SLF4J placeholder; fixed to "{}" so the citation key is logged
                LOGGER.warn("Invalid field value {} of field {} of entry {}", value.get(), field, entry.getCitationKey().orElse(""), ex);
                throw new IOException("Error in field '" + field + " of entry " + entry.getCitationKey().orElse("") + "': " + ex.getMessage(), ex);
            }
            out.writeLine(",");
        }
    }

    static int getLengthOfLongestFieldName(BibEntry entry) {
        Predicate<Field> isNotCitationKey = field -> InternalField.KEY_FIELD != field;
        return entry.getFields()
                    .stream()
                    .filter(isNotCitationKey)
                    .mapToInt(field -> field.getName().length())
                    .max()
                    .orElse(0);
    }

    /**
     * Get display version of an entry field.
     * <p>
     * BibTeX is case-insensitive therefore there is no difference between: howpublished, HOWPUBLISHED, HowPublished, etc.
     * <p>
     * There was a long discussion about how JabRef should write the fields. See https://github.com/JabRef/jabref/issues/116
     * <p>
     * The team decided to do the biblatex way and use lower case for the field names.
     *
     * @param field The name of the field.
     * @return The display version of the field name.
     */
    static String getFormattedFieldName(Field field, int indent) {
        String fieldName = field.getName();
        return fieldName.toLowerCase(Locale.ROOT) + StringUtil.repeatSpaces(indent - fieldName.length()) + " = ";
    }
}
| 8,266
| 39.925743
| 147
|
java
|
null |
jabref-main/src/main/java/org/jabref/logic/bibtex/FieldContentFormatter.java
|
package org.jabref.logic.bibtex;
import java.util.HashSet;
import java.util.Objects;
import java.util.Set;
import java.util.regex.Pattern;
import org.jabref.model.entry.field.Field;
import org.jabref.model.entry.field.StandardField;
/**
* This class provides the reformatting needed when reading BibTeX fields formatted
* in JabRef style. The reformatting must undo all formatting done by JabRef when
* writing the same fields.
*/
public class FieldContentFormatter {

    // \s matches a space, tab, new line, or carriage return.
    private static final Pattern WHITESPACE = Pattern.compile("\\s+");

    // Fields whose content is passed through verbatim instead of being whitespace-collapsed.
    private final Set<Field> multiLineFields;

    public FieldContentFormatter(FieldPreferences preferences) {
        Objects.requireNonNull(preferences);
        // the file field should not be formatted, therefore user-configured non-wrappable
        // fields are treated as multi line fields as well
        multiLineFields = new HashSet<>(preferences.getNonWrappableFields());
        // the following three are also coded in org.jabref.logic.bibtex.LatexFieldFormatter.format(String, String)
        multiLineFields.add(StandardField.ABSTRACT);
        multiLineFields.add(StandardField.COMMENT);
        multiLineFields.add(StandardField.REVIEW);
    }

    /**
     * Performs the reformatting
     *
     * @param fieldContent the content to format
     * @param field the name of the bibtex field
     * @return the formatted field content.
     */
    public String format(String fieldContent, Field field) {
        if (multiLineFields.contains(field)) {
            // Keep the field as is.
            // Newlines are normalized at org.jabref.logic.exporter.BibWriter
            // Alternative: StringUtil.unifyLineBreaks(fieldContent, OS.NEWLINE)
            return fieldContent;
        }
        return WHITESPACE.matcher(fieldContent).replaceAll(" ");
    }

    public String format(StringBuilder fieldContent, Field field) {
        return format(fieldContent.toString(), field);
    }
}
| 2,006
| 34.210526
| 113
|
java
|
null |
jabref-main/src/main/java/org/jabref/logic/bibtex/FieldPreferences.java
|
package org.jabref.logic.bibtex;
import java.util.Collection;
import java.util.List;
import javafx.beans.property.BooleanProperty;
import javafx.beans.property.SimpleBooleanProperty;
import javafx.collections.FXCollections;
import javafx.collections.ObservableList;
import org.jabref.model.entry.field.Field;
/**
 * Preference holder controlling how field content is read and written: whether BibTeX
 * strings are resolved, for which fields resolution applies, and which fields must not
 * be line-wrapped on write.
 */
public class FieldPreferences {

    private final BooleanProperty resolveStrings = new SimpleBooleanProperty();
    private final ObservableList<Field> resolvableFields;
    private final ObservableList<Field> nonWrappableFields;

    /**
     * @param resolveStrings     true - the character {@link FieldWriter#BIBTEX_STRING_START_END_SYMBOL} should be interpreted as indicator of BibTeX strings
     * @param resolvableFields   fields for which BibTeX string resolution is performed
     * @param nonWrappableFields fields whose content must not be wrapped when written
     */
    public FieldPreferences(boolean resolveStrings, List<Field> resolvableFields, List<Field> nonWrappableFields) {
        this.resolveStrings.set(resolveStrings);
        this.resolvableFields = FXCollections.observableArrayList(resolvableFields);
        this.nonWrappableFields = FXCollections.observableArrayList(nonWrappableFields);
    }

    public boolean shouldResolveStrings() {
        return resolveStrings.get();
    }

    public void setResolveStrings(boolean resolveStrings) {
        this.resolveStrings.set(resolveStrings);
    }

    public BooleanProperty resolveStringsProperty() {
        return resolveStrings;
    }

    public ObservableList<Field> getResolvableFields() {
        return resolvableFields;
    }

    /** Replaces the current set of resolvable fields with the given collection. */
    public void setResolvableFields(Collection<Field> list) {
        resolvableFields.clear();
        resolvableFields.addAll(list);
    }

    public ObservableList<Field> getNonWrappableFields() {
        return nonWrappableFields;
    }

    /** Replaces the current set of non-wrappable fields with the given collection. */
    public void setNonWrappableFields(Collection<Field> list) {
        nonWrappableFields.clear();
        nonWrappableFields.addAll(list);
    }
}
| 1,902
| 30.716667
| 153
|
java
|
null |
jabref-main/src/main/java/org/jabref/logic/bibtex/FieldWriter.java
|
package org.jabref.logic.bibtex;
import org.jabref.model.entry.field.Field;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * Converts JabRef's internal BibTeX representation of a BibTeX field to BibTeX text representation.
 * <p>
 * Responsible for brace validation and, for configured fields, for resolving BibTeX strings,
 * which users mark by enclosing the string label in pairs of {@link #BIBTEX_STRING_START_END_SYMBOL}.
 */
public class FieldWriter {
    // See also ADR-0024
    public static final char BIBTEX_STRING_START_END_SYMBOL = '#';
    private static final Logger LOGGER = LoggerFactory.getLogger(FieldWriter.class);
    private static final char FIELD_START = '{';
    private static final char FIELD_END = '}';
    // If true, an unpaired '#' is written out verbatim instead of raising an InvalidFieldValueException
    private final boolean neverFailOnHashes;
    private final FieldPreferences preferences;
    private final FieldContentFormatter formatter;
    public FieldWriter(FieldPreferences preferences) {
        this(true, preferences);
    }
    private FieldWriter(boolean neverFailOnHashes, FieldPreferences preferences) {
        this.neverFailOnHashes = neverFailOnHashes;
        this.preferences = preferences;
        formatter = new FieldContentFormatter(preferences);
    }
    // NOTE(review): currently identical to the public constructor, since both pass
    // neverFailOnHashes = true — confirm whether a strict (failing) variant is still needed
    public static FieldWriter buildIgnoreHashes(FieldPreferences prefs) {
        return new FieldWriter(true, prefs);
    }
    /**
     * Verifies that the unescaped braces in the given text are balanced.
     *
     * @param text the field value to check
     * @throws InvalidFieldValueException if a '}' appears without any '{', if there are
     *         more '{' than '}', or if the counts differ in any other way
     */
    private static void checkBraces(String text) throws InvalidFieldValueException {
        int left = 0;
        int right = 0;
        // First we collect all occurrences (a brace preceded by '\' is treated as escaped and ignored):
        for (int i = 0; i < text.length(); i++) {
            char item = text.charAt(i);
            boolean charBeforeIsEscape = false;
            if ((i > 0) && (text.charAt(i - 1) == '\\')) {
                charBeforeIsEscape = true;
            }
            if (!charBeforeIsEscape && (item == '{')) {
                left++;
            } else if (!charBeforeIsEscape && (item == '}')) {
                right++;
            }
        }
        // Then we throw an exception if the error criteria are met.
        if (right != 0 && (left == 0)) {
            LOGGER.error("Unescaped '}' character without opening bracket ends string prematurely. Field value: {}", text);
            throw new InvalidFieldValueException("Unescaped '}' character without opening bracket ends string prematurely. Field value: " + text);
        }
        if (right != 0 && (right < left)) {
            LOGGER.error("Unescaped '}' character without opening bracket ends string prematurely. Field value: {}", text);
            throw new InvalidFieldValueException("Unescaped '}' character without opening bracket ends string prematurely. Field value: " + text);
        }
        if (left != right) {
            LOGGER.error("Braces don't match. Field value: {}", text);
            throw new InvalidFieldValueException("Braces don't match. Field value: " + text);
        }
    }
    /**
     * Formats the content of a field.
     *
     * @param field   the name of the field - used to trigger different serializations, e.g., turning off resolution for some strings
     * @param content the content of the field
     * @return a formatted string suitable for output
     * @throws InvalidFieldValueException if s is not a correct bibtex string, e.g., because of improperly balanced braces or using # not paired
     */
    public String write(Field field, String content) throws InvalidFieldValueException {
        if (content == null) {
            // Absent value is serialized as an empty braced group: {}
            return FIELD_START + String.valueOf(FIELD_END);
        }
        if (!shouldResolveStrings(field)) {
            return formatWithoutResolvingStrings(content, field);
        }
        return formatAndResolveStrings(content, field);
    }
    /**
     * This method handles # in the field content to get valid bibtex strings
     * <p>
     * For instance, <code>#jan# - #feb#</code> gets <code>jan #{ - } # feb</code> (see @link{org.jabref.logic.bibtex.LatexFieldFormatterTests#makeHashEnclosedWordsRealStringsInMonthField()})
     */
    private String formatAndResolveStrings(String content, Field field) throws InvalidFieldValueException {
        checkBraces(content);
        StringBuilder stringBuilder = new StringBuilder();
        // Here we assume that the user encloses any bibtex strings in #, e.g.:
        // #jan# - #feb#
        // ...which will be written to the file like this:
        // jan # { - } # feb
        int pivot = 0;
        while (pivot < content.length()) {
            int goFrom = pivot;
            int pos1 = pivot;
            // Find the next unescaped '#' at or after pivot; an escaped "\#" is skipped over.
            while (goFrom == pos1) {
                pos1 = content.indexOf(BIBTEX_STRING_START_END_SYMBOL, goFrom);
                if ((pos1 > 0) && (content.charAt(pos1 - 1) == '\\')) {
                    goFrom = pos1 + 1;
                    pos1++;
                } else {
                    goFrom = pos1 - 1; // Ends the loop.
                }
            }
            // pos1 now points at the opening '#' of a string label (or -1 if none); pos2 will point at its closing '#'.
            int pos2;
            if (pos1 == -1) {
                pos1 = content.length(); // No more occurrences found.
                pos2 = -1;
            } else {
                pos2 = content.indexOf(BIBTEX_STRING_START_END_SYMBOL, pos1 + 1);
                if (pos2 == -1) {
                    if (neverFailOnHashes) {
                        pos1 = content.length(); // just write out the rest of the text, and throw no exception
                    } else {
                        LOGGER.error("The character {} is not allowed in BibTeX strings unless escaped as in '\\{}'. "
                                + "In JabRef, use pairs of # characters to indicate a string. "
                                + "Note that the entry causing the problem has been selected. Field value: {}",
                                BIBTEX_STRING_START_END_SYMBOL,
                                BIBTEX_STRING_START_END_SYMBOL,
                                content);
                        throw new InvalidFieldValueException(
                                "The character " + BIBTEX_STRING_START_END_SYMBOL + " is not allowed in BibTeX strings unless escaped as in '\\" + BIBTEX_STRING_START_END_SYMBOL + "'.\n"
                                        + "In JabRef, use pairs of # characters to indicate a string.\n"
                                        + "Note that the entry causing the problem has been selected. Field value: " + content);
                    }
                }
            }
            // Literal text between pivot and the next '#' is emitted inside braces.
            if (pos1 > pivot) {
                writeText(stringBuilder, content, pivot, pos1);
            }
            if ((pos1 < content.length()) && ((pos2 - 1) > pos1)) {
                // We check that the string label is not empty. That means
                // an occurrence of ## will simply be ignored. Should it instead
                // cause an error message?
                writeStringLabel(stringBuilder, content, pos1 + 1, pos2, pos1 == pivot,
                        (pos2 + 1) == content.length());
            }
            if (pos2 > -1) {
                pivot = pos2 + 1;
            } else {
                pivot = pos1 + 1;
            }
        }
        return formatter.format(stringBuilder, field);
    }
    /**
     * @return true if BibTeX string resolution is globally enabled and the given field
     *         is configured as resolvable in the preferences
     */
    private boolean shouldResolveStrings(Field field) {
        if (preferences.shouldResolveStrings()) {
            // Resolve strings for the list of fields only
            return preferences.getResolvableFields().contains(field);
        }
        return false;
    }
    // Emits the content as a single braced group, without interpreting '#'.
    private String formatWithoutResolvingStrings(String content, Field field) throws InvalidFieldValueException {
        checkBraces(content);
        StringBuilder stringBuilder = new StringBuilder(String.valueOf(FIELD_START));
        stringBuilder.append(formatter.format(content, field));
        stringBuilder.append(FIELD_END);
        return stringBuilder.toString();
    }
    /**
     * Appends the substring [startPos, endPos) of {@code text} enclosed in braces.
     *
     * @param stringBuilder the StringBuilder to append the text to
     * @param text          the text to append
     * @param startPos      start index (inclusive) within {@code text}
     * @param endPos        end index (exclusive) within {@code text}
     */
    private void writeText(StringBuilder stringBuilder, String text, int startPos, int endPos) {
        stringBuilder.append(FIELD_START);
        stringBuilder.append(text, startPos, endPos);
        stringBuilder.append(FIELD_END);
    }
    /**
     * Appends a bare (unbraced) string label, joined to its neighbors with " # ".
     *
     * @param stringBuilder the StringBuilder to append the text to
     * @param text          the text use as basis to get the text to append
     * @param startPos      the position in text where the text to add starts
     * @param endPos        the position in text where the text to add ends
     * @param isFirst       true if the label to write is the first one to write
     * @param isLast        true if the label to write is the last one to write
     */
    private void writeStringLabel(StringBuilder stringBuilder, String text, int startPos, int endPos, boolean isFirst, boolean isLast) {
        String line = (isFirst ? "" : " # ") + text.substring(startPos, endPos) + (isLast ? "" : " # ");
        stringBuilder.append(line);
    }
}
| 8,757
| 41.105769
| 192
|
java
|
null |
jabref-main/src/main/java/org/jabref/logic/bibtex/FileFieldWriter.java
|
package org.jabref.logic.bibtex;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import java.util.stream.Collectors;
import org.jabref.model.entry.LinkedFile;
/**
 * Serializes {@link LinkedFile} instances into the textual representation used by the
 * "file" field: entries separated by ';', their parts (description, link, type) by ':',
 * with both separators and '\' escaped by a backslash.
 */
public class FileFieldWriter {

    private FileFieldWriter() {
    }

    /** Serializes the given list of linked files into a single field value. */
    public static String getStringRepresentation(List<LinkedFile> fields) {
        String[][] array = fields.stream()
                                 .map(file -> new String[] {file.getDescription(), file.getLink(), file.getFileType()})
                                 .toArray(String[][]::new);
        return encodeStringArray(array);
    }

    public static String getStringRepresentation(LinkedFile field) {
        return getStringRepresentation(Collections.singletonList(field));
    }

    /**
     * Encodes a two-dimensional String array into a single string, using ':' and
     * ';' as separators. The characters ':' and ';' are escaped with '\'.
     *
     * @param values The String array.
     * @return The encoded String.
     */
    public static String encodeStringArray(String[][] values) {
        return Arrays.stream(values)
                     .map(FileFieldWriter::encodeStringArray)
                     .collect(Collectors.joining(";"));
    }

    /**
     * Encodes a String array into a single string, using ':' as separator.
     * The characters ':' and ';' are escaped with '\'.
     *
     * @param entry The String array.
     * @return The encoded String.
     */
    private static String encodeStringArray(String[] entry) {
        return Arrays.stream(entry)
                     .map(FileFieldWriter::quote)
                     .collect(Collectors.joining(":"));
    }

    /**
     * Escapes the separator characters ';' and ':' as well as the escape character '\'
     * with a leading backslash. Returns {@code null} for {@code null} input.
     */
    public static String quote(String s) {
        if (s == null) {
            return null;
        }
        StringBuilder escaped = new StringBuilder(s.length());
        for (char c : s.toCharArray()) {
            if ((c == ';') || (c == ':') || (c == '\\')) {
                escaped.append('\\');
            }
            escaped.append(c);
        }
        return escaped.toString();
    }
}
| 2,112
| 29.185714
| 99
|
java
|
null |
jabref-main/src/main/java/org/jabref/logic/bibtex/InvalidFieldValueException.java
|
package org.jabref.logic.bibtex;
/**
 * Signals that a field value cannot be serialized to valid BibTeX, e.g. because of
 * improperly balanced braces or an unpaired '#' string marker (see {@link FieldWriter}).
 *
 * @deprecated implement as {@link org.jabref.logic.integrity.IntegrityCheck} instead.
 */
@Deprecated
public class InvalidFieldValueException extends Exception {
    public InvalidFieldValueException(String message) {
        super(message);
    }
}
| 290
| 21.384615
| 86
|
java
|
null |
jabref-main/src/main/java/org/jabref/logic/bibtex/comparator/BibDatabaseDiff.java
|
package org.jabref.logic.bibtex.comparator;
import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;
import java.util.Optional;
import java.util.Set;
import org.jabref.logic.database.DuplicateCheck;
import org.jabref.model.database.BibDatabaseContext;
import org.jabref.model.database.BibDatabaseMode;
import org.jabref.model.entry.BibEntry;
import org.jabref.model.entry.BibEntryTypesManager;
import org.jabref.model.entry.field.StandardField;
/**
 * Computes the differences between two {@link BibDatabaseContext}s: metadata, preamble,
 * BibTeX strings, and entries. Entry diffs with a {@code null} side denote removals
 * (null new entry) or additions (null original entry).
 */
public class BibDatabaseDiff {
    /** Similarity score above which two entries are considered to describe the same work. */
    private static final double MATCH_THRESHOLD = 0.4;
    private final MetaDataDiff metaDataDiff;
    private final PreambleDiff preambleDiff;
    private final List<BibStringDiff> bibStringDiffs;
    private final List<BibEntryDiff> entryDiffs;

    private BibDatabaseDiff(BibDatabaseContext originalDatabase, BibDatabaseContext newDatabase, boolean includeEmptyEntries) {
        metaDataDiff = MetaDataDiff.compare(originalDatabase.getMetaData(), newDatabase.getMetaData()).orElse(null);
        preambleDiff = PreambleDiff.compare(originalDatabase, newDatabase).orElse(null);
        bibStringDiffs = BibStringDiff.compare(originalDatabase.getDatabase(), newDatabase.getDatabase());
        // Sort both databases according to a common sort key so matching is deterministic.
        EntryComparator comparator = getEntryComparator();
        List<BibEntry> originalEntriesSorted = originalDatabase.getDatabase().getEntriesSorted(comparator);
        List<BibEntry> newEntriesSorted = newDatabase.getDatabase().getEntriesSorted(comparator);
        if (!includeEmptyEntries) {
            originalEntriesSorted.removeIf(BibEntry::isEmpty);
            newEntriesSorted.removeIf(BibEntry::isEmpty);
        }
        entryDiffs = compareEntries(originalEntriesSorted, newEntriesSorted, originalDatabase.getMode());
    }

    /** Common sort key: title, then author, then year. */
    private static EntryComparator getEntryComparator() {
        EntryComparator comparator = new EntryComparator(false, true, StandardField.TITLE);
        comparator = new EntryComparator(false, true, StandardField.AUTHOR, comparator);
        comparator = new EntryComparator(false, true, StandardField.YEAR, comparator);
        return comparator;
    }

    /**
     * Pairs entries of the original database with entries of the new database, first by
     * exact match, then by closest similarity; unmatched entries on either side become
     * removal/addition diffs.
     */
    private static List<BibEntryDiff> compareEntries(List<BibEntry> originalEntries, List<BibEntry> newEntries, BibDatabaseMode mode) {
        List<BibEntryDiff> differences = new ArrayList<>();
        // Create a HashSet where we can put references to entries in the new
        // database that we have matched. This is to avoid matching them twice.
        Set<Integer> used = new HashSet<>(newEntries.size());
        Set<BibEntry> notMatched = new HashSet<>(originalEntries.size());
        // Loop through the entries of the original database, looking for exact matches in the new one.
        // We must finish scanning for exact matches before looking for near matches, to avoid an exact
        // match being "stolen" from another entry.
        mainLoop:
        for (BibEntry originalEntry : originalEntries) {
            for (int i = 0; i < newEntries.size(); i++) {
                if (!used.contains(i)) {
                    double score = DuplicateCheck.compareEntriesStrictly(originalEntry, newEntries.get(i));
                    if (score > 1) {
                        used.add(i);
                        continue mainLoop;
                    }
                }
            }
            // No? Add this entry to the list of non-matched entries.
            notMatched.add(originalEntry);
        }
        // Now we've found all exact matches, look through the remaining entries, looking for close matches.
        DuplicateCheck duplicateCheck = new DuplicateCheck(new BibEntryTypesManager());
        for (BibEntry originalEntry : notMatched) {
            if (newEntries.isEmpty()) {
                // Fix: without this guard, newEntries.get(bestMatchIndex) below would throw
                // IndexOutOfBoundsException when comparing against an empty database.
                // Nothing to match against, so the entry was removed.
                differences.add(new BibEntryDiff(originalEntry, null));
                continue;
            }
            // These two variables will keep track of which entry most closely matches the one we're looking at.
            double bestMatch = 0;
            int bestMatchIndex = 0;
            for (int i = 0; i < newEntries.size(); i++) {
                if (!used.contains(i)) {
                    double score = DuplicateCheck.compareEntriesStrictly(originalEntry, newEntries.get(i));
                    if (score > bestMatch) {
                        bestMatch = score;
                        bestMatchIndex = i;
                    }
                }
            }
            BibEntry bestEntry = newEntries.get(bestMatchIndex);
            if (bestMatch > MATCH_THRESHOLD
                    || hasEqualCitationKey(originalEntry, bestEntry)
                    || duplicateCheck.isDuplicate(originalEntry, bestEntry, mode)) {
                used.add(bestMatchIndex);
                differences.add(new BibEntryDiff(originalEntry, bestEntry));
            } else {
                differences.add(new BibEntryDiff(originalEntry, null));
            }
        }
        // Finally, look if there are still untouched entries in the new database. These may have been added.
        for (int i = 0; i < newEntries.size(); i++) {
            if (!used.contains(i)) {
                differences.add(new BibEntryDiff(null, newEntries.get(i)));
            }
        }
        return differences;
    }

    private static boolean hasEqualCitationKey(BibEntry oneEntry, BibEntry twoEntry) {
        return oneEntry.hasCitationKey() && twoEntry.hasCitationKey() && oneEntry.getCitationKey().equals(twoEntry.getCitationKey());
    }

    /** Compares the two contexts, ignoring empty entries on both sides. */
    public static BibDatabaseDiff compare(BibDatabaseContext base, BibDatabaseContext changed) {
        return new BibDatabaseDiff(base, changed, false);
    }

    public Optional<MetaDataDiff> getMetaDataDifferences() {
        return Optional.ofNullable(metaDataDiff);
    }

    public Optional<PreambleDiff> getPreambleDifferences() {
        return Optional.ofNullable(preambleDiff);
    }

    public List<BibStringDiff> getBibStringDifferences() {
        return bibStringDiffs;
    }

    public List<BibEntryDiff> getEntryDifferences() {
        return entryDiffs;
    }
}
| 5,987
| 42.708029
| 135
|
java
|
null |
jabref-main/src/main/java/org/jabref/logic/bibtex/comparator/BibEntryDiff.java
|
package org.jabref.logic.bibtex.comparator;
import org.jabref.model.entry.BibEntry;
/**
 * Value object pairing an entry of the original database with its counterpart in the
 * new database. Either side may be {@code null}: a null new entry denotes a removal,
 * a null original entry an addition (see {@code BibDatabaseDiff.compareEntries}).
 */
public class BibEntryDiff {
    private final BibEntry originalEntry;
    private final BibEntry newEntry;
    public BibEntryDiff(BibEntry originalEntry, BibEntry newEntry) {
        this.originalEntry = originalEntry;
        this.newEntry = newEntry;
    }
    public BibEntry getOriginalEntry() {
        return originalEntry;
    }
    public BibEntry getNewEntry() {
        return newEntry;
    }
}
| 495
| 21.545455
| 68
|
java
|
null |
jabref-main/src/main/java/org/jabref/logic/bibtex/comparator/BibStringDiff.java
|
package org.jabref.logic.bibtex.comparator;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Objects;
import java.util.Optional;
import java.util.Set;
import org.jabref.model.database.BibDatabase;
import org.jabref.model.entry.BibtexString;
/**
 * Represents a single difference between the BibTeX string (@string) constants of two
 * databases. A {@code null} new string denotes a removal, a {@code null} original
 * string an addition; two non-null sides denote a change of content or name.
 */
public class BibStringDiff {
    private final BibtexString originalString;
    private final BibtexString newString;
    BibStringDiff(BibtexString originalString, BibtexString newString) {
        this.originalString = originalString;
        this.newString = newString;
    }
    /**
     * Computes the differences between the string constants of the two databases.
     * Strings are matched first by name, then by content (to detect renames).
     */
    public static List<BibStringDiff> compare(BibDatabase originalDatabase, BibDatabase newDatabase) {
        if (originalDatabase.hasNoStrings() && newDatabase.hasNoStrings()) {
            return Collections.emptyList();
        }
        List<BibStringDiff> differences = new ArrayList<>();
        Set<BibtexString> used = new HashSet<>();
        Set<BibtexString> notMatched = new HashSet<>();
        // First try to match by string names.
        for (BibtexString original : originalDatabase.getStringValues()) {
            Optional<BibtexString> match = newDatabase
                    .getStringValues().stream()
                    .filter(test -> test.getName().equals(original.getName()))
                    .findAny();
            if (match.isPresent()) {
                // We have found a string with a matching name.
                if (!Objects.equals(original.getContent(), match.get().getContent())) {
                    // But they have non-matching contents, so we've found a change.
                    differences.add(new BibStringDiff(original, match.get()));
                }
                used.add(match.get());
            } else {
                // No match for this string.
                notMatched.add(original);
            }
        }
        // See if we can detect a name change for those entries that we couldn't match, based on their content
        for (Iterator<BibtexString> iterator = notMatched.iterator(); iterator.hasNext(); ) {
            BibtexString original = iterator.next();
            Optional<BibtexString> match = newDatabase
                    .getStringValues().stream()
                    .filter(test -> test.getContent().equals(original.getContent()))
                    .findAny();
            if (match.isPresent()) {
                // We have found a string with the same content. It cannot have the same
                // name, or we would have found it above.
                differences.add(new BibStringDiff(original, match.get()));
                iterator.remove();
                used.add(match.get());
            }
        }
        // Strings that are still not found must have been removed.
        for (BibtexString original : notMatched) {
            differences.add(new BibStringDiff(original, null));
        }
        // Finally, see if there are remaining strings in the new database. They must have been added.
        newDatabase.getStringValues().stream()
                   .filter(test -> !used.contains(test))
                   .forEach(newString -> differences.add(new BibStringDiff(null, newString)));
        return differences;
    }
    public BibtexString getOriginalString() {
        return originalString;
    }
    public BibtexString getNewString() {
        return newString;
    }
    @Override
    public boolean equals(Object other) {
        if (this == other) {
            return true;
        }
        if ((other == null) || (getClass() != other.getClass())) {
            return false;
        }
        BibStringDiff that = (BibStringDiff) other;
        return Objects.equals(newString, that.newString) && Objects.equals(originalString, that.originalString);
    }
    @Override
    public int hashCode() {
        return Objects.hash(originalString, newString);
    }
}
| 3,940
| 34.827273
| 112
|
java
|
null |
jabref-main/src/main/java/org/jabref/logic/bibtex/comparator/BibtexStringComparator.java
|
package org.jabref.logic.bibtex.comparator;
import java.util.Comparator;
import java.util.Locale;
import org.jabref.model.entry.BibtexString;
/**
 * Orders {@link BibtexString} constants alphabetically by name (case-insensitive).
 * Optionally, references between strings are taken into account so that a string
 * which is referenced by another one is ordered before the referring string.
 */
public class BibtexStringComparator implements Comparator<BibtexString> {

    private final boolean considerRefs;

    /**
     * @param considerRefs Indicates whether the strings should be sorted according to
     *                     internal references in addition to alphabetical sorting.
     */
    public BibtexStringComparator(boolean considerRefs) {
        this.considerRefs = considerRefs;
    }

    @Override
    public int compare(BibtexString s1, BibtexString s2) {
        // Alphabetical comparison of the (case-insensitive) names comes first.
        String firstName = s1.getName().toLowerCase(Locale.ROOT);
        String secondName = s2.getName().toLowerCase(Locale.ROOT);
        int result = firstName.compareTo(secondName);
        if (result == 0) {
            return result;
        }
        if (considerRefs) {
            // Determine which string would currently be written first (pre) and second (post).
            BibtexString pre = (result < 0) ? s1 : s2;
            BibtexString post = (result < 0) ? s2 : s1;
            // Only if "pre" refers to "post" must the alphabetical ordering be reversed,
            // so that the referenced string is defined first.
            String namePost = post.getName().toLowerCase(Locale.ROOT);
            String textPre = pre.getContent().toLowerCase(Locale.ROOT);
            if (textPre.contains("#" + namePost + "#")) {
                result = -result;
            }
        }
        return result;
    }
}
| 1,833
| 28.111111
| 81
|
java
|
null |
jabref-main/src/main/java/org/jabref/logic/bibtex/comparator/CrossRefEntryComparator.java
|
package org.jabref.logic.bibtex.comparator;
import java.util.Comparator;
import org.jabref.model.entry.BibEntry;
import org.jabref.model.entry.field.StandardField;
/**
 * Compares Bibtex entries based on their 'crossref' fields. Entries including
 * this field are deemed smaller than entries without this field. This serves
 * the purpose of always placing referenced entries after referring entries in
 * the .bib file. After this criterion comes comparisons of individual fields.
 */
public class CrossRefEntryComparator implements Comparator<BibEntry> {

    @Override
    public int compare(BibEntry e1, BibEntry e2) {
        boolean firstHasCrossRef = e1.hasField(StandardField.CROSSREF);
        boolean secondHasCrossRef = e2.hasField(StandardField.CROSSREF);
        // Both have it or both lack it: no preference either way.
        if (firstHasCrossRef == secondHasCrossRef) {
            return 0;
        }
        // Exactly one has a crossref; that entry sorts first (is "smaller").
        return firstHasCrossRef ? -1 : 1;
    }
}
| 960
| 29.03125
| 78
|
java
|
null |
jabref-main/src/main/java/org/jabref/logic/bibtex/comparator/EntryComparator.java
|
package org.jabref.logic.bibtex.comparator;
import java.util.Comparator;
import java.util.Locale;
import java.util.Objects;
import org.jabref.model.entry.AuthorList;
import org.jabref.model.entry.BibEntry;
import org.jabref.model.entry.field.Field;
import org.jabref.model.entry.field.FieldProperty;
import org.jabref.model.entry.field.InternalField;
/**
 * This implementation of Comparator takes care of most of the details of sorting BibTeX entries in JabRef. It is
 * structured as a node in a linked list of comparators, where each node can contain a link to a new comparator that
 * decides the ordering (by recursion) if this one can't find a difference. The next node, if any, is given at
 * construction time, and an arbitrary number of nodes can be included. If the entries are equal by this comparator, and
 * there is no next entry, the entries' unique IDs will decide the ordering.
 */
public class EntryComparator implements Comparator<BibEntry> {
    private final Field sortField;
    private final boolean descending;
    private final boolean binary;
    private final Comparator<BibEntry> next;
    /**
     *
     * @param binary true: the presence of fields is checked; false: the content of the fields is compared
     * @param descending true: if the most different entry should get the highest score
     * @param field the field to sort on
     * @param next the next comparator to use (if the current comparator results in equality)
     */
    public EntryComparator(boolean binary, boolean descending, Field field, Comparator<BibEntry> next) {
        this.binary = binary;
        this.sortField = field;
        this.descending = descending;
        this.next = next;
    }
    /** Same as the four-argument constructor, but with no follow-up comparator. */
    public EntryComparator(boolean binary, boolean descending, Field field) {
        this.binary = binary;
        this.sortField = field;
        this.descending = descending;
        this.next = null;
    }
    @Override
    public int compare(BibEntry e1, BibEntry e2) {
        // default equals
        // TODO: with the new default equals this does not only return 0 for identical objects,
        // but for all objects that have the same id and same fields
        if (Objects.equals(e1, e2)) {
            return 0;
        }
        Object f1 = e1.getField(sortField).orElse(null);
        Object f2 = e2.getField(sortField).orElse(null);
        if (binary) {
            // We just separate on set and unset fields:
            if (f1 == null) {
                return f2 == null ? (next == null ? idCompare(e1, e2) : next.compare(e1, e2)) : 1;
            } else {
                return f2 == null ? -1 : (next == null ? idCompare(e1, e2) : next.compare(e1, e2));
            }
        }
        // If the field is author or editor, we rearrange names to achieve that they are
        // sorted according to last name.
        if (sortField.getProperties().contains(FieldProperty.PERSON_NAMES)) {
            if (f1 != null) {
                f1 = AuthorList.fixAuthorForAlphabetization((String) f1).toLowerCase(Locale.ROOT);
            }
            if (f2 != null) {
                f2 = AuthorList.fixAuthorForAlphabetization((String) f2).toLowerCase(Locale.ROOT);
            }
        } else if (sortField.equals(InternalField.TYPE_HEADER)) {
            // Sort by type.
            f1 = e1.getType();
            f2 = e2.getType();
        } else if (sortField.equals(InternalField.KEY_FIELD)) {
            f1 = e1.getCitationKey().orElse(null);
            f2 = e2.getCitationKey().orElse(null);
        } else if (sortField.isNumeric()) {
            try {
                Integer i1 = Integer.parseInt((String) f1);
                Integer i2 = Integer.parseInt((String) f2);
                // Ok, parsing was successful. Update f1 and f2:
                f1 = i1;
                f2 = i2;
            } catch (NumberFormatException ex) {
                // Parsing failed. Give up treating these as numbers.
                // TODO: should we check which of them failed, and sort based on that?
            }
        }
        // Null values sort after non-null values (f1 null => e1 last).
        if (f2 == null) {
            if (f1 == null) {
                return next == null ? idCompare(e1, e2) : next.compare(e1, e2);
            } else {
                return -1;
            }
        }
        if (f1 == null) { // f2 != null here automatically
            return 1;
        }
        int result;
        if ((f1 instanceof Integer f1i) && (f2 instanceof Integer f2i)) {
            result = f1i.compareTo(f2i);
        } else if (f2 instanceof Integer integer) {
            // NOTE(review): Integer.valueOf can throw NumberFormatException if f1 is not
            // numeric here — confirm inputs can only be mixed when both sides are parseable
            Integer f1AsInteger = Integer.valueOf(f1.toString());
            result = f1AsInteger.compareTo(integer);
        } else if (f1 instanceof Integer integer) {
            // NOTE(review): same NumberFormatException risk as above, for f2
            Integer f2AsInteger = Integer.valueOf(f2.toString());
            result = integer.compareTo(f2AsInteger);
        } else {
            String ours = ((String) f1).toLowerCase(Locale.ROOT);
            String theirs = ((String) f2).toLowerCase(Locale.ROOT);
            int comp = ours.compareTo(theirs);
            result = comp;
        }
        if (result != 0) {
            return descending ? -result : result; // Primary sort.
        }
        if (next == null) {
            return idCompare(e1, e2); // If still equal, we use the unique IDs.
        } else {
            return next.compare(e1, e2); // Secondary sort if existent.
        }
    }
    // Last-resort tie-breaker: the entries' internal IDs give a stable total order.
    private static int idCompare(BibEntry b1, BibEntry b2) {
        return b1.getId().compareTo(b2.getId());
    }
}
| 5,519
| 38.428571
| 120
|
java
|
null |
jabref-main/src/main/java/org/jabref/logic/bibtex/comparator/FieldComparator.java
|
package org.jabref.logic.bibtex.comparator;
import java.text.Collator;
import java.text.ParseException;
import java.text.RuleBasedCollator;
import java.util.Comparator;
import java.util.Locale;
import java.util.Optional;
import org.jabref.model.entry.AuthorList;
import org.jabref.model.entry.BibEntry;
import org.jabref.model.entry.Month;
import org.jabref.model.entry.field.Field;
import org.jabref.model.entry.field.FieldProperty;
import org.jabref.model.entry.field.InternalField;
import org.jabref.model.entry.field.OrFields;
import org.jabref.model.entry.field.StandardField;
import org.jabref.model.metadata.SaveOrder;
import org.jabref.model.strings.StringUtil;
/**
 * A comparator for BibEntry fields. The field's kind (name, type, year, month, numeric,
 * or plain text) determines how the two values are compared; missing values always sort
 * last in ascending order.
 */
public class FieldComparator implements Comparator<BibEntry> {
    private static final Collator COLLATOR = getCollator();
    enum FieldType {
        NAME, TYPE, YEAR, MONTH, OTHER
    }
    private final OrFields fields;
    private final FieldType fieldType;
    private final boolean isNumeric;
    // +1 for ascending order, -1 for descending; applied to every comparison result.
    private final int multiplier;
    public FieldComparator(Field field) {
        this(new OrFields(field), false);
    }
    public FieldComparator(SaveOrder.SortCriterion sortCriterion) {
        this(new OrFields(sortCriterion.field), sortCriterion.descending);
    }
    public FieldComparator(OrFields fields, boolean descending) {
        this.fields = fields;
        fieldType = determineFieldType();
        isNumeric = this.fields.getPrimary().isNumeric();
        multiplier = descending ? -1 : 1;
    }
    // Default collator with the rules adjusted so that a space sorts before an underscore.
    private static Collator getCollator() {
        try {
            return new RuleBasedCollator(
                    ((RuleBasedCollator) Collator.getInstance()).getRules().replace("<'\u005f'", "<' '<'\u005f'"));
        } catch (ParseException e) {
            return Collator.getInstance();
        }
    }
    // Classifies the primary field to select the comparison strategy used in compare().
    private FieldType determineFieldType() {
        if (InternalField.TYPE_HEADER == this.fields.getPrimary()) {
            return FieldType.TYPE;
        } else if (this.fields.getPrimary().getProperties().contains(FieldProperty.PERSON_NAMES)) {
            return FieldType.NAME;
        } else if (StandardField.YEAR == this.fields.getPrimary()) {
            return FieldType.YEAR;
        } else if (StandardField.MONTH == this.fields.getPrimary()) {
            return FieldType.MONTH;
        } else {
            return FieldType.OTHER;
        }
    }
    // Returns the first present value among the OR'ed fields (LaTeX-free), or null if none is set.
    private String getFieldValue(BibEntry entry) {
        for (Field aField : fields) {
            Optional<String> o = entry.getFieldOrAliasLatexFree(aField);
            if (o.isPresent()) {
                return o.get();
            }
        }
        return null;
    }
    @Override
    public int compare(BibEntry e1, BibEntry e2) {
        String f1;
        String f2;
        if (fieldType == FieldType.TYPE) {
            // Sort by type.
            f1 = e1.getType().getDisplayName();
            f2 = e2.getType().getDisplayName();
        } else {
            // If the field is author or editor, we rearrange names so they are
            // sorted according to last name.
            f1 = getFieldValue(e1);
            f2 = getFieldValue(e2);
        }
        // Catch all cases involving null: a missing value sorts after a present one
        // (in ascending order; multiplier flips this for descending).
        if ((f1 == null) && (f2 == null)) {
            return 0;
        } else if (f1 == null) {
            return multiplier;
        } else if (f2 == null) {
            return -multiplier;
        }
        // Now we know that both f1 and f2 are != null
        if (fieldType == FieldType.NAME) {
            f1 = AuthorList.fixAuthorForAlphabetization(f1);
            f2 = AuthorList.fixAuthorForAlphabetization(f2);
        } else if (fieldType == FieldType.YEAR) {
            // Unparseable years are treated as 0.
            Integer f1year = StringUtil.intValueOfOptional(f1).orElse(0);
            Integer f2year = StringUtil.intValueOfOptional(f2).orElse(0);
            int comparisonResult = Integer.compare(f1year, f2year);
            return comparisonResult * multiplier;
        } else if (fieldType == FieldType.MONTH) {
            // Unparseable months map to -1 and thus sort before January.
            int month1 = Month.parse(f1).map(Month::getNumber).orElse(-1);
            int month2 = Month.parse(f2).map(Month::getNumber).orElse(-1);
            return Integer.compare(month1, month2) * multiplier;
        }
        if (isNumeric) {
            Optional<Integer> i1 = StringUtil.intValueOfOptional(f1);
            Optional<Integer> i2 = StringUtil.intValueOfOptional(f2);
            if ((i2.isPresent()) && (i1.isPresent())) {
                // Ok, parsing was successful. Update f1 and f2:
                return i1.get().compareTo(i2.get()) * multiplier;
            } else if (i1.isPresent()) {
                // The first one was parsable, but not the second one.
                // This means we consider one < two
                return -1 * multiplier;
            } else if (i2.isPresent()) {
                // The second one was parsable, but not the first one.
                // This means we consider one > two
                return multiplier;
            }
            // Else none of them were parseable, and we can fall back on comparing strings.
        }
        String ours = f1.toLowerCase(Locale.ENGLISH);
        String theirs = f2.toLowerCase(Locale.ENGLISH);
        return COLLATOR.compare(ours, theirs) * multiplier;
    }
}
| 5,308
| 34.630872
| 115
|
java
|
null |
jabref-main/src/main/java/org/jabref/logic/bibtex/comparator/FieldComparatorStack.java
|
package org.jabref.logic.bibtex.comparator;
import java.util.Comparator;
import java.util.List;
/**
 * A compound {@link Comparator} built from an ordered list of comparators.
 * The comparators are consulted in order; the first non-zero result wins.
 * If every comparator reports equality, the overall result is 0.
 */
public class FieldComparatorStack<T> implements Comparator<T> {

    private final List<? extends Comparator<? super T>> comparators;

    public FieldComparatorStack(List<? extends Comparator<? super T>> comparators) {
        this.comparators = comparators;
    }

    @Override
    public int compare(T left, T right) {
        for (Comparator<? super T> comparator : comparators) {
            int result = comparator.compare(left, right);
            if (result != 0) {
                return result;
            }
        }
        return 0;
    }
}
| 845
| 27.2
| 86
|
java
|
null |
jabref-main/src/main/java/org/jabref/logic/bibtex/comparator/GroupDiff.java
|
package org.jabref.logic.bibtex.comparator;
import java.util.Optional;
import org.jabref.model.groups.GroupTreeNode;
import org.jabref.model.metadata.MetaData;
/**
 * Signals that the group tree changed between two versions of a library's
 * metadata. Only the fact of the change is recorded, not its exact nature.
 */
public class GroupDiff {

    private final GroupTreeNode originalGroupRoot;
    private final GroupTreeNode newGroupRoot;

    GroupDiff(GroupTreeNode originalGroupRoot, GroupTreeNode newGroupRoot) {
        this.originalGroupRoot = originalGroupRoot;
        this.newGroupRoot = newGroupRoot;
    }

    /**
     * This method only detects whether a change took place or not. It does not determine the type of change. This would
     * be possible, but difficult to do properly, so we rather only report the change.
     *
     * @return a diff holding both group roots (either may be {@code null} when absent), or empty when the trees are equal
     */
    public static Optional<GroupDiff> compare(MetaData originalMetaData, MetaData newMetaData) {
        Optional<GroupTreeNode> originalGroups = originalMetaData.getGroups();
        Optional<GroupTreeNode> newGroups = newMetaData.getGroups();
        if (originalGroups.equals(newGroups)) {
            return Optional.empty();
        }
        return Optional.of(new GroupDiff(originalGroups.orElse(null), newGroups.orElse(null)));
    }

    public GroupTreeNode getOriginalGroupRoot() {
        return originalGroupRoot;
    }

    public GroupTreeNode getNewGroupRoot() {
        return newGroupRoot;
    }
}
| 1,339
| 32.5
| 120
|
java
|
null |
jabref-main/src/main/java/org/jabref/logic/bibtex/comparator/IdComparator.java
|
package org.jabref.logic.bibtex.comparator;
import java.util.Comparator;
import org.jabref.model.entry.BibEntry;
/**
 * Orders {@link BibEntry} objects by their internal ID. This restores the
 * order in which entries were created, provided the IDs handed out are
 * lexically monotonically increasing.
 */
public class IdComparator implements Comparator<BibEntry> {

    @Override
    public int compare(BibEntry one, BibEntry two) {
        String firstId = one.getId();
        String secondId = two.getId();
        return firstId.compareTo(secondId);
    }
}
| 525
| 26.684211
| 76
|
java
|
null |
jabref-main/src/main/java/org/jabref/logic/bibtex/comparator/MetaDataDiff.java
|
package org.jabref.logic.bibtex.comparator;
import java.util.EnumSet;
import java.util.Objects;
import java.util.Optional;
import org.jabref.model.metadata.MetaData;
import org.jabref.preferences.PreferencesService;
/**
 * Detects and classifies the differences between two versions of a library's
 * {@link MetaData}.
 */
public class MetaDataDiff {

    public enum Difference {
        PROTECTED,
        GROUPS_ALTERED,
        ENCODING,
        SAVE_SORT_ORDER,
        KEY_PATTERNS,
        USER_FILE_DIRECTORY,
        LATEX_FILE_DIRECTORY,
        DEFAULT_KEY_PATTERN,
        SAVE_ACTIONS,
        MODE,
        GENERAL_FILE_DIRECTORY,
        CONTENT_SELECTOR
    }

    private final Optional<GroupDiff> groupDiff;
    private final MetaData originalMetaData;
    private final MetaData newMetaData;

    private MetaDataDiff(MetaData originalMetaData, MetaData newMetaData) {
        this.originalMetaData = originalMetaData;
        this.newMetaData = newMetaData;
        this.groupDiff = GroupDiff.compare(originalMetaData, newMetaData);
    }

    /**
     * @return a diff when the two metadata objects are not equal, otherwise empty
     */
    public static Optional<MetaDataDiff> compare(MetaData originalMetaData, MetaData newMetaData) {
        if (!originalMetaData.equals(newMetaData)) {
            return Optional.of(new MetaDataDiff(originalMetaData, newMetaData));
        }
        return Optional.empty();
    }

    /** Records {@code difference} when the two given values are not equal. */
    private static void addIfDiffers(EnumSet<Difference> changes, Difference difference, Object originalValue, Object newValue) {
        if (!Objects.equals(originalValue, newValue)) {
            changes.add(difference);
        }
    }

    /**
     * Should be kept in sync with {@link MetaData#equals(Object)}
     */
    public EnumSet<Difference> getDifferences(PreferencesService preferences) {
        EnumSet<Difference> changes = EnumSet.noneOf(Difference.class);
        addIfDiffers(changes, Difference.PROTECTED, originalMetaData.isProtected(), newMetaData.isProtected());
        addIfDiffers(changes, Difference.GROUPS_ALTERED, originalMetaData.getGroups(), newMetaData.getGroups());
        addIfDiffers(changes, Difference.ENCODING, originalMetaData.getEncoding(), newMetaData.getEncoding());
        addIfDiffers(changes, Difference.SAVE_SORT_ORDER, originalMetaData.getSaveOrderConfig(), newMetaData.getSaveOrderConfig());
        addIfDiffers(changes, Difference.KEY_PATTERNS,
                originalMetaData.getCiteKeyPattern(preferences.getCitationKeyPatternPreferences().getKeyPattern()),
                newMetaData.getCiteKeyPattern(preferences.getCitationKeyPatternPreferences().getKeyPattern()));
        addIfDiffers(changes, Difference.USER_FILE_DIRECTORY, originalMetaData.getUserFileDirectories(), newMetaData.getUserFileDirectories());
        addIfDiffers(changes, Difference.LATEX_FILE_DIRECTORY, originalMetaData.getLatexFileDirectories(), newMetaData.getLatexFileDirectories());
        addIfDiffers(changes, Difference.DEFAULT_KEY_PATTERN, originalMetaData.getDefaultCiteKeyPattern(), newMetaData.getDefaultCiteKeyPattern());
        addIfDiffers(changes, Difference.SAVE_ACTIONS, originalMetaData.getSaveActions(), newMetaData.getSaveActions());
        // Mode is compared directly (as in the original equality contract).
        if (!originalMetaData.getMode().equals(newMetaData.getMode())) {
            changes.add(Difference.MODE);
        }
        addIfDiffers(changes, Difference.GENERAL_FILE_DIRECTORY, originalMetaData.getDefaultFileDirectory(), newMetaData.getDefaultFileDirectory());
        addIfDiffers(changes, Difference.CONTENT_SELECTOR, originalMetaData.getContentSelectors(), newMetaData.getContentSelectors());
        return changes;
    }

    public MetaData getNewMetaData() {
        return newMetaData;
    }

    public Optional<GroupDiff> getGroupDifferences() {
        return groupDiff;
    }
}
| 3,780
| 37.191919
| 115
|
java
|
null |
jabref-main/src/main/java/org/jabref/logic/bibtex/comparator/PreambleDiff.java
|
package org.jabref.logic.bibtex.comparator;
import java.util.Objects;
import java.util.Optional;
import org.jabref.model.database.BibDatabaseContext;
/**
 * Captures a change of the {@code @preamble} between two versions of a
 * library. Absent preambles are represented as the empty string.
 */
public class PreambleDiff {

    private final String originalPreamble;
    private final String newPreamble;

    PreambleDiff(String originalPreamble, String newPreamble) {
        this.originalPreamble = originalPreamble;
        this.newPreamble = newPreamble;
    }

    /**
     * @return a diff when the preambles differ, otherwise empty
     */
    public static Optional<PreambleDiff> compare(BibDatabaseContext originalDatabase, BibDatabaseContext newDatabase) {
        Optional<String> original = originalDatabase.getDatabase().getPreamble();
        Optional<String> updated = newDatabase.getDatabase().getPreamble();
        if (original.equals(updated)) {
            return Optional.empty();
        }
        return Optional.of(new PreambleDiff(original.orElse(""), updated.orElse("")));
    }

    public String getNewPreamble() {
        return newPreamble;
    }

    public String getOriginalPreamble() {
        return originalPreamble;
    }

    @Override
    public boolean equals(Object other) {
        if (this == other) {
            return true;
        }
        if ((other == null) || (getClass() != other.getClass())) {
            return false;
        }
        PreambleDiff otherDiff = (PreambleDiff) other;
        return Objects.equals(newPreamble, otherDiff.newPreamble)
                && Objects.equals(originalPreamble, otherDiff.originalPreamble);
    }

    @Override
    public int hashCode() {
        return Objects.hash(originalPreamble, newPreamble);
    }
}
| 1,607
| 28.777778
| 120
|
java
|
null |
jabref-main/src/main/java/org/jabref/logic/bst/BstEntry.java
|
package org.jabref.logic.bst;
import java.util.HashMap;
import java.util.Map;
import org.jabref.model.entry.BibEntry;
/**
 * Per-entry state used while the BST virtual machine executes a style: wraps
 * the underlying {@link BibEntry} together with the entry-scoped variables
 * the style declares.
 */
public class BstEntry {

    public final BibEntry entry;

    // Values of the entry-scoped string variables (written via ':=' inside an entry context).
    public final Map<String, String> localStrings = new HashMap<>();

    // NOTE(review): presumably the entry's resolved field values as seen by the style — confirm against the VM's READ handling.
    public final Map<String, String> fields = new HashMap<>();

    // Values of the entry-scoped integer variables (written via ':=' inside an entry context).
    public final Map<String, Integer> localIntegers = new HashMap<>();

    public BstEntry(BibEntry bibEntry) {
        this.entry = bibEntry;
    }
}
| 452
| 19.590909
| 70
|
java
|
null |
jabref-main/src/main/java/org/jabref/logic/bst/BstFunctions.java
|
package org.jabref.logic.bst;
import java.util.HashMap;
import java.util.Map;
import java.util.Optional;
import java.util.Stack;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import org.jabref.logic.bst.util.BstCaseChanger;
import org.jabref.logic.bst.util.BstNameFormatter;
import org.jabref.logic.bst.util.BstPurifier;
import org.jabref.logic.bst.util.BstTextPrefixer;
import org.jabref.logic.bst.util.BstWidthCalculator;
import org.jabref.model.database.BibDatabase;
import org.jabref.model.entry.Author;
import org.jabref.model.entry.AuthorList;
import org.antlr.v4.runtime.ParserRuleContext;
import org.antlr.v4.runtime.tree.ParseTree;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
public class BstFunctions {
private static final Logger LOGGER = LoggerFactory.getLogger(BstFunctions.class);
private static final Pattern ADD_PERIOD_PATTERN = Pattern.compile("([^.?!}\\s])(}|\\s)*$");
private final Map<String, String> strings;
private final Map<String, Integer> integers;
private final Map<String, BstFunction> functions;
private final String preamble;
private final Stack<Object> stack;
private final StringBuilder bbl;
private int bstWarning = 0;
    /**
     * A single BST command: either a built-in or a style-defined function.
     * Implementations operate on the shared VM stack through the visitor.
     */
    @FunctionalInterface
    public interface BstFunction {
        void execute(BstVMVisitor visitor, ParserRuleContext ctx);

        /**
         * Entry-aware variant used inside entry contexts (ITERATE/REVERSE);
         * by default the entry is ignored and the plain form is executed.
         */
        default void execute(BstVMVisitor visitor, ParserRuleContext ctx, BstEntry bstEntryContext) {
            this.execute(visitor, ctx);
        }
    }
public BstFunctions(BstVMContext bstVMContext,
StringBuilder bbl) {
this.strings = bstVMContext.strings();
this.integers = bstVMContext.integers();
this.functions = bstVMContext.functions();
this.preamble = Optional.ofNullable(bstVMContext.bibDatabase()).flatMap(BibDatabase::getPreamble).orElse("");
this.stack = bstVMContext.stack();
this.bbl = bbl;
}
    /**
     * Builds the table of BibTeX's built-in BST functions, keyed by the name
     * used in .bst sources. Stack-only built-ins are registered as method
     * references; built-ins that need (or must reject the absence of) an
     * entry context are dedicated inner classes.
     *
     * @return a fresh, mutable map from function name to implementation
     */
    protected Map<String, BstFunction> getBuiltInFunctions() {
        Map<String, BstFunction> builtInFunctions = new HashMap<>();
        // Comparison and arithmetic operators
        builtInFunctions.put(">", this::bstIsGreaterThan);
        builtInFunctions.put("<", this::bstIsLowerThan);
        builtInFunctions.put("=", this::bstEquals);
        builtInFunctions.put("+", this::bstAdd);
        builtInFunctions.put("-", this::bstSubtract);
        builtInFunctions.put("*", this::bstConcat);
        // Assignment needs the entry context for entry-local variables
        builtInFunctions.put(":=", new BstAssignFunction());
        builtInFunctions.put("add.period$", this::bstAddPeriod);
        builtInFunctions.put("call.type$", new BstCallTypeFunction());
        builtInFunctions.put("change.case$", this::bstChangeCase);
        builtInFunctions.put("chr.to.int$", this::bstChrToInt);
        builtInFunctions.put("cite$", new BstCiteFunction());
        builtInFunctions.put("duplicate$", this::bstDuplicate);
        builtInFunctions.put("empty$", this::bstEmpty);
        builtInFunctions.put("format.name$", this::bstFormatName);
        builtInFunctions.put("if$", this::bstIf);
        builtInFunctions.put("int.to.chr$", this::bstIntToChr);
        builtInFunctions.put("int.to.str$", this::bstIntToStr);
        builtInFunctions.put("missing$", this::bstMissing);
        builtInFunctions.put("newline$", this::bstNewLine);
        builtInFunctions.put("num.names$", this::bstNumNames);
        builtInFunctions.put("pop$", this::bstPop);
        builtInFunctions.put("preamble$", this::bstPreamble);
        builtInFunctions.put("purify$", this::bstPurify);
        builtInFunctions.put("quote$", this::bstQuote);
        builtInFunctions.put("skip$", this::bstSkip);
        builtInFunctions.put("stack$", this::bstStack);
        builtInFunctions.put("substring$", this::bstSubstring);
        builtInFunctions.put("swap$", this::bstSwap);
        builtInFunctions.put("text.length$", this::bstTextLength);
        builtInFunctions.put("text.prefix$", this::bstTextPrefix);
        builtInFunctions.put("top$", this::bstTop);
        builtInFunctions.put("type$", new BstTypeFunction());
        builtInFunctions.put("warning$", this::bstWarning);
        builtInFunctions.put("while$", this::bstWhile);
        builtInFunctions.put("width$", this::bstWidth);
        builtInFunctions.put("write$", this::bstWrite);
        return builtInFunctions;
    }
/**
* Pops the top two (integer) literals, compares them, and pushes
* the integer 1 if the second is greater than the first, 0
* otherwise.
*/
private void bstIsGreaterThan(BstVMVisitor visitor, ParserRuleContext ctx) {
if (stack.size() < 2) {
throw new BstVMException("Not enough operands on stack for operation > (line %d)".formatted(ctx.start.getLine()));
}
Object o2 = stack.pop();
Object o1 = stack.pop();
if (!((o1 instanceof Integer) && (o2 instanceof Integer))) {
throw new BstVMException("Can only compare two integers with >");
}
stack.push(((Integer) o1).compareTo((Integer) o2) > 0 ? BstVM.TRUE : BstVM.FALSE);
}
/**
* Pops the top two (integer) literals, compares them, and pushes
* the integer 1 if the second is lower than the first, 0
* otherwise.
*/
private void bstIsLowerThan(BstVMVisitor visitor, ParserRuleContext ctx) {
if (stack.size() < 2) {
throw new BstVMException("Not enough operands on stack for operation <");
}
Object o2 = stack.pop();
Object o1 = stack.pop();
if (!((o1 instanceof Integer) && (o2 instanceof Integer))) {
throw new BstVMException("Can only compare two integers with < (line %d)".formatted(ctx.start.getLine()));
}
stack.push(((Integer) o1).compareTo((Integer) o2) < 0 ? BstVM.TRUE : BstVM.FALSE);
}
/**
* Pops the top two (both integer or both string) literals, compares
* them, and pushes the integer 1 if they're equal, 0 otherwise.
*/
private void bstEquals(BstVMVisitor visitor, ParserRuleContext ctx) {
if (stack.size() < 2) {
throw new BstVMException("Not enough operands on stack for operation = (line %d)".formatted(ctx.start.getLine()));
}
Object o1 = stack.pop();
Object o2 = stack.pop();
if ((o1 == null) ^ (o2 == null)) {
stack.push(BstVM.FALSE);
return;
}
if ((o1 == null) && (o2 == null)) {
stack.push(BstVM.TRUE);
return;
}
stack.push(o1.equals(o2) ? BstVM.TRUE : BstVM.FALSE);
}
/**
* Pops the top two (integer) literals and pushes their sum.
*/
private void bstAdd(BstVMVisitor visitor, ParserRuleContext ctx) {
if (stack.size() < 2) {
throw new BstVMException("Not enough operands on stack for operation + (line %d)".formatted(ctx.start.getLine()));
}
Object o2 = stack.pop();
Object o1 = stack.pop();
if (!((o1 instanceof Integer) && (o2 instanceof Integer))) {
throw new BstVMException("Can only compare two integers with + (line %d)".formatted(ctx.start.getLine()));
}
stack.push((Integer) o1 + (Integer) o2);
}
/**
* Pops the top two (integer) literals and pushes their difference
* (the first subtracted from the second).
*/
private void bstSubtract(BstVMVisitor visitor, ParserRuleContext ctx) {
if (stack.size() < 2) {
throw new BstVMException("Not enough operands on stack for operation - (line %d)".formatted(ctx.start.getLine()));
}
Object o2 = stack.pop();
Object o1 = stack.pop();
if (!((o1 instanceof Integer) && (o2 instanceof Integer))) {
throw new BstVMException("Can only subtract two integers with - (line %d)".formatted(ctx.start.getLine()));
}
stack.push((Integer) o1 - (Integer) o2);
}
/**
* Pops the top two (string) literals, concatenates them (in reverse
* order, that is, the order in which pushed), and pushes the
* resulting string.
*/
private void bstConcat(BstVMVisitor visitor, ParserRuleContext ctx) {
if (stack.size() < 2) {
throw new BstVMException("Not enough operands on stack for operation * (line %d)".formatted(ctx.start.getLine()));
}
Object o2 = stack.pop();
Object o1 = stack.pop();
if (o1 == null) {
o1 = "";
}
if (o2 == null) {
o2 = "";
}
if (!((o1 instanceof String) && (o2 instanceof String))) {
LOGGER.error("o1: {} ({})", o1, o1.getClass());
LOGGER.error("o2: {} ({})", o2, o2.getClass());
throw new BstVMException("Can only concatenate two String with * (line %d)".formatted(ctx.start.getLine()));
}
stack.push(o1.toString() + o2);
}
    /**
     * Pops the top two literals and assigns to the first (which must be
     * a global or entry variable) the value of the second.
     */
    public class BstAssignFunction implements BstFunction {

        @Override
        public void execute(BstVMVisitor visitor, ParserRuleContext ctx) {
            // Outside an entry context there are no entry-local variables.
            this.execute(visitor, ctx, null);
        }

        @Override
        public void execute(BstVMVisitor visitor, ParserRuleContext ctx, BstEntry bstEntry) {
            if (stack.size() < 2) {
                throw new BstVMException("Not enough operands on stack for operation := (line %d)".formatted(ctx.start.getLine()));
            }
            Object o1 = stack.pop();
            Object o2 = stack.pop();
            // The assignment target must be a variable reference, not a value.
            if (!(o1 instanceof BstVMVisitor.Identifier identifier)) {
                throw new BstVMException("Invalid parameters (line %d)".formatted(ctx.start.getLine()));
            }
            String name = identifier.name();
            if (o2 instanceof String value) {
                // Entry-local string variables take precedence over globals.
                if ((bstEntry != null) && bstEntry.localStrings.containsKey(name)) {
                    bstEntry.localStrings.put(name, value);
                    return;
                }
                // Only previously declared globals are written; unknown names
                // are silently ignored here.
                if (strings.containsKey(name)) {
                    strings.put(name, value);
                }
            } else if (o2 instanceof Integer value) {
                // Entry-local integer variables take precedence over globals.
                if ((bstEntry != null) && bstEntry.localIntegers.containsKey(name)) {
                    bstEntry.localIntegers.put(name, value);
                    return;
                }
                if (integers.containsKey(name)) {
                    integers.put(name, value);
                }
            } else {
                // Assigned value was neither a string nor an integer.
                throw new BstVMException("Invalid parameters (line %d)".formatted(ctx.start.getLine()));
            }
        }
    }
/**
* Pops the top (string) literal, adds a `.' to it if the last non
* '}' character isn't a `.', `?', or `!', and pushes this resulting
* string.
*/
private void bstAddPeriod(BstVMVisitor visitor, ParserRuleContext ctx) {
if (stack.isEmpty()) {
throw new BstVMException("Not enough operands on stack for operation add.period$ (line %d)".formatted(ctx.start.getLine()));
}
Object o1 = stack.pop();
if (!(o1 instanceof String s)) {
throw new BstVMException("Can only add a period to a string for add.period$ (line %d)".formatted(ctx.start.getLine()));
}
Matcher m = ADD_PERIOD_PATTERN.matcher(s);
if (m.find()) {
StringBuilder sb = new StringBuilder();
m.appendReplacement(sb, m.group(1));
sb.append('.');
String group2 = m.group(2);
if (group2 != null) {
sb.append(m.group(2));
}
stack.push(sb.toString());
} else {
stack.push(s);
}
}
    /**
     * Executes the function whose name is the entry type of an entry.
     * For example if an entry is of type book, this function executes
     * the book function. When given as an argument to the ITERATE
     * command, call.type$ actually produces the output for the entries.
     * For an entry with an unknown type, it executes the function
     * default.type. Thus you should define (before the READ command)
     * one function for each standard entry type as well as a
     * default.type function.
     */
    public class BstCallTypeFunction implements BstFunction {

        @Override
        public void execute(BstVMVisitor visitor, ParserRuleContext ctx) {
            throw new BstVMException("Call.type$ can only be called from within a context (ITERATE or REVERSE). (line %d)".formatted(ctx.start.getLine()));
        }

        @Override
        public void execute(BstVMVisitor visitor, ParserRuleContext ctx, BstEntry bstEntry) {
            if (bstEntry == null) {
                this.execute(visitor, ctx); // Throw error
            } else {
                // Dispatch to the style-defined function named after the entry type.
                // NOTE(review): assumes that function is registered; a style
                // missing it would raise a NullPointerException here — confirm
                // upstream handling of unknown types / default.type.
                functions.get(bstEntry.entry.getType().getName()).execute(visitor, ctx, bstEntry);
            }
        }
    }
/**
* Pops the top two (string) literals; it changes the case of the second
* according to the specifications of the first, as follows. (Note: The word
* `letters' in the next sentence refers only to those at brace-level 0, the
* top-most brace level; no other characters are changed, except perhaps for
* \special characters", described in Section 4.) If the first literal is the
* string `t', it converts to lower case all letters except the very first
* character in the string, which it leaves alone, and except the first
* character following any colon and then nonnull white space, which it also
* leaves alone; if it's the string `l', it converts all letters to lower case;
* and if it's the string `u', it converts all letters to upper case. It then
* pushes this resulting string. If either type is incorrect, it complains and
* pushes the null string; however, if both types are correct but the
* specification string (i.e., the first string) isn't one of the legal ones, it
* merely pushes the second back onto the stack, after complaining. (Another
* note: It ignores case differences in the specification string; for example,
* the strings t and T are equivalent for the purposes of this built-in
* function.)
*/
private void bstChangeCase(BstVMVisitor visitor, ParserRuleContext ctx) {
if (stack.size() < 2) {
throw new BstVMException("Not enough operands on stack for operation change.case$ (line %d)".formatted(ctx.start.getLine()));
}
Object o1 = stack.pop();
if (!((o1 instanceof String format) && (format.length() == 1))) {
throw new BstVMException("A format string of length 1 is needed for change.case$ (line %d)".formatted(ctx.start.getLine()));
}
Object o2 = stack.pop();
if (!(o2 instanceof String toChange)) {
throw new BstVMException("A string is needed as second parameter for change.case$ (line %d)".formatted(ctx.start.getLine()));
}
stack.push(BstCaseChanger.changeCase(toChange, BstCaseChanger.FormatMode.of(format)));
}
/**
* Pops the top (string) literal, makes sure it's a single
* character, converts it to the corresponding ASCII integer, and
* pushes this integer.
*/
private void bstChrToInt(BstVMVisitor visitor, ParserRuleContext ctx) {
if (stack.isEmpty()) {
throw new BstVMException("Not enough operands on stack for operation chr.to.int$ (line %d)".formatted(ctx.start.getLine()));
}
Object o1 = stack.pop();
if (!((o1 instanceof String s) && (((String) o1).length() == 1))) {
throw new BstVMException("Can only perform chr.to.int$ on string with length 1 (line %d)".formatted(ctx.start.getLine()));
}
stack.push((int) s.charAt(0));
}
/**
* Pushes the string that was the \cite-command argument for this
* entry.
*/
public class BstCiteFunction implements BstFunction {
@Override
public void execute(BstVMVisitor visitor, ParserRuleContext ctx) {
throw new BstVMException("Must have an entry to cite$ (line %d)".formatted(ctx.start.getLine()));
}
@Override
public void execute(BstVMVisitor visitor, ParserRuleContext ctx, BstEntry bstEntryContext) {
if (bstEntryContext == null) {
execute(visitor, ctx);
return;
}
stack.push(bstEntryContext.entry.getCitationKey().orElse(null));
}
}
/**
* Pops the top literal from the stack and pushes two copies of it.
*/
private void bstDuplicate(BstVMVisitor visitor, ParserRuleContext ctx) {
if (stack.isEmpty()) {
throw new BstVMException("Not enough operands on stack for operation duplicate$ (line %d)".formatted(ctx.start.getLine()));
}
Object o1 = stack.pop();
stack.push(o1);
stack.push(o1);
}
/**
* Pops the top literal and pushes the integer 1 if it's a missing
* field or a string having no non-white-space characters, 0
* otherwise.
*/
private void bstEmpty(BstVMVisitor visitor, ParserRuleContext ctx) {
if (stack.isEmpty()) {
throw new BstVMException("Not enough operands on stack for operation empty$ (line %d)".formatted(ctx.start.getLine()));
}
Object o1 = stack.pop();
if (o1 == null) {
stack.push(BstVM.TRUE);
return;
}
if (!(o1 instanceof String s)) {
throw new BstVMException("Operand does not match function empty$ (line %d)".formatted(ctx.start.getLine()));
}
stack.push("".equals(s.trim()) ? BstVM.TRUE : BstVM.FALSE);
}
/**
* The |built_in| function {\.{format.name\$}} pops the
* top three literals (they are a string, an integer, and a string
* literal, in that order). The last string literal represents a
* name list (each name corresponding to a person), the integer
* literal specifies which name to pick from this list, and the
* first string literal specifies how to format this name, as
* described in the \BibTeX\ documentation. Finally, this function
* pushes the formatted name. If any of the types is incorrect, it
* complains and pushes the null string.
*/
private void bstFormatName(BstVMVisitor visitor, ParserRuleContext ctx) {
if (stack.size() < 3) {
throw new BstVMException("Not enough operands on stack for operation format.name$ (line %d)".formatted(ctx.start.getLine()));
}
Object o1 = stack.pop();
Object o2 = stack.pop();
Object o3 = stack.pop();
if (!(o1 instanceof String) && !(o2 instanceof Integer) && !(o3 instanceof String)) {
// warning("A string is needed for change.case$");
stack.push("");
return;
}
String format = (String) o1;
Integer name = (Integer) o2;
String names = (String) o3;
if (names == null) {
stack.push("");
} else {
AuthorList a = AuthorList.parse(names);
if (name > a.getNumberOfAuthors()) {
throw new BstVMException("Author Out of Bounds. Number %d invalid for %s (line %d)".formatted(name, names, ctx.start.getLine()));
}
Author author = a.getAuthor(name - 1);
stack.push(BstNameFormatter.formatName(author, format));
}
}
/**
* Pops the top three literals (they are two function literals and
* an integer literal, in that order); if the integer is greater
* than 0, it executes the second literal, else it executes the
* first.
*/
private void bstIf(BstVMVisitor visitor, ParserRuleContext ctx) {
if (stack.size() < 3) {
throw new BstVMException("Not enough operands on stack for if$ (line %d)".formatted(ctx.start.getLine()));
}
Object f1 = stack.pop();
Object f2 = stack.pop();
Object i = stack.pop();
if (!((f1 instanceof BstVMVisitor.Identifier) || (f1 instanceof ParseTree))
&& ((f2 instanceof BstVMVisitor.Identifier) || (f2 instanceof ParseTree))
&& (i instanceof Integer)) {
throw new BstVMException("Expecting two functions and an integer for if$ (line %d)".formatted(ctx.start.getLine()));
}
if (((Integer) i) > 0) {
callIdentifierOrTree(f2, visitor, ctx);
} else {
callIdentifierOrTree(f1, visitor, ctx);
}
}
private void callIdentifierOrTree(Object f, BstVMVisitor visitor, ParserRuleContext ctx) {
if (f instanceof ParseTree tree) {
visitor.visit(tree);
} else if (f instanceof BstVMVisitor.Identifier identifier) {
visitor.resolveIdentifier(identifier.name(), ctx);
} else {
stack.push(f);
}
}
/**
* Pops the top (integer) literal, interpreted as the ASCII integer
* value of a single character, converts it to the corresponding
* single-character string, and pushes this string.
*/
private void bstIntToChr(BstVMVisitor visitor, ParserRuleContext ctx) {
if (stack.isEmpty()) {
throw new BstVMException("Not enough operands on stack for operation int.to.chr$ (line %d)".formatted(ctx.start.getLine()));
}
Object o1 = stack.pop();
if (!(o1 instanceof Integer i)) {
throw new BstVMException("Can only perform operation int.to.chr$ on an Integer (line %d)".formatted(ctx.start.getLine()));
}
stack.push(String.valueOf((char) i.intValue()));
}
/**
* Pops the top (integer) literal, converts it to its (unique)
* string equivalent, and pushes this string.
*/
private void bstIntToStr(BstVMVisitor visitor, ParserRuleContext ctx) {
if (stack.isEmpty()) {
throw new BstVMException("Not enough operands on stack for operation int.to.str$ (line %d)".formatted(ctx.start.getLine()));
}
Object o1 = stack.pop();
if (!(o1 instanceof Integer)) {
throw new BstVMException("Can only transform an integer to an string using int.to.str$ (line %d)".formatted(ctx.start.getLine()));
}
stack.push(o1.toString());
}
/**
* Pops the top literal and pushes the integer 1 if it's a missing
* field, 0 otherwise.
*/
private void bstMissing(BstVMVisitor visitor, ParserRuleContext ctx) {
if (stack.isEmpty()) {
throw new BstVMException("Not enough operands on stack for operation missing$ (line %d)".formatted(ctx.start.getLine()));
}
Object o1 = stack.pop();
if (o1 == null) {
stack.push(BstVM.TRUE);
return;
}
if (!(o1 instanceof String)) {
LOGGER.warn("Not a string or missing field in operation missing$ (line %d)".formatted(ctx.start.getLine()));
stack.push(BstVM.TRUE);
return;
}
stack.push(BstVM.FALSE);
}
    /**
     * Writes onto the bbl file what is accumulated in the output buffer.
     * It writes a blank line if and only if the output buffer is empty.
     * Since write$ does reasonable line breaking, you should use this
     * function only when you want a blank line or an explicit line
     * break.
     */
    private void bstNewLine(BstVMVisitor visitor, ParserRuleContext ctx) {
        // Appends a bare '\n' directly to the .bbl output buffer.
        this.bbl.append('\n');
    }
/**
* Pops the top (string) literal and pushes the number of names the
* string represents one plus the number of occurrences of the
* substring "and" (ignoring case differences) surrounded by
* non-null white-space at the top brace level.
*/
private void bstNumNames(BstVMVisitor visitor, ParserRuleContext ctx) {
if (stack.isEmpty()) {
throw new BstVMException("Not enough operands on stack for operation num.names$ (line %d)".formatted(ctx.start.getLine()));
}
Object o1 = stack.pop();
if (!(o1 instanceof String s)) {
throw new BstVMException("Need a string at the top of the stack for num.names$ (line %d)".formatted(ctx.start.getLine()));
}
stack.push(AuthorList.parse(s).getNumberOfAuthors());
}
/**
* Pops the top of the stack but doesn't print it; this gets rid of
* an unwanted stack literal.
*/
private void bstPop(BstVMVisitor visitor, ParserRuleContext ctx) {
stack.pop();
}
    /**
     * The |built_in| function {\.{preamble\$}} pushes onto the stack
     * the concatenation of all the \.{preamble} strings read from the
     * database files (or the empty string if there were none).
     * '@PREAMBLE' strings are read from the database files.
     */
    private void bstPreamble(BstVMVisitor visitor, ParserRuleContext ctx) {
        // 'preamble' is resolved once in the constructor; "" when absent.
        stack.push(preamble);
    }
/**
* Pops the top (string) literal, removes nonalphanumeric characters
* except for white-space characters and hyphens and ties (these all get
* converted to a space), removes certain alphabetic characters
* contained in the control sequences associated with a \special
* character", and pushes the resulting string.
*/
private void bstPurify(BstVMVisitor visitor, ParserRuleContext ctx) {
if (stack.isEmpty()) {
throw new BstVMException("Not enough operands on stack for operation purify$ (line %d)".formatted(ctx.start.getLine()));
}
Object o1 = stack.pop();
if (!(o1 instanceof String)) {
LOGGER.warn("A string is needed for purify$");
stack.push("");
return;
}
stack.push(BstPurifier.purify((String) o1));
}
    /**
     * Pushes the string consisting of the double-quote character.
     */
    private void bstQuote(BstVMVisitor visitor, ParserRuleContext ctx) {
        stack.push("\"");
    }
    /**
     * Does nothing. Used as a placeholder branch, e.g. in if$.
     */
    private void bstSkip(BstVMVisitor visitor, ParserRuleContext ctx) {
        // no-op by design
    }
/**
* Pops and prints the whole stack; it's meant to be used for style
* designers while debugging.
*/
private void bstStack(BstVMVisitor visitor, ParserRuleContext ctx) {
while (!stack.empty()) {
LOGGER.debug("Stack entry {}", stack.pop());
}
}
    /**
     * Pops the top three literals (they are the two integers literals
     * len and start, and a string literal, in that order). It pushes
     * the substring of the (at most) len consecutive characters
     * starting at the startth character (assuming 1-based indexing) if
     * start is positive, and ending at the start-th character
     * (including) from the end if start is negative (where the first
     * character from the end is the last character).
     */
    private void bstSubstring(BstVMVisitor visitor, ParserRuleContext ctx) {
        if (stack.size() < 3) {
            throw new BstVMException("Not enough operands on stack for operation substring$ (line %d)".formatted(ctx.start.getLine()));
        }
        Object o1 = stack.pop();
        Object o2 = stack.pop();
        Object o3 = stack.pop();
        if (!((o1 instanceof Integer len) && (o2 instanceof Integer start) && (o3 instanceof String s))) {
            throw new BstVMException("Expecting two integers and a string for substring$ (line %d)".formatted(ctx.start.getLine()));
        }
        int lenI = len;
        int startI = start;
        // Clamp extreme operands to half the int range so the index
        // arithmetic below cannot overflow.
        if (lenI > (Integer.MAX_VALUE / 2)) {
            lenI = Integer.MAX_VALUE / 2;
        }
        if (startI > (Integer.MAX_VALUE / 2)) {
            startI = Integer.MAX_VALUE / 2;
        }
        if (startI < (Integer.MIN_VALUE / 2)) {
            startI = -Integer.MIN_VALUE / 2;
        }
        // A negative start counts from the end of the string; translate it
        // into the equivalent positive 1-based start index (at least 1).
        if (startI < 0) {
            startI += s.length() + 1;
            startI = Math.max(1, (startI + 1) - lenI);
        }
        // Convert 1-based to 0-based; the end index is capped at the string length.
        stack.push(s.substring(startI - 1, Math.min((startI - 1) + lenI, s.length())));
    }
/**
* Swaps the top two literals on the stack. text.length$ Pops the
* top (string) literal, and pushes the number of text characters
* it contains, where an accented character (more precisely, a
* \special character", defined in Section 4) counts as a single
* text character, even if it's missing its matching right brace,
* and where braces don't count as text characters.
*/
private void bstSwap(BstVMVisitor visitor, ParserRuleContext ctx) {
if (stack.size() < 2) {
throw new BstVMException("Not enough operands on stack for operation swap$ (line %d)".formatted(ctx.start.getLine()));
}
Object f1 = stack.pop();
Object f2 = stack.pop();
stack.push(f1);
stack.push(f2);
}
/**
* text.length$ Pops the top (string) literal, and pushes the number
* of text characters it contains, where an accented character (more
* precisely, a "special character", defined in Section 4) counts as
* a single text character, even if it's missing its matching right
* brace, and where braces don't count as text characters.
*
* From BibTeXing: For the purposes of counting letters in labels,
* BibTEX considers everything contained inside the braces as a
* single letter.
*/
    private void bstTextLength(BstVMVisitor visitor, ParserRuleContext ctx) {
        if (stack.isEmpty()) {
            throw new BstVMException("Not enough operands on stack for operation text.length$ (line %d)".formatted(ctx.start.getLine()));
        }
        Object o1 = stack.pop();
        if (!(o1 instanceof String s)) {
            throw new BstVMException("Can only perform operation on a string text.length$ (line %d)".formatted(ctx.start.getLine()));
        }
        char[] c = s.toCharArray();
        int result = 0; // number of text characters counted so far
        int i = 0; // scan position; the character just consumed is c[i - 1]
        int n = s.length();
        int braceLevel = 0;
        while (i < n) {
            i++;
            if (c[i - 1] == '{') {
                braceLevel++;
                // "{\...}" at brace level 1 is a special character and counts as ONE text character.
                if ((braceLevel == 1) && (i < n)) {
                    if (c[i] == '\\') {
                        i++; // skip over backslash
                        // Consume everything up to the matching right brace of the special character.
                        while ((i < n) && (braceLevel > 0)) {
                            if (c[i] == '}') {
                                braceLevel--;
                            } else if (c[i] == '{') {
                                braceLevel++;
                            }
                            i++;
                        }
                        result++;
                    }
                }
            } else if (c[i - 1] == '}') {
                // Braces are never counted as text characters.
                if (braceLevel > 0) {
                    braceLevel--;
                }
            } else {
                result++;
            }
        }
        stack.push(result);
    }
/**
* Pops the top two literals (the integer literal len and a string
* literal, in that order). It pushes the substring of the (at most) len
* consecutive text characters starting from the beginning of the
* string. This function is similar to substring$, but this one
* considers a \special character", even if it's missing its matching
* right brace, to be a single text character (rather than however many
* ASCII characters it actually comprises), and this function doesn't
* consider braces to be text characters; furthermore, this function
* appends any needed matching right braces.
*/
private void bstTextPrefix(BstVMVisitor visitor, ParserRuleContext ctx) {
if (stack.size() < 2) {
throw new BstVMException("Not enough operands on stack for operation text.prefix$ (line %d)".formatted(ctx.start.getLine()));
}
Object o1 = stack.pop();
if (!(o1 instanceof Integer)) {
LOGGER.warn("An integer is needed as first parameter to text.prefix$ (line {})", ctx.start.getLine());
stack.push("");
return;
}
Object o2 = stack.pop();
if (!(o2 instanceof String)) {
LOGGER.warn("A string is needed as second parameter to text.prefix$ (line {})", ctx.start.getLine());
stack.push("");
return;
}
stack.push(BstTextPrefixer.textPrefix((Integer) o1, (String) o2));
}
/**
* Pops and prints the top of the stack to the log file. It's useful for debugging.
*/
private void bstTop(BstVMVisitor visitor, ParserRuleContext ctx) {
LOGGER.debug("Stack entry {} (line {})", stack.pop(), ctx.start.getLine());
}
/**
* Pushes the current entry's type (book, article, etc.), but pushes
* the null string if the type is either unknown or undefined.
*/
public class BstTypeFunction implements BstFunction {
@Override
public void execute(BstVMVisitor visitor, ParserRuleContext ctx) {
throw new BstVMException("type$ need a context (line %d)".formatted(ctx.start.getLine()));
}
@Override
public void execute(BstVMVisitor visitor, ParserRuleContext ctx, BstEntry bstEntryContext) {
if (bstEntryContext == null) {
this.execute(visitor, ctx);
return;
}
stack.push(bstEntryContext.entry.getType().getName());
}
}
/**
* Pops the top (string) literal and prints it following a warning
* message. This also increments a count of the number of warning
* messages issued.
*/
    private void bstWarning(BstVMVisitor visitor, ParserRuleContext ctx) {
        // Pops the warning text and logs it; the bstWarning counter tracks how many warnings were issued.
        LOGGER.warn("Warning (#{}): {}", bstWarning++, stack.pop());
    }
/**
* Pops the top two (function) literals, and keeps executing the
* second as long as the (integer) literal left on the stack by
* executing the first is greater than 0.
*/
private void bstWhile(BstVMVisitor visitor, ParserRuleContext ctx) {
if (stack.size() < 2) {
throw new BstVMException("Not enough operands on stack for operation while$ (line %d)".formatted(ctx.start.getLine()));
}
Object f2 = stack.pop();
Object f1 = stack.pop();
if (!((f1 instanceof BstVMVisitor.Identifier) || (f1 instanceof ParseTree))
&& ((f2 instanceof BstVMVisitor.Identifier) || (f2 instanceof ParseTree))) {
throw new BstVMException("Expecting two functions for while$ (line %d)".formatted(ctx.start.getLine()));
}
do {
visitor.visit((ParseTree) f1);
Object i = stack.pop();
if (!(i instanceof Integer)) {
throw new BstVMException("First parameter to while has to return an integer but was %s (line %d)"
.formatted(i.toString(), ctx.start.getLine()));
}
if ((Integer) i <= 0) {
break;
}
visitor.visit((ParseTree) f2);
} while (true);
}
/**
* The |built_in| function {\.{width\$}} pops the top (string) literal and
* pushes the integer that represents its width in units specified by the
* |char_width| array. This function takes the literal literally; that is, it
* assumes each character in the string is to be printed as is, regardless of
* whether the character has a special meaning to \TeX, except that special
* characters (even without their |right_brace|s) are handled specially. If the
* literal isn't a string, it complains and pushes~0.
*/
private void bstWidth(BstVMVisitor visitor, ParserRuleContext ctx) {
if (stack.isEmpty()) {
throw new BstVMException("Not enough operands on stack for operation width$ (line %d)".formatted(ctx.start.getLine()));
}
Object o1 = stack.pop();
if (!(o1 instanceof String)) {
LOGGER.warn("A string is needed for width$");
stack.push(0);
return;
}
stack.push(BstWidthCalculator.width((String) o1));
}
/**
* Pops the top (string) literal and writes it on the output buffer
* (which will result in stuff being written onto the bbl file when
* the buffer fills up).
*/
private void bstWrite(BstVMVisitor visitor, ParserRuleContext ctx) {
String s = (String) stack.pop();
bbl.append(s);
}
}
| 36,591
| 38.261803
| 155
|
java
|
null |
jabref-main/src/main/java/org/jabref/logic/bst/BstPreviewLayout.java
|
package org.jabref.logic.bst;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.List;
import org.jabref.logic.cleanup.ConvertToBibtexCleanup;
import org.jabref.logic.formatter.bibtexfields.RemoveNewlinesFormatter;
import org.jabref.logic.l10n.Localization;
import org.jabref.logic.layout.format.LatexToUnicodeFormatter;
import org.jabref.logic.layout.format.RemoveLatexCommandsFormatter;
import org.jabref.logic.layout.format.RemoveTilde;
import org.jabref.logic.preview.PreviewLayout;
import org.jabref.model.database.BibDatabaseContext;
import org.jabref.model.entry.BibEntry;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * A preview layout that renders entries using a classic BibTeX style file (.bst)
 * and post-processes the resulting LaTeX into plain text.
 */
public class BstPreviewLayout implements PreviewLayout {
    private static final Logger LOGGER = LoggerFactory.getLogger(BstPreviewLayout.class);
    private final String name;
    private BstVM bstVM;
    // Localized error message; non-null if the .bst file could not be read or parsed
    private String error;
    /**
     * Loads the .bst program at the given path. On failure, the error is stored
     * and later returned by {@link #generatePreview} instead of a rendering.
     */
    public BstPreviewLayout(Path path) {
        name = path.getFileName().toString();
        if (!Files.exists(path)) {
            LOGGER.error("File {} not found", path.toAbsolutePath());
            error = Localization.lang("Error opening file '%0'.", path.toString());
            return;
        }
        try {
            bstVM = new BstVM(path);
        } catch (Exception e) {
            LOGGER.error("Could not read {}.", path.toAbsolutePath(), e);
            error = Localization.lang("Error opening file '%0'.", path.toString());
        }
    }
    /**
     * Renders the given entry with the .bst program and strips the LaTeX markup
     * from the output. The replacement steps below are order-dependent.
     *
     * @return the plain-text reference, or the stored error message if the style could not be loaded
     */
    @Override
    public String generatePreview(BibEntry originalEntry, BibDatabaseContext databaseContext) {
        if (error != null) {
            return error;
        }
        // ensure that the entry is of BibTeX format (and do not modify the original entry)
        BibEntry entry = (BibEntry) originalEntry.clone();
        new ConvertToBibtexCleanup().cleanup(entry);
        String result = bstVM.render(List.of(entry));
        // Remove all comments
        result = result.replaceAll("%.*", "");
        // Remove all LaTeX comments
        // The RemoveLatexCommandsFormatter keeps the words inside latex environments. Therefore, we remove them manually
        result = result.replace("\\begin{thebibliography}{1}", "");
        result = result.replace("\\end{thebibliography}", "");
        // The RemoveLatexCommandsFormatter keeps the word inside the latex command, but we want to remove that completely
        result = result.replaceAll("\\\\bibitem[{].*[}]", "");
        // We want to replace \newblock by a space instead of completely removing it
        result = result.replace("\\newblock", " ");
        // remove all latex commands statements - assumption: command in a separate line
        result = result.replaceAll("(?m)^\\\\.*$", "");
        // remove some IEEEtran.bst output (resulting from a multiline \providecommand)
        result = result.replace("#2}}", "");
        // Have quotes right - and more
        result = new LatexToUnicodeFormatter().format(result);
        result = result.replace("``", "\"");
        result = result.replace("''", "\"");
        // Final cleanup
        result = new RemoveNewlinesFormatter().format(result);
        result = new RemoveLatexCommandsFormatter().format(result);
        result = new RemoveTilde().format(result);
        result = result.trim().replaceAll(" +", " ");
        return result;
    }
    @Override
    public String getDisplayName() {
        return name;
    }
    @Override
    public String getName() {
        return name;
    }
}
| 3,498
| 38.314607
| 122
|
java
|
null |
jabref-main/src/main/java/org/jabref/logic/bst/BstVM.java
|
package org.jabref.logic.bst;
import java.io.IOException;
import java.nio.file.Path;
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
import java.util.Objects;
import java.util.Stack;
import org.jabref.model.database.BibDatabase;
import org.jabref.model.entry.BibEntry;
import org.antlr.v4.runtime.BailErrorStrategy;
import org.antlr.v4.runtime.BaseErrorListener;
import org.antlr.v4.runtime.CharStream;
import org.antlr.v4.runtime.CharStreams;
import org.antlr.v4.runtime.CommonTokenStream;
import org.antlr.v4.runtime.RecognitionException;
import org.antlr.v4.runtime.Recognizer;
import org.antlr.v4.runtime.misc.ParseCancellationException;
import org.antlr.v4.runtime.tree.ParseTree;
/**
 * Parses a .bst program and renders bibliography entries with it.
 */
public class BstVM {
    protected static final Integer FALSE = 0;
    protected static final Integer TRUE = 1;

    protected final ParseTree tree;
    protected BstVMContext latestContext; // for testing
    private Path path = null;

    public BstVM(Path path) throws RecognitionException, IOException {
        this(CharStreams.fromPath(path));
        this.path = path;
    }

    public BstVM(String s) throws RecognitionException {
        this(CharStreams.fromString(s));
    }

    protected BstVM(CharStream bst) throws RecognitionException {
        this(charStream2CommonTree(bst));
    }

    private BstVM(ParseTree tree) {
        this.tree = tree;
    }

    /** Lexes and parses the given character stream into a .bst parse tree, failing fast on syntax errors. */
    private static ParseTree charStream2CommonTree(CharStream query) {
        BstLexer bstLexer = new BstLexer(query);
        bstLexer.removeErrorListeners();
        bstLexer.addErrorListener(ThrowingErrorListener.INSTANCE);
        BstParser bstParser = new BstParser(new CommonTokenStream(bstLexer));
        bstParser.removeErrorListeners();
        bstParser.addErrorListener(ThrowingErrorListener.INSTANCE);
        bstParser.setErrorHandler(new BailErrorStrategy());
        return bstParser.bstFile();
    }

    /**
     * Transforms the given list of BibEntries to a rendered list of references using the parsed bst program
     *
     * @param bibEntries list of entries to convert
     * @param bibDatabase (may be null) the bibDatabase used for resolving strings / crossref
     * @return list of references in plain text form
     */
    public String render(Collection<BibEntry> bibEntries, BibDatabase bibDatabase) {
        Objects.requireNonNull(bibEntries);
        StringBuilder renderedResult = new StringBuilder();
        List<BstEntry> entries = new ArrayList<>(bibEntries.size());
        bibEntries.forEach(bibEntry -> entries.add(new BstEntry(bibEntry)));
        BstVMContext context = new BstVMContext(entries, bibDatabase, path);
        context.functions().putAll(new BstFunctions(context, renderedResult).getBuiltInFunctions());
        context.integers().put("entry.max$", Integer.MAX_VALUE);
        context.integers().put("global.max$", Integer.MAX_VALUE);
        new BstVMVisitor(context, renderedResult).visit(tree);
        latestContext = context;
        return renderedResult.toString();
    }

    public String render(Collection<BibEntry> bibEntries) {
        return render(bibEntries, null);
    }

    protected Stack<Object> getStack() {
        if (latestContext == null) {
            throw new BstVMException("BstVM must have rendered at least once to provide the latest stack");
        }
        return latestContext.stack();
    }

    private static class ThrowingErrorListener extends BaseErrorListener {
        public static final ThrowingErrorListener INSTANCE = new ThrowingErrorListener();

        @Override
        public void syntaxError(Recognizer<?, ?> recognizer, Object offendingSymbol,
                                int line, int charPositionInLine, String msg, RecognitionException e)
                throws ParseCancellationException {
            throw new ParseCancellationException("line " + line + ":" + charPositionInLine + " " + msg);
        }
    }
}
| 4,031
| 34.06087
| 108
|
java
|
null |
jabref-main/src/main/java/org/jabref/logic/bst/BstVMContext.java
|
package org.jabref.logic.bst;
import java.nio.file.Path;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.Stack;
import org.jabref.model.database.BibDatabase;
/**
 * Mutable shared state of a single BST program run: the entries being
 * processed, the global string/integer variables, the declared functions,
 * and the operand stack of the virtual machine.
 */
public record BstVMContext(List<BstEntry> entries,
                           Map<String, String> strings,
                           Map<String, Integer> integers,
                           Map<String, BstFunctions.BstFunction> functions,
                           Stack<Object> stack,
                           BibDatabase bibDatabase,
                           Optional<Path> path) {
    // Convenience constructor: starts with empty variable tables and an empty stack.
    public BstVMContext(List<BstEntry> entries, BibDatabase bibDatabase, Path path) {
        this(entries, new HashMap<>(), new HashMap<>(), new HashMap<>(), new Stack<>(), bibDatabase, Optional.ofNullable(path));
    }
}
| 842
| 35.652174
| 128
|
java
|
null |
jabref-main/src/main/java/org/jabref/logic/bst/BstVMException.java
|
package org.jabref.logic.bst;
/**
 * Thrown when the BST virtual machine encounters an error while executing a
 * .bst program (e.g. stack underflow or an operand of the wrong type).
 */
public class BstVMException extends RuntimeException {
    public BstVMException(String string) {
        super(string);
    }

    /**
     * Creates an exception that preserves the underlying cause, so the original
     * stack trace is not lost when wrapping lower-level failures.
     */
    public BstVMException(String string, Throwable cause) {
        super(string, cause);
    }
}
| 160
| 19.125
| 54
|
java
|
null |
jabref-main/src/main/java/org/jabref/logic/bst/BstVMVisitor.java
|
package org.jabref.logic.bst;
import java.util.Collections;
import java.util.Comparator;
import java.util.List;
import java.util.ListIterator;
import java.util.Map;
import org.jabref.logic.bibtex.FieldPreferences;
import org.jabref.logic.bibtex.FieldWriter;
import org.jabref.logic.bibtex.InvalidFieldValueException;
import org.jabref.model.entry.Month;
import org.jabref.model.entry.field.Field;
import org.jabref.model.entry.field.FieldFactory;
import org.jabref.model.entry.field.StandardField;
import org.antlr.v4.runtime.ParserRuleContext;
import org.antlr.v4.runtime.tree.ParseTree;
import org.antlr.v4.runtime.tree.TerminalNode;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * Executes the commands of a parsed .bst program (STRINGS, INTEGERS, FUNCTION,
 * MACRO, READ, EXECUTE, ITERATE, REVERSE, ENTRY, SORT) against the shared
 * {@link BstVMContext}, appending rendered output to the bbl buffer.
 */
class BstVMVisitor extends BstBaseVisitor<Integer> {
    private static final Logger LOGGER = LoggerFactory.getLogger(BstVMVisitor.class);
    private final BstVMContext bstVMContext;
    // Receives the rendered bibliography output
    private final StringBuilder bbl;
    // Entry currently processed by ITERATE/REVERSE; null during EXECUTE
    private BstEntry selectedBstEntry = null;
    // Marker for a quoted function name pushed onto the operand stack
    public record Identifier(String name) {
    }
    public BstVMVisitor(BstVMContext bstVMContext, StringBuilder bbl) {
        this.bstVMContext = bstVMContext;
        this.bbl = bbl;
    }
    // Declares global string variables (initialized to null).
    @Override
    public Integer visitStringsCommand(BstParser.StringsCommandContext ctx) {
        if (ctx.ids.identifier().size() > 20) {
            throw new BstVMException("Strings limit reached");
        }
        for (BstParser.IdentifierContext identifierContext : ctx.ids.identifier()) {
            bstVMContext.strings().put(identifierContext.getText(), null);
        }
        return BstVM.TRUE;
    }
    // Declares global integer variables (initialized to 0).
    @Override
    public Integer visitIntegersCommand(BstParser.IntegersCommandContext ctx) {
        for (BstParser.IdentifierContext identifierContext : ctx.ids.identifier()) {
            bstVMContext.integers().put(identifierContext.getText(), 0);
        }
        return BstVM.TRUE;
    }
    // Registers a user-defined function whose body is evaluated lazily on each call.
    @Override
    public Integer visitFunctionCommand(BstParser.FunctionCommandContext ctx) {
        bstVMContext.functions().put(ctx.id.getText(),
                (visitor, functionContext) -> visitor.visit(ctx.function));
        return BstVM.TRUE;
    }
    // Registers a macro: calling it pushes its replacement text (surrounding quotes stripped).
    @Override
    public Integer visitMacroCommand(BstParser.MacroCommandContext ctx) {
        String replacement = ctx.repl.getText().substring(1, ctx.repl.getText().length() - 1);
        bstVMContext.functions().put(ctx.id.getText(),
                (visitor, functionContext) -> bstVMContext.stack().push(replacement));
        return BstVM.TRUE;
    }
    // Resolves every declared field of every entry (following aliases/crossref via
    // the database) and stores the serialized value in the entry's field map.
    @Override
    public Integer visitReadCommand(BstParser.ReadCommandContext ctx) {
        FieldWriter fieldWriter = new FieldWriter(new FieldPreferences(true, List.of(StandardField.MONTH), Collections.emptyList()));
        for (BstEntry e : bstVMContext.entries()) {
            for (Map.Entry<String, String> mEntry : e.fields.entrySet()) {
                Field field = FieldFactory.parseField(mEntry.getKey());
                String fieldValue = e.entry.getResolvedFieldOrAlias(field, bstVMContext.bibDatabase())
                                           .map(content -> {
                                               try {
                                                   String result = fieldWriter.write(field, content);
                                                   if (result.startsWith("{")) {
                                                       // Strip enclosing {} from the output
                                                       return result.substring(1, result.length() - 1);
                                                   }
                                                   if (field == StandardField.MONTH) {
                                                       // We don't have the internal BibTeX strings at hand.
                                                       // Thus, we look up the full month name in the generic table.
                                                       return Month.parse(result)
                                                                   .map(Month::getFullName)
                                                                   .orElse(result);
                                                   }
                                                   return result;
                                               } catch (
                                                       InvalidFieldValueException invalidFieldValueException) {
                                                   // in case there is something wrong with the content, just return the content itself
                                                   return content;
                                               }
                                           })
                                           .orElse(null);
                mEntry.setValue(fieldValue);
            }
        }
        // Ensure every entry has a crossref slot, even if unset.
        for (BstEntry e : bstVMContext.entries()) {
            if (!e.fields.containsKey(StandardField.CROSSREF.getName())) {
                e.fields.put(StandardField.CROSSREF.getName(), null);
            }
        }
        return BstVM.TRUE;
    }
    // Runs a function once, outside any entry context.
    @Override
    public Integer visitExecuteCommand(BstParser.ExecuteCommandContext ctx) {
        this.selectedBstEntry = null;
        visit(ctx.bstFunction());
        return BstVM.TRUE;
    }
    // Runs a function once per entry, in list order.
    @Override
    public Integer visitIterateCommand(BstParser.IterateCommandContext ctx) {
        for (BstEntry entry : bstVMContext.entries()) {
            this.selectedBstEntry = entry;
            visit(ctx.bstFunction());
        }
        return BstVM.TRUE;
    }
    // Runs a function once per entry, in reverse list order.
    @Override
    public Integer visitReverseCommand(BstParser.ReverseCommandContext ctx) {
        ListIterator<BstEntry> i = bstVMContext.entries().listIterator(bstVMContext.entries().size());
        while (i.hasPrevious()) {
            this.selectedBstEntry = i.previous();
            visit(ctx.bstFunction());
        }
        return BstVM.TRUE;
    }
    @Override
    public Integer visitEntryCommand(BstParser.EntryCommandContext ctx) {
        // ENTRY command contains 3 optionally filled identifier lists:
        // Fields, Integers and Strings
        BstParser.IdListOptContext entryFields = ctx.idListOpt(0);
        for (BstParser.IdentifierContext identifierContext : entryFields.identifier()) {
            for (BstEntry entry : bstVMContext.entries()) {
                entry.fields.put(identifierContext.getText(), null);
            }
        }
        BstParser.IdListOptContext entryIntegers = ctx.idListOpt(1);
        for (BstParser.IdentifierContext identifierContext : entryIntegers.identifier()) {
            for (BstEntry entry : bstVMContext.entries()) {
                entry.localIntegers.put(identifierContext.getText(), 0);
            }
        }
        BstParser.IdListOptContext entryStrings = ctx.idListOpt(2);
        for (BstParser.IdentifierContext identifierContext : entryStrings.identifier()) {
            for (BstEntry entry : bstVMContext.entries()) {
                entry.localStrings.put(identifierContext.getText(), null);
            }
        }
        // "sort.key$" is always available as an entry-local string (used by SORT).
        for (BstEntry entry : bstVMContext.entries()) {
            entry.localStrings.put("sort.key$", null);
        }
        return BstVM.TRUE;
    }
    // Sorts entries by their "sort.key$" entry-local string.
    @Override
    public Integer visitSortCommand(BstParser.SortCommandContext ctx) {
        bstVMContext.entries().sort(Comparator.comparing(o -> (o.localStrings.get("sort.key$"))));
        return BstVM.TRUE;
    }
    @Override
    public Integer visitIdentifier(BstParser.IdentifierContext ctx) {
        resolveIdentifier(ctx.IDENTIFIER().getText(), ctx);
        return BstVM.TRUE;
    }
    /**
     * Looks up a name in the following order: entry-local fields/strings/integers
     * (when an entry is selected), then global strings, global integers, and
     * finally functions (which are executed rather than pushed).
     */
    protected void resolveIdentifier(String name, ParserRuleContext ctx) {
        if (selectedBstEntry != null) {
            if (selectedBstEntry.fields.containsKey(name)) {
                bstVMContext.stack().push(selectedBstEntry.fields.get(name));
                return;
            }
            if (selectedBstEntry.localStrings.containsKey(name)) {
                bstVMContext.stack().push(selectedBstEntry.localStrings.get(name));
                return;
            }
            if (selectedBstEntry.localIntegers.containsKey(name)) {
                bstVMContext.stack().push(selectedBstEntry.localIntegers.get(name));
                return;
            }
        }
        if (bstVMContext.strings().containsKey(name)) {
            bstVMContext.stack().push(bstVMContext.strings().get(name));
            return;
        }
        if (bstVMContext.integers().containsKey(name)) {
            bstVMContext.stack().push(bstVMContext.integers().get(name));
            return;
        }
        if (bstVMContext.functions().containsKey(name)) {
            bstVMContext.functions().get(name).execute(this, ctx);
            return;
        }
        throw new BstVMException("No matching identifier found: " + name);
    }
    @Override
    public Integer visitBstFunction(BstParser.BstFunctionContext ctx) {
        String name = ctx.getChild(0).getText();
        if (bstVMContext.functions().containsKey(name)) {
            // Known function: execute with the currently selected entry (may be null).
            bstVMContext.functions().get(name).execute(this, ctx, selectedBstEntry);
        } else {
            visit(ctx.getChild(0));
        }
        return BstVM.TRUE;
    }
    // Pushes literals onto the stack: strings and integers are unwrapped, quoted
    // names become Identifier markers, and nested code blocks are pushed unevaluated.
    @Override
    public Integer visitStackitem(BstParser.StackitemContext ctx) {
        for (ParseTree childNode : ctx.children) {
            try {
                if (childNode instanceof TerminalNode token) {
                    switch (token.getSymbol().getType()) {
                        case BstParser.STRING -> {
                            String s = token.getText();
                            bstVMContext.stack().push(s.substring(1, s.length() - 1));
                        }
                        case BstParser.INTEGER ->
                                bstVMContext.stack().push(Integer.parseInt(token.getText().substring(1)));
                        case BstParser.QUOTED ->
                                bstVMContext.stack().push(new Identifier(token.getText().substring(1)));
                    }
                } else if (childNode instanceof BstParser.StackContext) {
                    bstVMContext.stack().push(childNode);
                } else {
                    this.visit(childNode);
                }
            } catch (BstVMException e) {
                bstVMContext.path().ifPresentOrElse(
                        path -> LOGGER.error("{} ({})", e.getMessage(), path),
                        () -> LOGGER.error(e.getMessage()));
                throw e;
            }
        }
        return BstVM.TRUE;
    }
}
| 10,619
| 39.227273
| 135
|
java
|
null |
jabref-main/src/main/java/org/jabref/logic/bst/util/BstCaseChanger.java
|
package org.jabref.logic.bst.util;
import java.util.Locale;
import java.util.Optional;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * Implements the case conversion of the BST built-in {@code change.case$}:
 * title-lowers, all-lowers, or all-uppers, while honouring brace groups and
 * LaTeX "special characters" such as {@code {\ae}}.
 */
public final class BstCaseChanger {
    private static final Logger LOGGER = LoggerFactory.getLogger(BstCaseChanger.class);
    // stores whether the char before the current char was a colon
    private boolean prevColon = true;
    // global variable to store the current brace level
    private int braceLevel;
    public enum FormatMode {
        // First character and character after a ":" as upper case - everything else in lower case. Obey {}.
        TITLE_LOWERS('t'),
        // All characters lower case - Obey {}
        ALL_LOWERS('l'),
        // all characters upper case - Obey {}
        ALL_UPPERS('u');
        // the following would have to be done if the functionality of CaseChangers would be included here
        // However, we decided against it and will probably do the other way round: https://github.com/JabRef/jabref/pull/215#issuecomment-146981624
        // Each word should start with a capital letter
        // EACH_FIRST_UPPERS('f'),
        // Converts all words to upper case, but converts articles, prepositions, and conjunctions to lower case
        // Capitalizes first and last word
        // Does not change words starting with "{"
        // DIFFERENCE to old CaseChangers.TITLE: last word is NOT capitalized in all cases
        // TITLE_UPPERS('T');
        private final char asChar;
        FormatMode(char asChar) {
            this.asChar = asChar;
        }
        public char asChar() {
            return asChar;
        }
        /**
         * Convert bstFormat char into ENUM
         *
         * @throws IllegalArgumentException if char is not 't', 'l', 'u'
         */
        public static FormatMode of(final char bstFormat) {
            for (FormatMode mode : FormatMode.values()) {
                if (mode.asChar == bstFormat) {
                    return mode;
                }
            }
            throw new IllegalArgumentException();
        }
        public static FormatMode of(final String bstFormat) {
            return of(bstFormat.toLowerCase(Locale.ROOT).charAt(0));
        }
    }
    private BstCaseChanger() {
    }
    /**
     * Changes case of the given string s
     *
     * @param s the string to handle
     * @param format the format
     */
    public static String changeCase(String s, FormatMode format) {
        return (new BstCaseChanger()).doChangeCase(s, format);
    }
    private String doChangeCase(String s, FormatMode format) {
        char[] c = s.toCharArray();
        StringBuilder sb = new StringBuilder();
        int i = 0;
        int n = s.length();
        while (i < n) {
            if (c[i] == '{') {
                braceLevel++;
                // Not a special character (no "\" right after "{" at level 1): copy verbatim.
                if ((braceLevel != 1) || ((i + 4) > n) || (c[i + 1] != '\\')) {
                    prevColon = false;
                    sb.append(c[i]);
                    i++;
                    continue;
                }
                if ((format == FormatMode.TITLE_LOWERS) && ((i == 0) || (prevColon && Character.isWhitespace(c[i - 1])))) {
                    sb.append('{');
                    i++;
                    prevColon = false;
                    continue;
                }
                i = convertSpecialChar(sb, c, i, format);
                continue;
            }
            if (c[i] == '}') {
                sb.append(c[i]);
                i++;
                if (braceLevel == 0) {
                    // Fixed: use SLF4J parameterized logging instead of string concatenation.
                    LOGGER.warn("Too many closing braces in string: {}", s);
                } else {
                    braceLevel--;
                }
                prevColon = false;
                continue;
            }
            if (braceLevel == 0) {
                i = convertCharIfBraceLevelIsZero(c, i, sb, format);
                continue;
            }
            sb.append(c[i]);
            i++;
        }
        if (braceLevel > 0) {
            // Fixed wording ("Not enough") and switched to parameterized logging.
            LOGGER.warn("Not enough closing braces in string: {}", s);
        }
        return sb.toString();
    }
    /**
     * We're dealing with a special character (usually either an undotted `\i'
     * or `\j', or an accent like one in Table~3.1 of the \LaTeX\ manual, or a
     * foreign character like one in Table~3.2) if the first character after the
     * |left_brace| is a |backslash|; the special character ends with the
     * matching |right_brace|. How we handle what is in between depends on the
     * special character. In general, this code will do reasonably well if there
     * is other stuff, too, between braces, but it doesn't try to do anything
     * special with |colon|s.
     *
     * @param start the current position. It points to the opening brace
     */
    private int convertSpecialChar(StringBuilder sb, char[] c, int start, FormatMode format) {
        int i = start;
        sb.append(c[i]);
        i++; // skip over open brace
        while ((i < c.length) && (braceLevel > 0)) {
            sb.append(c[i]);
            i++;
            // skip over the |backslash|
            Optional<String> s = BstCaseChanger.findSpecialChar(c, i);
            if (s.isPresent()) {
                i = convertAccented(c, i, s.get(), sb, format);
            }
            while ((i < c.length) && (braceLevel > 0) && (c[i] != '\\')) {
                if (c[i] == '}') {
                    braceLevel--;
                } else if (c[i] == '{') {
                    braceLevel++;
                }
                i = convertNonControl(c, i, sb, format);
            }
        }
        return i;
    }
    /**
     * Convert the given string according to the format character (title, lower,
     * up) and append the result to the stringBuffer, return the updated
     * position.
     *
     * @return the new position
     */
    private int convertAccented(char[] c, int start, String s, StringBuilder sb, FormatMode format) {
        int pos = start;
        pos += s.length();
        switch (format) {
            case TITLE_LOWERS:
            case ALL_LOWERS:
                if ("L O OE AE AA".contains(s)) {
                    sb.append(s.toLowerCase(Locale.ROOT));
                } else {
                    sb.append(s);
                }
                break;
            case ALL_UPPERS:
                if ("l o oe ae aa".contains(s)) {
                    sb.append(s.toUpperCase(Locale.ROOT));
                } else if ("i j ss".contains(s)) {
                    sb.deleteCharAt(sb.length() - 1); // Kill backslash
                    sb.append(s.toUpperCase(Locale.ROOT));
                    while ((pos < c.length) && Character.isWhitespace(c[pos])) {
                        pos++;
                    }
                } else {
                    sb.append(s);
                }
                break;
            default:
                // Parameterized logging instead of string concatenation.
                LOGGER.info("convertAccented - Unknown format: {}", format);
                break;
        }
        return pos;
    }
    private int convertNonControl(char[] c, int start, StringBuilder sb, FormatMode format) {
        int pos = start;
        switch (format) {
            case TITLE_LOWERS, ALL_LOWERS -> {
                sb.append(Character.toLowerCase(c[pos]));
                pos++;
            }
            case ALL_UPPERS -> {
                sb.append(Character.toUpperCase(c[pos]));
                pos++;
            }
            default ->
                    // Parameterized logging instead of string concatenation.
                    LOGGER.info("convertNonControl - Unknown format: {}", format);
        }
        return pos;
    }
    private int convertCharIfBraceLevelIsZero(char[] c, int start, StringBuilder sb, FormatMode format) {
        int i = start;
        switch (format) {
            case TITLE_LOWERS -> {
                if ((i == 0) || (prevColon && Character.isWhitespace(c[i - 1]))) {
                    sb.append(c[i]);
                } else {
                    sb.append(Character.toLowerCase(c[i]));
                }
                if (c[i] == ':') {
                    prevColon = true;
                } else if (!Character.isWhitespace(c[i])) {
                    prevColon = false;
                }
            }
            case ALL_LOWERS ->
                    sb.append(Character.toLowerCase(c[i]));
            case ALL_UPPERS ->
                    sb.append(Character.toUpperCase(c[i]));
            default ->
                    // Parameterized logging instead of string concatenation.
                    LOGGER.info("convertCharIfBraceLevelIsZero - Unknown format: {}", format);
        }
        i++;
        return i;
    }
    /**
     * Determine whether there starts a special char at pos (e.g., oe, AE). Return it as string.
     * If nothing found, return Optional.empty()
     *
     * <p>
     * Also used by BibtexPurify
     *
     * @param c the current "String"
     * @param pos the position
     * @return the special LaTeX character or null
     */
    public static Optional<String> findSpecialChar(char[] c, int pos) {
        if ((pos + 1) < c.length) {
            if ((c[pos] == 'o') && (c[pos + 1] == 'e')) {
                return Optional.of("oe");
            }
            if ((c[pos] == 'O') && (c[pos + 1] == 'E')) {
                return Optional.of("OE");
            }
            if ((c[pos] == 'a') && (c[pos + 1] == 'e')) {
                return Optional.of("ae");
            }
            if ((c[pos] == 'A') && (c[pos + 1] == 'E')) {
                return Optional.of("AE");
            }
            if ((c[pos] == 's') && (c[pos + 1] == 's')) {
                return Optional.of("ss");
            }
            if ((c[pos] == 'A') && (c[pos + 1] == 'A')) {
                return Optional.of("AA");
            }
            if ((c[pos] == 'a') && (c[pos + 1] == 'a')) {
                return Optional.of("aa");
            }
        }
        if ("ijoOlL".indexOf(c[pos]) >= 0) {
            return Optional.of(String.valueOf(c[pos]));
        }
        return Optional.empty();
    }
}
| 9,968
| 32.565657
| 148
|
java
|
null |
jabref-main/src/main/java/org/jabref/logic/bst/util/BstNameFormatter.java
|
package org.jabref.logic.bst.util;
import java.util.Arrays;
import java.util.Locale;
import java.util.Optional;
import java.util.stream.Collectors;
import org.jabref.logic.bst.BstVMException;
import org.jabref.model.entry.Author;
import org.jabref.model.entry.AuthorList;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* From Bibtex:
*
* "The |built_in| function {\.{format.name\$}} pops the
* top three literals (they are a string, an integer, and a string
* literal, in that order). The last string literal represents a
* name list (each name corresponding to a person), the integer
* literal specifies which name to pick from this list, and the
* first string literal specifies how to format this name, as
* described in the \BibTeX\ documentation. Finally, this function
* pushes the formatted name. If any of the types is incorrect, it
* complains and pushes the null string."
*
* Sounds easy - is a nightmare... X-(
*
*/
public class BstNameFormatter {
private static final Logger LOGGER = LoggerFactory.getLogger(BstNameFormatter.class);
    private BstNameFormatter() {
        // Utility class - prevent instantiation.
    }
/**
* Formats the nth author of the author name list by a given format string
*
* @param authorsNameList The string from an author field
* @param whichName index of the list, starting with 1
* @param formatString TODO
*/
public static String formatName(String authorsNameList, int whichName, String formatString) {
AuthorList al = AuthorList.parse(authorsNameList);
if ((whichName < 1) && (whichName > al.getNumberOfAuthors())) {
LOGGER.warn("AuthorList {} does not contain an author with number {}", authorsNameList, whichName);
return "";
}
return BstNameFormatter.formatName(al.getAuthor(whichName - 1), formatString);
}
public static String formatName(Author author, String format) {
StringBuilder sb = new StringBuilder();
char[] c = format.toCharArray();
int n = c.length;
int braceLevel = 0;
int group = 0;
int i = 0;
while (i < n) {
if (c[i] == '{') {
group++;
i++;
braceLevel++;
StringBuilder level1Chars = new StringBuilder();
StringBuilder wholeChar = new StringBuilder();
while ((i < n) && (braceLevel > 0)) {
wholeChar.append(c[i]);
if (c[i] == '{') {
braceLevel++;
i++;
continue;
}
if (c[i] == '}') {
braceLevel--;
i++;
continue;
}
if ((braceLevel == 1) && Character.isLetter(c[i])) {
if ("fvlj".indexOf(c[i]) == -1) {
LOGGER.warn("Format string in format.name$ may only contain fvlj on brace level 1 in group {}: {}", group, format);
} else {
level1Chars.append(c[i]);
}
}
i++;
}
i--; // unskip last brace (for last i++ at the end)
String control = level1Chars.toString().toLowerCase(Locale.ROOT);
if (control.isEmpty()) {
continue;
}
if (control.length() > 2) {
LOGGER.warn("Format string in format.name$ may only be one or two character long on brace level 1 in group {}: {}", group, format);
}
char type = control.charAt(0);
Optional<String> tokenS = switch (type) {
case 'f' ->
author.getFirst();
case 'v' ->
author.getVon();
case 'l' ->
author.getLast();
case 'j' ->
author.getJr();
default ->
throw new BstVMException("Internal error");
};
if (tokenS.isEmpty()) {
i++;
continue;
}
String[] tokens = tokenS.get().split(" ");
boolean abbreviateThatIsSingleLetter = true;
if (control.length() == 2) {
if (control.charAt(1) == control.charAt(0)) {
abbreviateThatIsSingleLetter = false;
} else {
LOGGER.warn("Format string in format.name$ may only contain one type of vlfj on brace level 1 in group {}: {}", group, format);
}
}
// Now we know what to do
if ((braceLevel == 0) && (wholeChar.charAt(wholeChar.length() - 1) == '}')) {
wholeChar.deleteCharAt(wholeChar.length() - 1);
}
char[] d = wholeChar.toString().toCharArray();
int bLevel = 1;
String interToken = null;
int groupStart = sb.length();
for (int j = 0; j < d.length; j++) {
if (Character.isLetter(d[j]) && (bLevel == 1)) {
groupStart = sb.length();
if (!abbreviateThatIsSingleLetter) {
j++;
}
if (((j + 1) < d.length) && (d[j + 1] == '{')) {
StringBuilder interTokenSb = new StringBuilder();
j = BstNameFormatter.consumeToMatchingBrace(interTokenSb, d, j + 1);
interToken = interTokenSb.substring(1, interTokenSb.length() - 1);
}
for (int k = 0; k < tokens.length; k++) {
String token = tokens[k];
if (abbreviateThatIsSingleLetter) {
String[] dashes = token.split("-");
token = Arrays.stream(dashes).map(BstNameFormatter::getFirstCharOfString)
.collect(Collectors.joining(".-"));
}
// Output token
sb.append(token);
if (k < (tokens.length - 1)) {
// Output Intertoken String
if (interToken == null) {
if (abbreviateThatIsSingleLetter) {
sb.append('.');
}
// No clue what this means (What the hell are tokens anyway???
// if (lex_class[name_sep_char[cur_token]] = sep_char) then
// append_ex_buf_char_and_check (name_sep_char[cur_token])
if ((k == (tokens.length - 2)) || (BstNameFormatter.numberOfChars(sb.substring(groupStart, sb.length()), 3) < 3)) {
sb.append('~');
} else {
sb.append(' ');
}
} else {
sb.append(interToken);
}
}
}
} else if (d[j] == '}') {
bLevel--;
if (bLevel > 0) {
sb.append('}');
}
} else if (d[j] == '{') {
bLevel++;
sb.append('{');
} else {
sb.append(d[j]);
}
}
if (sb.length() > 0) {
boolean noDisTie = false;
if ((sb.charAt(sb.length() - 1) == '~') &&
((BstNameFormatter.numberOfChars(sb.substring(groupStart, sb.length()), 4) >= 4) ||
((sb.length() > 1) && (noDisTie = sb.charAt(sb.length() - 2) == '~')))) {
sb.deleteCharAt(sb.length() - 1);
if (!noDisTie) {
sb.append(' ');
}
}
}
} else if (c[i] == '}') {
LOGGER.warn("Unmatched brace in format string: {}", format);
} else {
sb.append(c[i]); // verbatim
}
i++;
}
if (braceLevel != 0) {
LOGGER.warn("Unbalanced brace in format string for nameFormat: {}", format);
}
return sb.toString();
}
/**
* Including the matching brace.
*/
public static int consumeToMatchingBrace(StringBuilder interTokenSb, char[] c, int pos) {
int braceLevel = 0;
for (int i = pos; i < c.length; i++) {
if (c[i] == '}') {
braceLevel--;
if (braceLevel == 0) {
interTokenSb.append('}');
return i;
}
} else if (c[i] == '{') {
braceLevel++;
}
interTokenSb.append(c[i]);
}
return c.length;
}
/**
* Takes care of special characters too
*/
public static String getFirstCharOfString(String s) {
char[] c = s.toCharArray();
for (int i = 0; i < c.length; i++) {
if (Character.isLetter(c[i])) {
return String.valueOf(c[i]);
}
if ((c[i] == '{') && ((i + 1) < c.length) && (c[i + 1] == '\\')) {
StringBuilder sb = new StringBuilder();
BstNameFormatter.consumeToMatchingBrace(sb, c, i);
return sb.toString();
}
}
return "";
}
public static int numberOfChars(String token, int inStop) {
int stop = inStop;
if (stop < 0) {
stop = Integer.MAX_VALUE;
}
int result = 0;
int i = 0;
char[] c = token.toCharArray();
int n = c.length;
int braceLevel = 0;
while ((i < n) && (result < stop)) {
i++;
if (c[i - 1] == '{') {
braceLevel++;
if ((braceLevel == 1) && (i < n) && (c[i] == '\\')) {
i++;
while ((i < n) && (braceLevel > 0)) {
if (c[i] == '}') {
braceLevel--;
} else if (c[i] == '{') {
braceLevel++;
}
i++;
}
}
} else if (c[i - 1] == '}') {
braceLevel--;
}
result++;
}
return result;
}
}
| 11,244
| 36.483333
| 151
|
java
|
null |
jabref-main/src/main/java/org/jabref/logic/bst/util/BstPurifier.java
|
package org.jabref.logic.bst.util;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
*
* The |built_in| function {\.{purify\$}} pops the top (string) literal, removes
* nonalphanumeric characters except for |white_space| and |sep_char| characters
* (these get converted to a |space|) and removes certain alphabetic characters
* contained in the control sequences associated with a special character, and
* pushes the resulting string. If the literal isn't a string, it complains and
* pushes the null string.
*
*/
public class BstPurifier {

    private static final Logger LOGGER = LoggerFactory.getLogger(BstPurifier.class);

    private BstPurifier() {
    }

    /**
     * Implements BibTeX's purify$: keeps letters and digits, converts whitespace
     * plus the |sep_char|s '-' and '~' to a single space, drops braces, and for a
     * "special character" ({\...}) keeps the base letters resolved by
     * {@code BstCaseChanger.findSpecialChar} plus following alphanumerics while
     * discarding the control-sequence letters themselves.
     *
     * @param toPurify the string to purify
     * @return the purified string
     */
    public static String purify(String toPurify) {
        StringBuilder sb = new StringBuilder();
        char[] cs = toPurify.toCharArray();
        int n = cs.length;
        int i = 0;
        int braceLevel = 0;
        while (i < n) {
            char c = cs[i];
            if (Character.isWhitespace(c) || (c == '-') || (c == '~')) {
                // |white_space| and |sep_char| characters become a single space
                sb.append(' ');
            } else if (Character.isLetterOrDigit(c)) {
                sb.append(c);
            } else if (c == '{') {
                braceLevel++;
                // "{\" at brace level 1 starts a special character, e.g. {\"{a}} or {\ss}
                if ((braceLevel == 1) && ((i + 1) < n) && (cs[i + 1] == '\\')) {
                    i++; // skip brace
                    while ((i < n) && (braceLevel > 0)) {
                        i++; // skip backslash
                        // keep the replacement text of the 13 known accented/foreign characters
                        BstCaseChanger.findSpecialChar(cs, i).ifPresent(sb::append);
                        // skip the remaining letters of the control sequence itself
                        while ((i < n) && Character.isLetter(cs[i])) {
                            i++;
                        }
                        // copy alphanumerics up to the next control sequence or the closing brace
                        while ((i < n) && (braceLevel > 0) && ((c = cs[i]) != '\\')) {
                            if (Character.isLetterOrDigit(c)) {
                                sb.append(c);
                            } else if (c == '}') {
                                braceLevel--;
                            } else if (c == '{') {
                                braceLevel++;
                            }
                            i++;
                        }
                    }
                    continue; // i already points past the consumed region
                }
            } else if (c == '}') {
                if (braceLevel > 0) {
                    braceLevel--;
                } else {
                    LOGGER.warn("Unbalanced brace in string for purify$: {}", toPurify);
                }
            }
            i++;
        }
        if (braceLevel != 0) {
            LOGGER.warn("Unbalanced brace in string for purify$: {}", toPurify);
        }
        return sb.toString();
    }
}
| 2,643
| 33.337662
| 88
|
java
|
null |
jabref-main/src/main/java/org/jabref/logic/bst/util/BstTextPrefixer.java
|
package org.jabref.logic.bst.util;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* The |built_in| function {\.{text.prefix\$}} pops the top two literals (the
* integer literal |pop_lit1| and a string literal, in that order). It pushes
* the substring of the (at most) |pop_lit1| consecutive text characters
* starting from the beginning of the string. This function is similar to
* {\.{substring\$}}, but this one considers an accented character (or more
* precisely, a ``special character''$\!$, even if it's missing its matching
* |right_brace|) to be a single text character (rather than however many
* |ASCII_code| characters it actually comprises), and this function doesn't
* consider braces to be text characters; furthermore, this function appends any
* needed matching |right_brace|s. If any of the types is incorrect, it
* complains and pushes the null string.
*
*/
public class BstTextPrefixer {

    private static final Logger LOGGER = LoggerFactory.getLogger(BstTextPrefixer.class);

    private BstTextPrefixer() {
    }

    /**
     * Implements BibTeX's text.prefix$: returns the prefix of {@code toPrefix}
     * consisting of at most {@code inNumOfChars} text characters. A special
     * character ({\...}) counts as a single text character, braces count as none,
     * and any braces left open by the cut are closed in the result.
     *
     * @param inNumOfChars maximum number of text characters to keep
     * @param toPrefix     the string to take the prefix of
     * @return the prefix, with matching right braces appended as needed
     */
    public static String textPrefix(int inNumOfChars, String toPrefix) {
        int numOfChars = inNumOfChars;
        StringBuilder sb = new StringBuilder();
        char[] cs = toPrefix.toCharArray();
        int n = cs.length;
        int i = 0;
        int braceLevel = 0;
        while ((i < n) && (numOfChars > 0)) {
            char c = cs[i];
            i++;
            if (c == '{') {
                braceLevel++;
                // "{\" at brace level 1 starts a special character, which counts as ONE text char.
                if ((braceLevel == 1) && (i < n) && (cs[i] == '\\')) {
                    i++; // skip backslash
                    while ((i < n) && (braceLevel > 0)) {
                        if (cs[i] == '}') {
                            braceLevel--;
                        } else if (cs[i] == '{') {
                            braceLevel++;
                        }
                        i++;
                    }
                    numOfChars--;
                }
                // a plain brace is not a text character and does not consume the budget
            } else if (c == '}') {
                if (braceLevel > 0) {
                    braceLevel--;
                } else {
                    // Fixed: the message wrongly referred to purify$ (copy-paste error).
                    LOGGER.warn("Unbalanced brace in string for text.prefix$: {}", toPrefix);
                }
            } else {
                numOfChars--;
            }
        }
        sb.append(toPrefix, 0, i);
        // close any braces the cut left open
        while (braceLevel > 0) {
            sb.append('}');
            braceLevel--;
        }
        return sb.toString();
    }
}
| 2,461
| 33.194444
| 88
|
java
|
null |
jabref-main/src/main/java/org/jabref/logic/bst/util/BstWidthCalculator.java
|
package org.jabref.logic.bst.util;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * The |built_in| function {\.{width\$}} pops the top (string) literal and
 * pushes the integer that represents its width in units specified by the
 * |char_width| array. This function takes the literal literally; that is, it
 * assumes each character in the string is to be printed as is, regardless of
 * whether the character has a special meaning to \TeX, except that special
 * characters (even without their |right_brace|s) are handled specially. If the
 * literal isn't a string, it complains and pushes~0.
 */
public class BstWidthCalculator {

    private static final Logger LOGGER = LoggerFactory.getLogger(BstWidthCalculator.class);

    /*
     * Quoted from Bibtex:
     *
     * Now we initialize the system-dependent |char_width| array, for which
     * |space| is the only |white_space| character given a nonzero printing
     * width. The widths here are taken from Stanford's June~'87 $cmr10$~font
     * and represent hundredths of a point (rounded), but since they're used
     * only for relative comparisons, the units have no meaning.
     */
    private static final int[] WIDTHS = new int[128];

    static {
        // Widths of the printable ASCII range 32..126, in ascending code-point order.
        // All other slots keep 0: Java zero-initializes arrays, so the former
        // null-check and explicit zeroing loop were redundant.
        int[] printable = {
                // sp    !    "    #    $    %    &    '    (    )
                278, 278, 500, 833, 500, 833, 778, 278, 389, 389,
                //  *    +    ,    -    .    /    0    1    2    3
                500, 778, 278, 333, 278, 500, 500, 500, 500, 500,
                //  4    5    6    7    8    9    :    ;    <    =
                500, 500, 500, 500, 500, 500, 278, 278, 278, 778,
                //  >    ?    @    A    B    C    D    E    F    G
                472, 472, 778, 750, 708, 722, 764, 681, 653, 785,
                //  H    I    J    K    L    M    N    O    P    Q
                750, 361, 514, 778, 625, 917, 750, 778, 681, 778,
                //  R    S    T    U    V    W     X    Y    Z    [
                736, 556, 722, 750, 750, 1028, 750, 750, 611, 278,
                //  \    ]    ^    _    `    a    b    c    d    e
                500, 278, 500, 278, 278, 500, 556, 444, 556, 444,
                //  f    g    h    i    j    k    l    m    n    o
                306, 500, 556, 278, 306, 528, 278, 833, 556, 500,
                //  p    q    r    s    t    u    v    w    x    y
                556, 528, 392, 394, 389, 556, 528, 722, 528, 528,
                //  z    {    |     }    ~
                444, 500, 1000, 500, 500
        };
        System.arraycopy(printable, 0, WIDTHS, 32, printable.length);
    }

    private BstWidthCalculator() {
    }

    /**
     * Width of the special character starting at {@code pos}; the 13 accented and
     * foreign characters get dedicated widths, everything else falls back to the
     * width of its first character.
     */
    private static int getSpecialCharWidth(char[] c, int pos) {
        if ((pos + 1) < c.length) {
            if ((c[pos] == 'o') && (c[pos + 1] == 'e')) {
                return 778;
            }
            if ((c[pos] == 'O') && (c[pos + 1] == 'E')) {
                return 1014;
            }
            if ((c[pos] == 'a') && (c[pos + 1] == 'e')) {
                return 722;
            }
            if ((c[pos] == 'A') && (c[pos + 1] == 'E')) {
                return 903;
            }
            if ((c[pos] == 's') && (c[pos + 1] == 's')) {
                return 500;
            }
        }
        return BstWidthCalculator.getCharWidth(c[pos]);
    }

    /**
     * Width of a single character; 0 for anything outside the ASCII table.
     */
    public static int getCharWidth(char c) {
        // char is unsigned in Java, so only the upper bound needs checking
        // (the original "c >= 0" was always true).
        if (c < 128) {
            return BstWidthCalculator.WIDTHS[c];
        } else {
            return 0;
        }
    }

    /**
     * Implements BibTeX's width$: the relative print width of {@code toMeasure}.
     *
     * @param toMeasure the string to measure
     * @return the accumulated width in $cmr10$ hundredths of a point
     */
    public static int width(String toMeasure) {
        /*
         * From Bibtex: We use the natural width for all but special characters,
         * and we complain if the string isn't brace-balanced.
         */
        int i = 0;
        int n = toMeasure.length();
        int braceLevel = 0;
        char[] c = toMeasure.toCharArray();
        int result = 0;

        /*
         * From Bibtex:
         *
         * We use the natural widths of all characters except that some
         * characters have no width: braces, control sequences (except for the
         * usual 13 accented and foreign characters, whose widths are given in
         * the next module), and |white_space| following control sequences (even
         * a null control sequence).
         */
        while (i < n) {
            if (c[i] == '{') {
                braceLevel++;
                // "{\" at brace level 1 starts a special character.
                if ((braceLevel == 1) && ((i + 1) < n) && (c[i + 1] == '\\')) {
                    i++; // skip brace
                    while ((i < n) && (braceLevel > 0)) {
                        i++; // skip backslash
                        int afterBackslash = i;
                        while ((i < n) && Character.isLetter(c[i])) {
                            i++;
                        }
                        if ((i < n) && (i == afterBackslash)) {
                            i++; // Skip non-alpha control seq
                        } else {
                            if (BstCaseChanger.findSpecialChar(c, afterBackslash).isPresent()) {
                                result += BstWidthCalculator.getSpecialCharWidth(c, afterBackslash);
                            }
                        }
                        // whitespace after a control sequence has no width
                        while ((i < n) && Character.isWhitespace(c[i])) {
                            i++;
                        }
                        while ((i < n) && (braceLevel > 0) && (c[i] != '\\')) {
                            if (c[i] == '}') {
                                braceLevel--;
                            } else if (c[i] == '{') {
                                braceLevel++;
                            } else {
                                result += BstWidthCalculator.getCharWidth(c[i]);
                            }
                            i++;
                        }
                    }
                    continue; // i already points past the consumed region
                }
            } else if (c[i] == '}') {
                if (braceLevel > 0) {
                    braceLevel--;
                } else {
                    // parameterized logging instead of string concatenation
                    LOGGER.warn("Too many closing braces in string: {}", toMeasure);
                }
            }
            result += BstWidthCalculator.getCharWidth(c[i]);
            i++;
        }
        if (braceLevel > 0) {
            LOGGER.warn("Not enough closing braces in string: {}", toMeasure);
        }
        return result;
    }
}
| 9,805
| 39.520661
| 100
|
java
|
null |
jabref-main/src/main/java/org/jabref/logic/citationkeypattern/AbstractCitationKeyPattern.java
|
package org.jabref.logic.citationkeypattern;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.Set;
import java.util.StringTokenizer;
import java.util.stream.Collectors;
import org.jabref.model.entry.types.EntryType;
/**
* A small table, where an entry type is associated with a Bibtex key pattern (an
* <code>ArrayList</code>). A parent CitationKeyPattern can be set.
*/
public abstract class AbstractCitationKeyPattern {

    // Pattern used when no entry-type-specific pattern is set.
    protected List<String> defaultPattern = new ArrayList<>();

    // Entry-type-specific patterns, each stored in the split form produced by split().
    protected Map<EntryType, List<String>> data = new HashMap<>();

    /**
     * This method takes a string of the form [field1]spacer[field2]spacer[field3]..., where the fields are the
     * (required) fields of a BibTex entry. The string is split into fields and spacers by recognizing the [ and ].
     *
     * @param bibtexKeyPattern a <code>String</code>
     * @return an <code>ArrayList</code> The first item of the list is a string representation of the key pattern (the
     * parameter), the remaining items are the fields
     */
    public static List<String> split(String bibtexKeyPattern) {
        // A holder for fields of the entry to be used for the key
        List<String> fieldList = new ArrayList<>();

        // Before we do anything, we add the parameter to the ArrayList
        fieldList.add(bibtexKeyPattern);

        StringTokenizer tok = new StringTokenizer(bibtexKeyPattern, "[]", true);
        while (tok.hasMoreTokens()) {
            fieldList.add(tok.nextToken());
        }
        return fieldList;
    }

    /**
     * Stores the given pattern for the given entry type, replacing any previous one.
     */
    public void addCitationKeyPattern(EntryType type, String pattern) {
        data.put(type, AbstractCitationKeyPattern.split(pattern));
    }

    @Override
    public String toString() {
        // Plain concatenation produces the same string as the former StringBuilder chain.
        return "AbstractCitationKeyPattern{" +
                "defaultPattern=" + defaultPattern +
                ", data=" + data +
                '}';
    }

    @Override
    public boolean equals(Object o) {
        if (this == o) {
            return true;
        }
        if ((o == null) || (getClass() != o.getClass())) {
            return false;
        }
        AbstractCitationKeyPattern that = (AbstractCitationKeyPattern) o;
        return Objects.equals(defaultPattern, that.defaultPattern) && Objects.equals(data, that.data);
    }

    @Override
    public int hashCode() {
        return Objects.hash(defaultPattern, data);
    }

    /**
     * Gets the pattern for a desired key from this CitationKeyPattern or one of its parents (in the case of
     * DatabaseCitationKeyPattern). This method first looks the entry type up in this pattern's own map. If that
     * fails, the default pattern is tried.<br /> If that fails too, {@link #getLastLevelCitationKeyPattern(EntryType)}
     * decides (e.g. by asking a parent or falling back to the global default).
     *
     * @param entryType the entry type to look up
     * @return the list of Strings for the given key. First entry: the complete key
     */
    public List<String> getValue(EntryType entryType) {
        List<String> result = data.get(entryType);
        // Test to see if we found anything
        if (result == null) {
            // check default value
            result = getDefaultValue();
            if (result == null || result.isEmpty()) {
                // we are the "last" to ask
                // we don't have anything left
                return getLastLevelCitationKeyPattern(entryType);
            }
        }
        return result;
    }

    /**
     * Checks whether this pattern is customized or the default value.
     */
    public final boolean isDefaultValue(EntryType entryType) {
        return data.get(entryType) == null;
    }

    /**
     * This method is called "...Value" to be in line with the other methods
     *
     * @return null if not available.
     */
    public List<String> getDefaultValue() {
        return this.defaultPattern;
    }

    /**
     * Sets the DEFAULT PATTERN for this key pattern
     *
     * @param bibtexKeyPattern the pattern to store
     */
    public void setDefaultValue(String bibtexKeyPattern) {
        Objects.requireNonNull(bibtexKeyPattern);
        this.defaultPattern = AbstractCitationKeyPattern.split(bibtexKeyPattern);
    }

    public Set<EntryType> getAllKeys() {
        return data.keySet();
    }

    /**
     * @return a mutable copy of the entry-type-to-pattern map
     */
    public Map<EntryType, List<String>> getPatterns() {
        // Copy constructor is equivalent to (and simpler than) streaming the
        // entry set through Collectors.toMap.
        return new HashMap<>(data);
    }

    public abstract List<String> getLastLevelCitationKeyPattern(EntryType key);
}
| 4,750
| 33.933824
| 120
|
java
|
null |
jabref-main/src/main/java/org/jabref/logic/citationkeypattern/BracketedPattern.java
|
package org.jabref.logic.citationkeypattern;
import java.math.BigInteger;
import java.text.Normalizer;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.EnumSet;
import java.util.List;
import java.util.Locale;
import java.util.Objects;
import java.util.Optional;
import java.util.Scanner;
import java.util.StringJoiner;
import java.util.StringTokenizer;
import java.util.function.Function;
import java.util.function.Predicate;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import java.util.stream.Collectors;
import org.jabref.logic.cleanup.Formatter;
import org.jabref.logic.formatter.Formatters;
import org.jabref.logic.formatter.casechanger.Word;
import org.jabref.logic.layout.format.RemoveLatexCommandsFormatter;
import org.jabref.model.database.BibDatabase;
import org.jabref.model.entry.Author;
import org.jabref.model.entry.AuthorList;
import org.jabref.model.entry.BibEntry;
import org.jabref.model.entry.Keyword;
import org.jabref.model.entry.KeywordList;
import org.jabref.model.entry.field.FieldFactory;
import org.jabref.model.entry.field.InternalField;
import org.jabref.model.entry.field.StandardField;
import org.jabref.model.strings.LatexToUnicodeAdapter;
import org.jabref.model.strings.StringUtil;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* This class provides methods to expand bracketed expressions, such as
* <code>[year]_[author]_[firstpage]</code>, using information from a provided BibEntry. The above-mentioned expression would yield
* <code>2017_Kitsune_123</code> when expanded using the BibTeX entry <code>@Article{ authors = {O. Kitsune}, year = {2017},
* pages={123-6}}</code>.
* <p>
* The embedding in JabRef is explained at <a href="https://docs.jabref.org/setup/citationkeypattern">Customize the citation key generator</a>.
* </p>
*/
public class BracketedPattern {
private static final Logger LOGGER = LoggerFactory.getLogger(BracketedPattern.class);
/**
* The maximum number of characters in the first author's last name.
*/
private static final int CHARS_OF_FIRST = 5;
/**
* The maximum number of name abbreviations that can be used. If there are more authors, {@code MAX_ALPHA_AUTHORS -
* 1} name abbreviations will be displayed, and a + sign will be appended at the end.
*/
private static final int MAX_ALPHA_AUTHORS = 4;
/**
* Matches everything that is not a unicode decimal digit.
*/
private static final Pattern NOT_DECIMAL_DIGIT = Pattern.compile("\\P{Nd}");
/**
* Matches everything that is not an uppercase ASCII letter. The intended use is to remove all lowercase letters
*/
private static final Pattern NOT_CAPITAL_CHARACTER = Pattern.compile("[^A-Z]");
/**
* Matches uppercase english letters between "({" and "})", which should be used to abbreviate the name of an institution
*/
private static final Pattern INLINE_ABBREVIATION = Pattern.compile("(?<=\\(\\{)[A-Z]+(?=}\\))");
/**
* Matches with "dep"/"dip", case-insensitive
*/
private static final Pattern DEPARTMENTS = Pattern.compile("^d[ei]p.*", Pattern.CASE_INSENSITIVE);
private static final Pattern WHITESPACE = Pattern.compile("\\p{javaWhitespace}");
/**
 * Classification of the parts of an institution name (used when abbreviating
 * institution authors for citation keys).
 */
private enum Institution {
    SCHOOL,
    DEPARTMENT,
    UNIVERSITY,
    TECHNOLOGY;

    /**
     * Matches "uni" followed by "v" or "b", at the start of a string or after a space, case insensitive
     */
    private static final Pattern UNIVERSITIES = Pattern.compile("^uni(v|b|$).*", Pattern.CASE_INSENSITIVE);
    /**
     * Matches with "tech", case-insensitive
     */
    private static final Pattern TECHNOLOGICAL_INSTITUTES = Pattern.compile("^tech.*", Pattern.CASE_INSENSITIVE);
    /**
     * Matches with "dep"/"dip"/"lab", case insensitive
     */
    private static final Pattern DEPARTMENTS_OR_LABS = Pattern.compile("^(d[ei]p|lab).*", Pattern.CASE_INSENSITIVE);

    /**
     * Find which types of institutions have words in common with the given name parts.
     *
     * @param nameParts a list of words that constitute parts of an institution's name.
     * @return set containing all types that matches
     */
    public static EnumSet<Institution> findTypes(List<String> nameParts) {
        EnumSet<Institution> parts = EnumSet.noneOf(Institution.class);
        // Deciding about a part type…
        for (String namePart : nameParts) {
            if (UNIVERSITIES.matcher(namePart).matches()) {
                parts.add(Institution.UNIVERSITY);
            } else if (TECHNOLOGICAL_INSTITUTES.matcher(namePart).matches()) {
                parts.add(Institution.TECHNOLOGY);
            } else if (StandardField.SCHOOL.getName().equalsIgnoreCase(namePart)) {
                parts.add(Institution.SCHOOL);
            } else if (DEPARTMENTS_OR_LABS.matcher(namePart).matches()) {
                parts.add(Institution.DEPARTMENT);
            }
        }
        if (parts.contains(Institution.TECHNOLOGY)) {
            parts.remove(Institution.UNIVERSITY); // technology institute isn't university :-)
        }
        return parts;
    }
}
private final String pattern;
public BracketedPattern() {
this.pattern = null;
}
public BracketedPattern(String pattern) {
this.pattern = pattern;
}
@Override
public String toString() {
return this.getClass().getName() + "[pattern=" + pattern + "]";
}
public String expand(BibEntry bibentry) {
return expand(bibentry, null);
}
/**
* Expands the current pattern using the given bibentry and database. ";" is used as keyword delimiter.
*
* @param bibentry The bibentry to expand.
* @param database The database to use for string-lookups and cross-refs. May be null.
* @return The expanded pattern. The empty string is returned, if it could not be expanded.
*/
public String expand(BibEntry bibentry, BibDatabase database) {
Objects.requireNonNull(bibentry);
Character keywordDelimiter = ';';
return expand(bibentry, keywordDelimiter, database);
}
/**
* Expands the current pattern using the given bibentry, keyword delimiter, and database.
*
* @param bibentry The bibentry to expand.
* @param keywordDelimiter The keyword delimiter to use.
* @param database The database to use for string-lookups and cross-refs. May be null.
* @return The expanded pattern. The empty string is returned, if it could not be expanded.
*/
public String expand(BibEntry bibentry, Character keywordDelimiter, BibDatabase database) {
Objects.requireNonNull(bibentry);
return expandBrackets(this.pattern, keywordDelimiter, bibentry, database);
}
/**
* Expands a pattern
*
* @param pattern The pattern to expand
* @param keywordDelimiter The keyword delimiter to use
* @param entry The bibentry to use for expansion
* @param database The database for field resolving. May be null.
* @return The expanded pattern. Not null.
*/
public static String expandBrackets(String pattern, Character keywordDelimiter, BibEntry entry, BibDatabase database) {
Objects.requireNonNull(pattern);
Objects.requireNonNull(entry);
return expandBrackets(pattern, expandBracketContent(keywordDelimiter, entry, database));
}
/**
* Utility method creating a function taking the string representation of the content of a bracketed expression and
* expanding it.
*
* @param keywordDelimiter The keyword delimiter to use
* @param entry The {@link BibEntry} to use for expansion
* @param database The {@link BibDatabase} for field resolving. May be null.
* @return a function accepting a bracketed expression and returning the result of expanding it
*/
public static Function<String, String> expandBracketContent(Character keywordDelimiter, BibEntry entry, BibDatabase database) {
return (String bracket) -> {
String expandedPattern;
List<String> fieldParts = parseFieldAndModifiers(bracket);
// check whether there is a modifier on the end such as
// ":lower":
expandedPattern = getFieldValue(entry, fieldParts.get(0), keywordDelimiter, database);
if (fieldParts.size() > 1) {
// apply modifiers:
expandedPattern = applyModifiers(expandedPattern, fieldParts, 1, expandBracketContent(keywordDelimiter, entry, database));
}
return expandedPattern;
};
}
/**
* Expands a pattern.
*
* @param pattern The pattern to expand
* @param bracketContentHandler A function taking the string representation of the content of a bracketed pattern
* and expanding it
* @return The expanded pattern. Not null.
*/
public static String expandBrackets(String pattern, Function<String, String> bracketContentHandler) {
    Objects.requireNonNull(pattern);
    StringBuilder expandedPattern = new StringBuilder();
    // Tokenize on the three structural characters (backslash, brackets, quote),
    // returning the delimiters themselves as tokens.
    StringTokenizer parsedPattern = new StringTokenizer(pattern, "\\[]\"", true);
    while (parsedPattern.hasMoreTokens()) {
        String token = parsedPattern.nextToken();
        switch (token) {
            // Quoted text is copied verbatim, including the quotes.
            case "\"" -> appendQuote(expandedPattern, parsedPattern);
            // A bracketed field marker is resolved by the supplied handler.
            case "[" -> {
                String fieldMarker = contentBetweenBrackets(parsedPattern, pattern);
                expandedPattern.append(bracketContentHandler.apply(fieldMarker));
            }
            // Backslash escapes the next token (copied literally).
            case "\\" -> {
                if (parsedPattern.hasMoreTokens()) {
                    expandedPattern.append(parsedPattern.nextToken());
                } else {
                    LOGGER.warn("Found a \"\\\" that is not part of an escape sequence");
                }
            }
            default -> expandedPattern.append(token);
        }
    }
    return expandedPattern.toString();
}
/**
* Returns the content enclosed between brackets, including enclosed quotes, and excluding the paired enclosing brackets.
* There may be brackets in it.
* Intended to be used by {@link BracketedPattern#expandBrackets(String, Character, BibEntry, BibDatabase)} when a [
* is encountered, and has been consumed, by the {@code StringTokenizer}.
*
* @param pattern pattern used by {@code expandBrackets}, used for logging
* @param tokenizer the tokenizer producing the tokens
* @return the content enclosed by brackets
*/
private static String contentBetweenBrackets(StringTokenizer tokenizer, final String pattern) {
    StringBuilder bracketContent = new StringBuilder();
    boolean foundClosingBracket = false;
    // Nesting depth of '[' seen inside the content; only a ']' at depth 0 closes the span.
    int subBrackets = 0;
    // make sure to read until the paired ']'
    while (tokenizer.hasMoreTokens() && !foundClosingBracket) {
        String token = tokenizer.nextToken();
        // If the beginning of a quote is found, append the content
        switch (token) {
            case "\"" -> appendQuote(bracketContent, tokenizer);
            case "]" -> {
                if (subBrackets == 0) {
                    foundClosingBracket = true;
                } else {
                    subBrackets--;
                    bracketContent.append(token);
                }
            }
            case "[" -> {
                subBrackets++;
                bracketContent.append(token);
            }
            default -> bracketContent.append(token);
        }
    }

    if (!foundClosingBracket) {
        LOGGER.warn("Missing closing bracket ']' in '{}'", pattern);
    } else if (bracketContent.length() == 0) {
        LOGGER.warn("Found empty brackets \"[]\" in '{}'", pattern);
    }
    return bracketContent.toString();
}
/**
* Appends the content between, and including, two \" to the provided <code>StringBuilder</code>. Intended to be
* used by {@link BracketedPattern#expandBrackets(String, Character, BibEntry, BibDatabase)} when a \" is
* encountered by the StringTokenizer.
*
* @param stringBuilder the <code>StringBuilder</code> to which tokens will be appended
* @param tokenizer the tokenizer producing the tokens
*/
/**
 * Copies tokens into {@code stringBuilder} up to and including the closing
 * quote. The caller has already consumed the opening quote, which is re-emitted
 * here so the quoted span appears verbatim in the output.
 *
 * @param stringBuilder receives the quote and its content
 * @param tokenizer     the tokenizer producing the tokens
 */
private static void appendQuote(StringBuilder stringBuilder, StringTokenizer tokenizer) {
    stringBuilder.append('"'); // re-emit the already-consumed opening quote
    while (tokenizer.hasMoreTokens()) {
        String next = tokenizer.nextToken();
        stringBuilder.append(next);
        if ("\"".equals(next)) {
            break; // closing quote copied; quoted span is complete
        }
    }
}
/**
* Evaluates the given pattern to the given bibentry and database
*
* @param entry The entry to get the field value from
* @param pattern A pattern string (such as auth, pureauth, authorLast)
* @param keywordDelimiter The de
* @param database The database to use for field resolving. May be null.
* @return String containing the evaluation result. Empty string if the pattern cannot be resolved.
*/
public static String getFieldValue(BibEntry entry, String pattern, Character keywordDelimiter, BibDatabase database) {
try {
if (pattern.startsWith("auth") || pattern.startsWith("pureauth")) {
// result the author
String unparsedAuthors = entry.getResolvedFieldOrAlias(StandardField.AUTHOR, database).orElse("");
if (pattern.startsWith("pure")) {
// "pure" is used in the context of authors to resolve to authors only and not fallback to editors
// The other functionality of the pattern "ForeIni", ... is the same
// Thus, remove the "pure" prefix so the remaining code in this section functions correctly
//
pattern = pattern.substring(4);
} else if (unparsedAuthors.isEmpty()) {
// special feature: A pattern starting with "auth" falls back to the editor
unparsedAuthors = entry.getResolvedFieldOrAlias(StandardField.EDITOR, database).orElse("");
}
AuthorList authorList = createAuthorList(unparsedAuthors);
// Gather all author-related checks, so we don't
// have to check all the time.
switch (pattern) {
case "auth":
return firstAuthor(authorList);
case "authForeIni":
return firstAuthorForenameInitials(authorList);
case "authFirstFull":
return firstAuthorVonAndLast(authorList);
case "authors":
return allAuthors(authorList);
case "authorsAlpha":
return authorsAlpha(authorList);
case "authorLast":
return lastAuthor(authorList);
case "authorLastForeIni":
return lastAuthorForenameInitials(authorList);
case "authorIni":
return oneAuthorPlusInitials(authorList);
case "auth.auth.ea":
return authAuthEa(authorList);
case "auth.etal":
return authEtal(authorList, ".", ".etal");
case "authEtAl":
return authEtal(authorList, "", "EtAl");
case "authshort":
return authShort(authorList);
}
if (pattern.matches("authIni[\\d]+")) {
int num = Integer.parseInt(pattern.substring(7));
return authIniN(authorList, num);
} else if (pattern.matches("auth[\\d]+_[\\d]+")) {
String[] nums = pattern.substring(4).split("_");
return authNofMth(authorList, Integer.parseInt(nums[0]),
Integer.parseInt(nums[1]));
} else if (pattern.matches("auth\\d+")) {
// authN. First N chars of the first author's last name.
int num = Integer.parseInt(pattern.substring(4));
return authN(authorList, num);
} else if (pattern.matches("authors\\d+")) {
return nAuthors(authorList, Integer.parseInt(pattern.substring(7)));
} else {
// This "auth" business was a dead end, so just
// use it literally:
return entry.getResolvedFieldOrAlias(FieldFactory.parseField(pattern), database).orElse("");
}
} else if (pattern.startsWith("ed")) {
// Gather all markers starting with "ed" here, so we
// don't have to check all the time.
String unparsedEditors = entry.getResolvedFieldOrAlias(StandardField.EDITOR, database).orElse("");
AuthorList editorList = createAuthorList(unparsedEditors);
switch (pattern) {
case "edtr":
return firstAuthor(editorList);
case "edtrForeIni":
return firstAuthorForenameInitials(editorList);
case "editors":
return allAuthors(editorList);
case "editorLast":
return lastAuthor(editorList); // Last author's last name
case "editorLastForeIni":
return lastAuthorForenameInitials(editorList);
case "editorIni":
return oneAuthorPlusInitials(editorList);
case "edtr.edtr.ea":
return authAuthEa(editorList);
case "edtrshort":
return authShort(editorList);
}
if (pattern.matches("edtrIni[\\d]+")) {
int num = Integer.parseInt(pattern.substring(7));
return authIniN(editorList, num);
} else if (pattern.matches("edtr[\\d]+_[\\d]+")) {
String[] nums = pattern.substring(4).split("_");
return authNofMth(editorList,
Integer.parseInt(nums[0]),
Integer.parseInt(nums[1]));
} else if (pattern.matches("edtr\\d+")) {
String fa = firstAuthor(editorList);
int num = Integer.parseInt(pattern.substring(4));
if (num > fa.length()) {
num = fa.length();
}
return fa.substring(0, num);
} else {
// This "ed" business was a dead end, so just
// use it literally:
return entry.getResolvedFieldOrAlias(FieldFactory.parseField(pattern), database).orElse("");
}
} else if ("firstpage".equals(pattern)) {
return firstPage(entry.getResolvedFieldOrAlias(StandardField.PAGES, database).orElse(""));
} else if ("pageprefix".equals(pattern)) {
return pagePrefix(entry.getResolvedFieldOrAlias(StandardField.PAGES, database).orElse(""));
} else if ("lastpage".equals(pattern)) {
return lastPage(entry.getResolvedFieldOrAlias(StandardField.PAGES, database).orElse(""));
} else if ("title".equals(pattern)) {
return camelizeSignificantWordsInTitle(entry.getResolvedFieldOrAlias(StandardField.TITLE, database).orElse(""));
} else if ("fulltitle".equals(pattern)) {
return entry.getResolvedFieldOrAlias(StandardField.TITLE, database).orElse("");
} else if ("shorttitle".equals(pattern)) {
return getTitleWords(3,
removeSmallWords(entry.getResolvedFieldOrAlias(StandardField.TITLE, database).orElse("")));
} else if ("shorttitleINI".equals(pattern)) {
return keepLettersAndDigitsOnly(
applyModifiers(getTitleWordsWithSpaces(3, entry.getResolvedFieldOrAlias(StandardField.TITLE, database).orElse("")),
Collections.singletonList("abbr"), 0, Function.identity()));
} else if ("veryshorttitle".equals(pattern)) {
return getTitleWords(1,
removeSmallWords(entry.getResolvedFieldOrAlias(StandardField.TITLE, database).orElse("")));
} else if ("camel".equals(pattern)) {
return getCamelizedTitle(entry.getResolvedFieldOrAlias(StandardField.TITLE, database).orElse(""));
} else if ("shortyear".equals(pattern)) {
String yearString = entry.getResolvedFieldOrAlias(StandardField.YEAR, database).orElse("");
if (yearString.isEmpty()) {
return yearString;
// In press/in preparation/submitted
} else if (yearString.startsWith("in") || yearString.startsWith("sub")) {
return "IP";
} else if (yearString.length() > 2) {
return yearString.substring(yearString.length() - 2);
} else {
return yearString;
}
} else if ("entrytype".equals(pattern)) {
return entry.getResolvedFieldOrAlias(InternalField.TYPE_HEADER, database).orElse("");
} else if (pattern.matches("keyword\\d+")) {
// according to LabelPattern.php, it returns keyword number n
int num = Integer.parseInt(pattern.substring(7));
KeywordList separatedKeywords = entry.getResolvedKeywords(keywordDelimiter, database);
if (separatedKeywords.size() < num) {
// not enough keywords
return "";
} else {
// num counts from 1 to n, but index in arrayList count from 0 to n-1
return separatedKeywords.get(num - 1).toString();
}
} else if (pattern.matches("keywords\\d*")) {
// return all keywords, not separated
int num;
if (pattern.length() > 8) {
num = Integer.parseInt(pattern.substring(8));
} else {
num = Integer.MAX_VALUE;
}
KeywordList separatedKeywords = entry.getResolvedKeywords(keywordDelimiter, database);
StringBuilder sb = new StringBuilder();
int i = 0;
for (Keyword keyword : separatedKeywords) {
// remove all spaces
sb.append(keyword.toString().replaceAll("\\s+", ""));
i++;
if (i >= num) {
break;
}
}
return sb.toString();
} else {
// we haven't seen any special demands
return entry.getResolvedFieldOrAlias(FieldFactory.parseField(pattern), database).orElse("");
}
} catch (NullPointerException ex) {
LOGGER.debug("Problem making expanding bracketed expression", ex);
return "";
}
}
/**
* Parses the provided string to an {@link AuthorList}, which are then formatted by {@link LatexToUnicodeAdapter}.
* Afterward, any institutions are formatted into an institution key.
*
* @param unparsedAuthors a string representation of authors or editors
* @return an {@link AuthorList} consisting of authors and institution keys with resolved latex.
*/
private static AuthorList createAuthorList(String unparsedAuthors) {
return AuthorList.parse(unparsedAuthors).getAuthors().stream()
.map(author -> {
// If the author is an institution, use an institution key instead of the full name
String lastName = author.getLast()
.map(lastPart -> isInstitution(author) ?
generateInstitutionKey(lastPart) :
LatexToUnicodeAdapter.format(lastPart))
.orElse(null);
return new Author(
author.getFirst().map(LatexToUnicodeAdapter::format).orElse(null),
author.getFirstAbbr().map(LatexToUnicodeAdapter::format).orElse(null),
author.getVon().map(LatexToUnicodeAdapter::format).orElse(null),
lastName,
author.getJr().map(LatexToUnicodeAdapter::format).orElse(null));
})
.collect(AuthorList.collect());
}
/**
* Checks if an author is an institution which can get a citation key from {@link #generateInstitutionKey(String)}.
*
* @param author the checked author
* @return true if only the last name is present and it contains at least one whitespace character.
*/
private static boolean isInstitution(Author author) {
return author.getFirst().isEmpty() && author.getFirstAbbr().isEmpty() && author.getJr().isEmpty()
&& author.getVon().isEmpty() && author.getLast().isPresent()
&& WHITESPACE.matcher(author.getLast().get()).find();
}
/**
* Applies modifiers to a label generated based on a field marker.
*
* @param label The generated label.
* @param parts String array containing the modifiers.
* @param offset The number of initial items in the modifiers array to skip.
* @param expandBracketContent a function to expand the content in the parentheses.
* @return The modified label.
*/
static String applyModifiers(final String label, final List<String> parts, final int offset, Function<String, String> expandBracketContent) {
String resultingLabel = label;
for (int j = offset; j < parts.size(); j++) {
String modifier = parts.get(j);
if ("abbr".equals(modifier)) {
// Abbreviate - that is,
StringBuilder abbreviateSB = new StringBuilder();
String[] words = resultingLabel.replaceAll("[\\{\\}']", "")
.split("[\\(\\) \r\n\"]");
for (String word : words) {
if (!word.isEmpty()) {
abbreviateSB.append(word.charAt(0));
}
}
resultingLabel = abbreviateSB.toString();
} else {
Optional<Formatter> formatter = Formatters.getFormatterForModifier(modifier);
if (formatter.isPresent()) {
resultingLabel = formatter.get().format(resultingLabel);
} else if (!modifier.isEmpty() && (modifier.length() >= 2) && (modifier.charAt(0) == '(') && modifier.endsWith(")")) {
// Alternate text modifier in parentheses. Should be inserted if the label is empty
if (label.isEmpty() && (modifier.length() > 2)) {
resultingLabel = expandBrackets(modifier.substring(1, modifier.length() - 1), expandBracketContent);
}
} else {
LOGGER.warn("Key generator warning: unknown modifier '{}'.", modifier);
}
}
}
return resultingLabel;
}
    /**
     * Determines the first "number" words out of the "title" field in the given BibTeX entry.
     *
     * @param number the maximum number of words to keep
     * @param title  the raw title value (dashes, excess whitespace and LaTeX commands are removed first)
     * @return the first {@code number} words of the formatted title, separated by single spaces
     */
    public static String getTitleWords(int number, String title) {
        return getTitleWordsWithSpaces(number, title);
    }
/**
* Removes any '-', unnecessary whitespace and latex commands formatting
*/
private static String formatTitle(String title) {
String ss = new RemoveLatexCommandsFormatter().format(title);
StringBuilder stringBuilder = new StringBuilder();
StringBuilder current;
int piv = 0;
while (piv < ss.length()) {
current = new StringBuilder();
// Get the next word:
while ((piv < ss.length()) && !Character.isWhitespace(ss.charAt(piv))
&& (ss.charAt(piv) != '-')) {
current.append(ss.charAt(piv));
piv++;
}
piv++;
// Check if it is ok:
String word = current.toString().trim();
if (word.isEmpty()) {
continue;
}
// If we get here, the word was accepted.
if (stringBuilder.length() > 0) {
stringBuilder.append(' ');
}
stringBuilder.append(word);
}
return stringBuilder.toString();
}
/**
* Capitalises and concatenates the words out of the "title" field in the given BibTeX entry
*/
public static String getCamelizedTitle(String title) {
return keepLettersAndDigitsOnly(camelizeTitle(title));
}
private static String camelizeTitle(String title) {
StringBuilder stringBuilder = new StringBuilder();
String formattedTitle = formatTitle(title);
try (Scanner titleScanner = new Scanner(formattedTitle)) {
while (titleScanner.hasNext()) {
String word = titleScanner.next();
// Camelize the word
word = word.substring(0, 1).toUpperCase(Locale.ROOT) + word.substring(1);
if (stringBuilder.length() > 0) {
stringBuilder.append(' ');
}
stringBuilder.append(word);
}
}
return stringBuilder.toString();
}
/**
* Capitalises the significant words of the "title" field in the given BibTeX entry
*/
public static String camelizeSignificantWordsInTitle(String title) {
StringJoiner stringJoiner = new StringJoiner(" ");
String formattedTitle = formatTitle(title);
try (Scanner titleScanner = new Scanner(formattedTitle)) {
while (titleScanner.hasNext()) {
String word = titleScanner.next();
// Camelize the word if it is significant
boolean camelize = !Word.SMALLER_WORDS.contains(word.toLowerCase(Locale.ROOT));
// We want to capitalize significant words and the first word of the title
if (camelize || (stringJoiner.length() == 0)) {
word = word.substring(0, 1).toUpperCase(Locale.ROOT) + word.substring(1);
} else {
word = word.substring(0, 1).toLowerCase(Locale.ROOT) + word.substring(1);
}
stringJoiner.add(word);
}
}
return stringJoiner.toString();
}
public static String removeSmallWords(String title) {
String formattedTitle = formatTitle(title);
try (Scanner titleScanner = new Scanner(formattedTitle)) {
return titleScanner.tokens()
.filter(Predicate.not(
Word::isSmallerWord))
.collect(Collectors.joining(" "));
}
}
private static String getTitleWordsWithSpaces(int number, String title) {
String formattedTitle = formatTitle(title);
try (Scanner titleScanner = new Scanner(formattedTitle)) {
return titleScanner.tokens()
.limit(number)
.collect(Collectors.joining(" "));
}
}
private static String keepLettersAndDigitsOnly(String in) {
return in.codePoints()
.filter(Character::isLetterOrDigit)
.collect(StringBuilder::new, StringBuilder::appendCodePoint, StringBuilder::append)
.toString();
}
/**
* Gets the last name of the first author/editor
*
* @param authorList an {@link AuthorList}
* @return the surname of an author/editor or the von part if no lastname is prsent or "" if no author was found or both firstname+lastname are empty
* This method is guaranteed to never return null.
*/
private static String firstAuthor(AuthorList authorList) {
return authorList.getAuthors().stream()
.findFirst()
.flatMap(author -> author.getLast().isPresent() ? author.getLast() : author.getVon())
.orElse("");
}
/**
* Gets the first name initials of the first author/editor
*
* @param authorList an {@link AuthorList}
* @return the first name initial of an author/editor or "" if no author was found This method is guaranteed to
* never return null.
*/
private static String firstAuthorForenameInitials(AuthorList authorList) {
return authorList.getAuthors().stream()
.findFirst()
.flatMap(Author::getFirstAbbr)
.map(s -> s.substring(0, 1))
.orElse("");
}
/**
* Gets the von part and the last name of the first author/editor. No spaces are returned.
*
* @param authorList an {@link AuthorList}
* @return the von part and surname of an author/editor or "" if no author was found. This method is guaranteed to
* never return null.
*/
private static String firstAuthorVonAndLast(AuthorList authorList) {
return authorList.isEmpty() ? "" :
authorList.getAuthor(0).getLastOnly().replaceAll(" ", "");
}
/**
* Gets the last name of the last author/editor
*
* @param authorList an {@link AuthorList}
* @return the surname of an author/editor
*/
private static String lastAuthor(AuthorList authorList) {
if (authorList.isEmpty()) {
return "";
}
return authorList.getAuthors().get(authorList.getNumberOfAuthors() - 1).getLast().orElse("");
}
/**
* Gets the forename initials of the last author/editor
*
* @param authorList an {@link AuthorList}
* @return the forename initial of an author/editor or "" if no author was found This method is guaranteed to never
* return null.
*/
private static String lastAuthorForenameInitials(AuthorList authorList) {
if (authorList.isEmpty()) {
return "";
}
return authorList.getAuthor(authorList.getNumberOfAuthors() - 1).getFirstAbbr().map(s -> s.substring(0, 1))
.orElse("");
}
    /**
     * Gets the last name of all authors/editors.
     * Pattern <code>[authors]</code>.
     * <p>
     * <code>and others</code> is converted to <code>EtAl</code>
     * </p>
     *
     * @param authorList an {@link AuthorList}
     * @return the concatenated surnames of all authors/editors, with an "and others" entry rendered as "EtAl"
     */
    static String allAuthors(AuthorList authorList) {
        return joinAuthorsOnLastName(authorList, authorList.getNumberOfAuthors(), "", "EtAl");
    }
/**
* Returns the authors according to the BibTeX-alpha-Style
*
* @param authorList an {@link AuthorList}
* @return the initials of all authors' names
*/
    static String authorsAlpha(AuthorList authorList) {
        StringBuilder alphaStyle = new StringBuilder();
        int maxAuthors;
        final boolean maxAuthorsExceeded;
        // At most MAX_ALPHA_AUTHORS authors contribute initials; if there are more,
        // one slot is sacrificed so a trailing "+" can signal the truncation.
        if (authorList.getNumberOfAuthors() <= MAX_ALPHA_AUTHORS) {
            maxAuthors = authorList.getNumberOfAuthors();
            maxAuthorsExceeded = false;
        } else {
            maxAuthors = MAX_ALPHA_AUTHORS - 1;
            maxAuthorsExceeded = true;
        }
        if (authorList.getNumberOfAuthors() == 1) {
            // Single author: initials of any name prefixes plus up to three letters of the last part
            String[] firstAuthor = authorList.getAuthor(0).getLastOnly()
                                             .replaceAll("\\s+", " ").trim().split(" ");
            // take first letter of any "prefixes" (e.g. van der Aalst -> vd)
            for (int j = 0; j < (firstAuthor.length - 1); j++) {
                alphaStyle.append(firstAuthor[j], 0, 1);
            }
            // append last part of last name completely
            alphaStyle.append(firstAuthor[firstAuthor.length - 1], 0,
                    Math.min(3, firstAuthor[firstAuthor.length - 1].length()));
        } else {
            // Multiple authors: one initial per part of each author's von+last name
            boolean andOthersPresent = authorList.getAuthor(maxAuthors - 1).equals(Author.OTHERS);
            if (andOthersPresent) {
                // "and others" contributes no initial of its own, only the trailing "+"
                maxAuthors--;
            }
            List<String> vonAndLastNames = authorList.getAuthors().stream()
                                                     .limit(maxAuthors)
                                                     .map(Author::getLastOnly)
                                                     .collect(Collectors.toList());
            for (String vonAndLast : vonAndLastNames) {
                // replace all whitespaces by " "
                // split the lastname at " "
                String[] nameParts = vonAndLast.replaceAll("\\s+", " ").trim().split(" ");
                for (String part : nameParts) {
                    // use first character of each part of lastname
                    alphaStyle.append(part, 0, 1);
                }
            }
            if (andOthersPresent || maxAuthorsExceeded) {
                // mark that the author list was truncated
                alphaStyle.append("+");
            }
        }
        return alphaStyle.toString();
    }
/**
* Creates a string with all last names separated by a `delimiter`. If the number of authors are larger than
* `maxAuthors`, replace all excess authors with `suffix`.
*
* @param authorList the list of authors
* @param maxAuthors the maximum number of authors in the string
* @param delimiter delimiter separating the last names of the authors
* @param suffix to replace excess authors with. Also used to replace <code>and others</code>.
* @return a string consisting of authors' last names separated by a `delimiter` and with any authors excess of
* `maxAuthors` replaced with `suffix`
*/
private static String joinAuthorsOnLastName(AuthorList authorList, int maxAuthors, String delimiter, final String suffix) {
final String finalSuffix = authorList.getNumberOfAuthors() > maxAuthors ? suffix : "";
return authorList.getAuthors().stream()
.map(author -> {
if (author.equals(Author.OTHERS)) {
if (suffix.startsWith(delimiter)) {
return Optional.of(suffix.substring(delimiter.length()));
} else {
return Optional.of(suffix);
}
} else {
return author.getLast();
}
})
.flatMap(Optional::stream)
.limit(maxAuthors)
.collect(Collectors.joining(delimiter, "", finalSuffix));
}
    /**
     * Gets the surnames of the first N authors and appends "EtAl" if there are more than N authors.
     *
     * @param authorList an {@link AuthorList}
     * @param n          the number of desired authors
     * @return the concatenated surnames of the first {@code n} authors, followed by "EtAl" when the list was truncated
     */
    private static String nAuthors(AuthorList authorList, int n) {
        return joinAuthorsOnLastName(authorList, n, "", "EtAl");
    }
/**
* Gets the first part of the last name of the first author/editor, and appends the last name initial of the
* remaining authors/editors. Maximum 5 characters
*
* @param authorList an <{@link AuthorList}
* @return the surname of all authors/editors
*/
static String oneAuthorPlusInitials(AuthorList authorList) {
if (authorList.isEmpty()) {
return "";
}
StringBuilder authorSB = new StringBuilder();
// authNofMth start index at 1 instead of 0
authorSB.append(authNofMth(authorList, CHARS_OF_FIRST, 1));
for (int i = 2; i <= authorList.getNumberOfAuthors(); i++) {
authorSB.append(authNofMth(authorList, 1, i));
}
return authorSB.toString();
}
    /**
     * The <code>auth.auth.ea</code> pattern: the last names of the first two authors joined by ".",
     * followed by ".ea" when more than two authors are present.
     *
     * @param authorList an {@link AuthorList}
     * @return the formatted author part; "" for an empty author list
     */
    static String authAuthEa(AuthorList authorList) {
        return joinAuthorsOnLastName(authorList, 2, ".", ".ea");
    }
/**
* auth.etal, authEtAl, ... format
*/
static String authEtal(AuthorList authorList, String delim, String append) {
if (authorList.isEmpty()) {
return "";
}
if ((authorList.getNumberOfAuthors() <= 2)
&& ((authorList.getNumberOfAuthors() == 1) || !authorList.getAuthor(1).equals(Author.OTHERS))) {
// in case 1 or two authors, just name them
// exception: If the second author is "and others", then do the appendix handling (in the other branch)
return joinAuthorsOnLastName(authorList, 2, delim, "");
} else {
return authorList.getAuthor(0).getLast().orElse("") + append;
}
}
/**
* The first N characters of the Mth author's or editor's last name. M starts counting from 1.
* In case the Mth author is {@link Author#OTHERS}, <code>+</code> is returned.
*/
private static String authNofMth(AuthorList authorList, int n, int m) {
// have m counting from 0
int mminusone = m - 1;
if ((authorList.getNumberOfAuthors() <= mminusone) || (n < 0) || (mminusone < 0)) {
return "";
}
Author lastAuthor = authorList.getAuthor(mminusone);
if (lastAuthor.equals(Author.OTHERS)) {
return "+";
}
String lastName = lastAuthor.getLast()
.map(CitationKeyGenerator::removeDefaultUnwantedCharacters).orElse("");
return lastName.length() > n ? lastName.substring(0, n) : lastName;
}
    /**
     * First N chars of the first author's last name.
     *
     * @param authorList an {@link AuthorList}
     * @param num        the maximum number of characters to keep
     * @return at most {@code num} characters of the first author's last name; "+" if the first
     *         author is "and others"; "" for an empty list
     */
    private static String authN(AuthorList authorList, int num) {
        return authNofMth(authorList, num, 1);
    }
/**
* authshort format
*/
static String authShort(AuthorList authorList) {
StringBuilder author = new StringBuilder();
final int numberOfAuthors = authorList.getNumberOfAuthors();
if (numberOfAuthors == 1) {
author.append(authorList.getAuthor(0).getLast().orElse(""));
} else if (numberOfAuthors >= 2) {
for (int i = 0; (i < numberOfAuthors) && (i < 3); i++) {
author.append(authNofMth(authorList, 1, i + 1));
}
if (numberOfAuthors > 3) {
author.append('+');
}
}
return author.toString();
}
/**
* authIniN format
*
* @param authorList The authors to format.
* @param n The maximum number of characters this string will be long. A negative number or zero will lead
* to "" be returned.
*/
    static String authIniN(AuthorList authorList, int n) {
        if ((n <= 0) || authorList.isEmpty()) {
            return "";
        }
        final int numberOfAuthors = authorList.getNumberOfAuthors();
        final boolean lastAuthorIsOthers = authorList.getAuthor(numberOfAuthors - 1).equals(Author.OTHERS);
        if ((n > 1) && ((n < numberOfAuthors) || lastAuthorIsOthers)) {
            // More authors than available characters (or a trailing "and others"):
            // recurse on a shortened list with n-1 characters and mark the cut with "+".
            final int limit = Math.min(n - 1, numberOfAuthors - 1);
            // special handling if the last author is "Others"
            // This gets the single char "+" only
            AuthorList allButOthers = AuthorList.of(
                    authorList.getAuthors()
                              .stream()
                              .limit(limit)
                              .toList());
            return authIniN(allButOthers, n - 1) + "+";
        }
        StringBuilder author = new StringBuilder();
        // Distribute the n characters over the authors: the first (n % numberOfAuthors)
        // authors receive one character more than the remaining ones.
        int charsAll = n / numberOfAuthors;
        for (int i = 0; i < numberOfAuthors; i++) {
            if (i < (n % numberOfAuthors)) {
                author.append(authNofMth(authorList, charsAll + 1, i + 1));
            } else {
                author.append(authNofMth(authorList, charsAll, i + 1));
            }
        }
        // An "Others" author always contributes "+" regardless of its character budget,
        // so the result can exceed n characters and must be truncated.
        if (author.length() <= n) {
            return author.toString();
        } else {
            return author.substring(0, n);
        }
    }
/**
* Split the pages field into separate numbers and return the lowest
*
* @param pages (may not be null) a pages string such as 42--111 or 7,41,73--97 or 43+
* @return the first page number or "" if no number is found in the string
* @throws NullPointerException if pages is null
*/
public static String firstPage(String pages) {
// FIXME: incorrectly exracts the first page when pages are
// specified with ellipse, e.g. "213-6", which should stand
// for "213-216". S.G.
return NOT_DECIMAL_DIGIT.splitAsStream(pages)
.filter(Predicate.not(String::isBlank))
.map(BigInteger::new)
.min(BigInteger::compareTo)
.map(BigInteger::toString)
.orElse("");
}
/**
* Return the non-digit prefix of pages
*
* @param pages a pages string such as L42--111 or L7,41,73--97 or L43+
* @return the non-digit prefix of pages (like "L" of L7) or "" if no non-digit prefix is found in the string
* @throws NullPointerException if pages is null.
*/
public static String pagePrefix(String pages) {
if (pages.matches("^\\D+.*$")) {
return (pages.split("\\d+"))[0];
} else {
return "";
}
}
/**
* Split the pages field into separate numbers and return the highest
*
* @param pages a pages string such as 42--111 or 7,41,73--97 or 43+
* @return the first page number or "" if no number is found in the string
* @throws NullPointerException if pages is null.
*/
public static String lastPage(String pages) {
return NOT_DECIMAL_DIGIT.splitAsStream(pages)
.filter(Predicate.not(String::isBlank))
.map(BigInteger::new)
.max(BigInteger::compareTo)
.map(BigInteger::toString)
.orElse("");
}
/**
* Parse a field marker with modifiers, possibly containing a parenthesised modifier, as well as escaped colons and
* parentheses.
*
* @param arg The argument string.
* @return An array of strings representing the parts of the marker
*/
protected static List<String> parseFieldAndModifiers(String arg) {
List<String> parts = new ArrayList<>();
StringBuilder current = new StringBuilder();
boolean escaped = false;
int inParenthesis = 0;
for (int i = 0; i < arg.length(); i++) {
char currentChar = arg.charAt(i);
if ((currentChar == ':') && !escaped && (inParenthesis == 0)) {
parts.add(current.toString());
current = new StringBuilder();
} else if ((currentChar == '(') && !escaped) {
inParenthesis++;
current.append(currentChar);
} else if ((currentChar == ')') && !escaped && (inParenthesis > 0)) {
inParenthesis--;
current.append(currentChar);
} else if (currentChar == '\\') {
if (escaped) {
escaped = false;
current.append(currentChar);
} else {
escaped = true;
}
} else if (escaped) {
current.append(currentChar);
escaped = false;
} else {
current.append(currentChar);
}
}
parts.add(current.toString());
return parts;
}
/**
* <p>
     * An author or editor may be an institution, not a person. In that case the key generator builds very long keys,
* e.g.: for “The Attributed Graph Grammar System (AGG)” -> “TheAttributedGraphGrammarSystemAGG”.
* </p>
*
* <p>
* An institution name should be inside <code>{}</code> brackets. If the institution name includes its abbreviation
* this abbreviation should be in <code>{}</code> brackets. For the previous example the value should look like:
* <code>{The Attributed Graph Grammar System ({AGG})}</code>.
* </p>
*
* <p>
* If an institution includes its abbreviation, i.e. "...({XYZ})", first such abbreviation should be used as the key
* value part of such author.
* </p>
*
* <p>
* If an institution does not include its abbreviation the key should be generated from its name in the following
* way:
* </p>
*
* <p>
* The institution value can contain: institution name, part of the institution, address, etc. These values should
* be comma separated. Institution name and possible part of the institution should be in the beginning, while
* address and secondary information should be in the end.
* </p>
* <p>
* Each part is examined separately:
* <ol>
* <li>We remove all tokens of a part which are one of the defined ignore words (the, press), which end with a dot
* (ltd., co., ...) and which first character is lowercase (of, on, di, ...).</li>
* <li>We detect the types of the part: university, technology institute,
* department, school, rest
* <ul>
* <li>University: <code>"Uni[NameOfTheUniversity]"</code></li>
* <li>Department: If the institution value contains more than one comma separated part, the department will be an
* abbreviation of all words beginning with the uppercase letter except of words:
* <code>d[ei]p.*</code>, school, faculty</li>
* <li>School: same as department</li>
 * <li>Rest: If there are fewer than 3 tokens in such part, then the result
* is a concatenation of those tokens. Otherwise, the result will be built
* from the first letter in each token.</li>
* </ul>
* </ol>
* <p>
* Parts are concatenated together in the following way:
* <ul>
* <li>If there is a university part use it otherwise use the rest part.</li>
* <li>If there is a school part append it.</li>
* <li>If there is a department part and it is not same as school part
* append it.</li>
* </ul>
* <p>
 * Rest part is only the first part which does not match any other type. All
* other parts (address, ...) are ignored.
*
* @param content the institution to generate a Bibtex key for
* @return <ul>
* <li>the institution key</li>
* <li>"" in the case of a failure</li>
* <li>null if content is null</li>
* </ul>
*/
    private static String generateInstitutionKey(String content) {
        if (content == null) {
            return null;
        }
        if (content.isBlank()) {
            return "";
        }
        // An abbreviation given inline in the institution name wins over everything else.
        Matcher matcher = INLINE_ABBREVIATION.matcher(content);
        if (matcher.find()) {
            return LatexToUnicodeAdapter.format(matcher.group());
        }
        Optional<String> unicodeFormattedName = LatexToUnicodeAdapter.parse(content);
        if (unicodeFormattedName.isEmpty()) {
            LOGGER.warn("{} could not be converted to unicode. This can result in an incorrect or missing institute citation key", content);
        }
        String result = unicodeFormattedName.orElse(Normalizer.normalize(content, Normalizer.Form.NFC));
        // Special characters can't be allowed past this point because the citation key generator might replace them with multiple mixed-case characters
        result = StringUtil.replaceSpecialCharacters(result);
        // Comma-separated parts: name/department first, address and such at the end.
        String[] institutionNameTokens = result.split(",");
        // Key parts
        String university = null;
        String department = null;
        String school = null;
        String rest = null;
        for (int index = 0; index < institutionNameTokens.length; index++) {
            List<String> tokenParts = getValidInstitutionNameParts(institutionNameTokens[index]);
            EnumSet<Institution> tokenTypes = Institution.findTypes(tokenParts);
            if (tokenTypes.contains(Institution.UNIVERSITY)) {
                StringBuilder universitySB = new StringBuilder();
                // University part looks like: Uni[NameOfTheUniversity]
                universitySB.append("Uni");
                for (String k : tokenParts) {
                    // drop the "uni.../university" keyword itself, keep the proper name
                    if (!"uni".regionMatches(true, 0, k, 0, 3)) {
                        universitySB.append(k);
                    }
                }
                university = universitySB.toString();
                // If university is detected then the previous part is suggested
                // as department
                if ((index > 0) && (department == null)) {
                    department = institutionNameTokens[index - 1];
                }
            } else if ((tokenTypes.contains(Institution.SCHOOL)
                    || tokenTypes.contains(Institution.DEPARTMENT))
                    && (institutionNameTokens.length > 1)) {
                // School is an abbreviation of all the words beginning with a
                // capital letter excluding: department, school and faculty words.
                StringBuilder schoolSB = new StringBuilder();
                StringBuilder departmentSB = new StringBuilder();
                for (String k : tokenParts) {
                    if (noOtherInstitutionKeyWord(k)) {
                        if (tokenTypes.contains(Institution.SCHOOL)) {
                            schoolSB.append(NOT_CAPITAL_CHARACTER.matcher(k).replaceAll(""));
                        }
                        // Explicitly defined department part is build the same way as school
                        if (tokenTypes.contains(Institution.DEPARTMENT)) {
                            departmentSB.append(NOT_CAPITAL_CHARACTER.matcher(k).replaceAll(""));
                        }
                    }
                }
                if (tokenTypes.contains(Institution.SCHOOL)) {
                    school = schoolSB.toString();
                }
                if (tokenTypes.contains(Institution.DEPARTMENT)) {
                    department = departmentSB.toString();
                }
            } else if (rest == null) {
                // A part not matching university, department nor school
                if (tokenParts.size() >= 3) {
                    // If there are more than 3 parts, only keep the first character of each word
                    final int[] codePoints = tokenParts.stream()
                                                       .filter(Predicate.not(String::isBlank))
                                                       .mapToInt(s -> s.codePointAt(0))
                                                       .toArray();
                    rest = new String(codePoints, 0, codePoints.length);
                } else {
                    rest = String.join("", tokenParts);
                }
            }
        }
        // Putting parts together: university (or rest) + school + department,
        // where a department identical to the school is not repeated.
        return (university == null ? Objects.toString(rest, "") : university)
                + (school == null ? "" : school)
                + ((department == null)
                || ((school != null) && department.equals(school)) ? "" : department);
    }
/**
* Helper method for {@link BracketedPattern#generateInstitutionKey(String)}. Checks that the word is not an
* institution keyword and has an uppercase first letter, except univ/tech key word.
*
* @param word to check
*/
private static boolean noOtherInstitutionKeyWord(String word) {
return !DEPARTMENTS.matcher(word).matches()
&& !StandardField.SCHOOL.getName().equalsIgnoreCase(word)
&& !"faculty".equalsIgnoreCase(word)
&& !NOT_CAPITAL_CHARACTER.matcher(word).replaceAll("").isEmpty();
}
private static List<String> getValidInstitutionNameParts(String name) {
List<String> nameParts = new ArrayList<>();
List<String> ignore = Arrays.asList("press", "the");
// Cleanup: remove unnecessary words.
for (String part : name.replaceAll("\\{[A-Z]+}", "").split("[ \\-_]")) {
if ((!(part.isEmpty()) // remove empty
&& !ignore.contains(part.toLowerCase(Locale.ENGLISH)) // remove ignored words
&& (part.charAt(part.length() - 1) != '.')
&& Character.isUpperCase(part.charAt(0)))
|| ((part.length() >= 3) && "uni".equalsIgnoreCase(part.substring(0, 3)))) {
nameParts.add(part);
}
}
return nameParts;
}
}
| 58,328
| 43.424219
| 154
|
java
|
null |
jabref-main/src/main/java/org/jabref/logic/citationkeypattern/CitationKeyGenerator.java
|
package org.jabref.logic.citationkeypattern;
import java.util.Arrays;
import java.util.List;
import java.util.Objects;
import java.util.Optional;
import java.util.function.Function;
import java.util.regex.PatternSyntaxException;
import org.jabref.model.FieldChange;
import org.jabref.model.database.BibDatabase;
import org.jabref.model.database.BibDatabaseContext;
import org.jabref.model.entry.BibEntry;
import org.jabref.model.entry.types.EntryType;
import org.jabref.model.strings.StringUtil;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * This is the utility class of the LabelPattern package.
 */
public class CitationKeyGenerator extends BracketedPattern {
    /**
     * All single characters that we can use for extending a key to make it unique.
     */
    public static final String APPENDIX_CHARACTERS = "abcdefghijklmnopqrstuvwxyz";
    /**
     * List of unwanted characters. These will be removed at the end.
     * Note that <code>+</code> is a wanted character to indicate "et al." in authorsAlpha.
     * Example: "ABC+". See {@link org.jabref.logic.citationkeypattern.BracketedPatternTest#authorsAlpha()} for examples.
     */
    public static final String DEFAULT_UNWANTED_CHARACTERS = "-`ʹ:!;?^";
    private static final Logger LOGGER = LoggerFactory.getLogger(CitationKeyGenerator.class);
    // Source of disallowed characters : https://tex.stackexchange.com/a/408548/9075
    private static final List<Character> DISALLOWED_CHARACTERS = Arrays.asList('{', '}', '(', ')', ',', '=', '\\', '"', '#', '%', '~', '\'');
    // Pattern (per entry type) used to build keys.
    private final AbstractCitationKeyPattern citeKeyPattern;
    // Database consulted for citation-key collisions and field resolution.
    private final BibDatabase database;
    private final CitationKeyPatternPreferences citationKeyPatternPreferences;
    // User-configured characters stripped from generated keys (in addition to DISALLOWED_CHARACTERS).
    private final String unwantedCharacters;

    /**
     * Convenience constructor that takes the pattern and database from the given context.
     */
    public CitationKeyGenerator(BibDatabaseContext bibDatabaseContext, CitationKeyPatternPreferences citationKeyPatternPreferences) {
        this(bibDatabaseContext.getMetaData().getCiteKeyPattern(citationKeyPatternPreferences.getKeyPattern()),
                bibDatabaseContext.getDatabase(),
                citationKeyPatternPreferences);
    }

    public CitationKeyGenerator(AbstractCitationKeyPattern citeKeyPattern, BibDatabase database, CitationKeyPatternPreferences citationKeyPatternPreferences) {
        this.citeKeyPattern = Objects.requireNonNull(citeKeyPattern);
        this.database = Objects.requireNonNull(database);
        this.citationKeyPatternPreferences = Objects.requireNonNull(citationKeyPatternPreferences);
        this.unwantedCharacters = citationKeyPatternPreferences.getUnwantedCharacters();
    }

    /**
     * Computes an appendix to a citation key that could make it unique. We use a-z for numbers 0-25, and then aa-az, ba-bz, etc.
     *
     * @param number The appendix number.
     * @return The String to append.
     */
    private static String getAppendix(int number) {
        if (number >= APPENDIX_CHARACTERS.length()) {
            // Recurse on the "higher digit" (base-26), then append the last letter.
            int lastChar = number % APPENDIX_CHARACTERS.length();
            return getAppendix((number / APPENDIX_CHARACTERS.length()) - 1) + APPENDIX_CHARACTERS.charAt(lastChar);
        } else {
            return APPENDIX_CHARACTERS.substring(number, number + 1);
        }
    }

    /**
     * Removes {@link #DEFAULT_UNWANTED_CHARACTERS} from the given key.
     */
    public static String removeDefaultUnwantedCharacters(String key) {
        return removeUnwantedCharacters(key, DEFAULT_UNWANTED_CHARACTERS);
    }

    /**
     * Removes the given unwanted characters and all {@link #DISALLOWED_CHARACTERS} from the key,
     * then transliterates remaining special characters into BibTeX-safe replacements.
     *
     * @param key                the candidate key
     * @param unwantedCharacters characters to strip in addition to the disallowed ones
     */
    public static String removeUnwantedCharacters(String key, String unwantedCharacters) {
        String newKey = key.chars()
                           .filter(c -> unwantedCharacters.indexOf(c) == -1)
                           .filter(c -> !DISALLOWED_CHARACTERS.contains((char) c))
                           .collect(StringBuilder::new,
                                   StringBuilder::appendCodePoint, StringBuilder::append)
                           .toString();
        // Replace non-English characters like umlauts etc. with a sensible
        // letter or letter combination that bibtex can accept.
        return StringUtil.replaceSpecialCharacters(newKey);
    }

    /**
     * Removes unwanted characters and all whitespace from the key.
     */
    public static String cleanKey(String key, String unwantedCharacters) {
        return removeUnwantedCharacters(key, unwantedCharacters).replaceAll("\\s", "");
    }

    /**
     * Generate a citation key for the given {@link BibEntry}.
     *
     * @param entry a {@link BibEntry}
     * @return a citation key based on the user's preferences
     */
    public String generateKey(BibEntry entry) {
        Objects.requireNonNull(entry);
        String currentKey = entry.getCitationKey().orElse(null);
        // Order matters: expand pattern, apply user regex replacement, then disambiguate.
        String newKey = createCitationKeyFromPattern(entry);
        newKey = replaceWithRegex(newKey);
        newKey = appendLettersToKey(newKey, currentKey);
        return cleanKey(newKey, unwantedCharacters);
    }

    /**
     * A letter will be appended to the key based on the user's preferences, either always or to prevent duplicated keys.
     *
     * @param key the new key
     * @param oldKey the old key
     * @return a key, if needed, with an appended letter
     */
    private String appendLettersToKey(String key, String oldKey) {
        long occurrences = database.getNumberOfCitationKeyOccurrences(key);
        if ((occurrences > 0) && Objects.equals(oldKey, key)) {
            occurrences--; // No change, so we can accept one dupe.
        }
        boolean alwaysAddLetter = citationKeyPatternPreferences.getKeySuffix()
                == CitationKeyPatternPreferences.KeySuffix.ALWAYS;
        if (alwaysAddLetter || occurrences != 0) {
            // The key is already in use, so we must modify it.
            boolean firstLetterA = citationKeyPatternPreferences.getKeySuffix()
                    == CitationKeyPatternPreferences.KeySuffix.SECOND_WITH_A;
            // SECOND_WITH_B starts the suffix at 'b' (appendix index 1), all other modes at 'a'.
            int number = !alwaysAddLetter && !firstLetterA ? 1 : 0;
            String moddedKey;
            do {
                moddedKey = key + getAppendix(number);
                number++;
                occurrences = database.getNumberOfCitationKeyOccurrences(moddedKey);
                // only happens if #getAddition() is buggy
                if (Objects.equals(oldKey, moddedKey)) {
                    occurrences--;
                }
            } while (occurrences > 0);
            key = moddedKey;
        }
        return key;
    }

    /**
     * Using preferences, replace matches to the provided regex with a string.
     *
     * @param key the citation key
     * @return the citation key where matches to the regex are replaced
     */
    private String replaceWithRegex(String key) {
        // Remove Regular Expressions while generating Keys
        String regex = citationKeyPatternPreferences.getKeyPatternRegex();
        if ((regex != null) && !regex.trim().isEmpty()) {
            String replacement = citationKeyPatternPreferences.getKeyPatternReplacement();
            try {
                key = key.replaceAll(regex, replacement);
            } catch (PatternSyntaxException e) {
                // Invalid user regex: keep the unmodified key rather than failing generation.
                LOGGER.warn("There is a syntax error in the regular expression \"{}\" used to generate a citation key", regex, e);
            }
        }
        return key;
    }

    /**
     * Expands the citation key pattern configured for the entry's type.
     *
     * @return the expanded key, or an empty string when no pattern is configured
     */
    private String createCitationKeyFromPattern(BibEntry entry) {
        // get the type of entry
        EntryType entryType = entry.getType();
        // Get the arrayList corresponding to the type
        List<String> citationKeyPattern = citeKeyPattern.getValue(entryType);
        if (citationKeyPattern.isEmpty()) {
            return "";
        }
        // Only the first pattern entry is used for expansion.
        return expandBrackets(citationKeyPattern.get(0), expandBracketContent(entry));
    }

    /**
     * A helper method to create a {@link Function} that takes a single bracketed expression, expands it, and cleans the key.
     *
     * @param entry the {@link BibEntry} that a citation key is generated for
     * @return a cleaned citation key for the given {@link BibEntry}
     */
    private Function<String, String> expandBracketContent(BibEntry entry) {
        Character keywordDelimiter = citationKeyPatternPreferences.getKeywordDelimiter();
        return (String bracket) -> {
            String expandedPattern;
            List<String> fieldParts = parseFieldAndModifiers(bracket);
            expandedPattern = removeUnwantedCharacters(getFieldValue(entry, fieldParts.get(0), keywordDelimiter, database), unwantedCharacters);
            // check whether there is a modifier on the end such as
            // ":lower":
            if (fieldParts.size() > 1) {
                // apply modifiers:
                expandedPattern = applyModifiers(expandedPattern, fieldParts, 1, expandBracketContent(entry));
            }
            return cleanKey(expandedPattern, unwantedCharacters);
        };
    }

    /**
     * Generates a citation key for the given entry, and sets the key.
     *
     * @param entry the entry to generate the key for
     * @return the change to the key (or an empty optional if the key was not changed)
     */
    public Optional<FieldChange> generateAndSetKey(BibEntry entry) {
        String newKey = generateKey(entry);
        return entry.setCitationKey(newKey);
    }
}
| 9,182
| 41.123853
| 159
|
java
|
null |
jabref-main/src/main/java/org/jabref/logic/citationkeypattern/CitationKeyPatternPreferences.java
|
package org.jabref.logic.citationkeypattern;
import javafx.beans.property.BooleanProperty;
import javafx.beans.property.ObjectProperty;
import javafx.beans.property.ReadOnlyObjectProperty;
import javafx.beans.property.SimpleBooleanProperty;
import javafx.beans.property.SimpleObjectProperty;
import javafx.beans.property.SimpleStringProperty;
import javafx.beans.property.StringProperty;
/**
 * Preferences for citation key generation, exposed both as plain values and as
 * JavaFX properties so UI bindings can observe changes.
 */
public class CitationKeyPatternPreferences {

    /**
     * Strategy for the letter suffix appended to disambiguate duplicate keys.
     */
    public enum KeySuffix {
        ALWAYS, // CiteKeyA, CiteKeyB, CiteKeyC ...
        SECOND_WITH_A, // CiteKey, CiteKeyA, CiteKeyB ...
        SECOND_WITH_B // CiteKey, CiteKeyB, CiteKeyC ...
    }

    private final BooleanProperty shouldAvoidOverwriteCiteKey = new SimpleBooleanProperty();
    private final BooleanProperty shouldWarnBeforeOverwriteCiteKey = new SimpleBooleanProperty();
    private final BooleanProperty shouldGenerateCiteKeysBeforeSaving = new SimpleBooleanProperty();
    private final ObjectProperty<KeySuffix> keySuffix = new SimpleObjectProperty<>();
    // Optional user regex applied to generated keys (with keyPatternReplacement).
    private final StringProperty keyPatternRegex = new SimpleStringProperty();
    private final StringProperty keyPatternReplacement = new SimpleStringProperty();
    // Characters stripped from generated keys.
    private final StringProperty unwantedCharacters = new SimpleStringProperty();
    private final ObjectProperty<GlobalCitationKeyPattern> keyPattern = new SimpleObjectProperty<>();
    private final String defaultPattern;
    // Read-only: the delimiter is owned elsewhere and only observed here.
    private final ReadOnlyObjectProperty<Character> keywordDelimiter;

    public CitationKeyPatternPreferences(boolean shouldAvoidOverwriteCiteKey,
                                         boolean shouldWarnBeforeOverwriteCiteKey,
                                         boolean shouldGenerateCiteKeysBeforeSaving,
                                         KeySuffix keySuffix,
                                         String keyPatternRegex,
                                         String keyPatternReplacement,
                                         String unwantedCharacters,
                                         GlobalCitationKeyPattern keyPattern,
                                         String defaultPattern,
                                         ReadOnlyObjectProperty<Character> keywordDelimiter) {
        this.shouldAvoidOverwriteCiteKey.set(shouldAvoidOverwriteCiteKey);
        this.shouldWarnBeforeOverwriteCiteKey.set(shouldWarnBeforeOverwriteCiteKey);
        this.shouldGenerateCiteKeysBeforeSaving.set(shouldGenerateCiteKeysBeforeSaving);
        this.keySuffix.set(keySuffix);
        this.keyPatternRegex.set(keyPatternRegex);
        this.keyPatternReplacement.set(keyPatternReplacement);
        this.unwantedCharacters.set(unwantedCharacters);
        this.keyPattern.set(keyPattern);
        this.defaultPattern = defaultPattern;
        this.keywordDelimiter = keywordDelimiter;
    }

    /**
     * For use in test
     */
    public CitationKeyPatternPreferences(boolean shouldAvoidOverwriteCiteKey,
                                         boolean shouldWarnBeforeOverwriteCiteKey,
                                         boolean shouldGenerateCiteKeysBeforeSaving,
                                         KeySuffix keySuffix,
                                         String keyPatternRegex,
                                         String keyPatternReplacement,
                                         String unwantedCharacters,
                                         GlobalCitationKeyPattern keyPattern,
                                         String defaultPattern,
                                         Character keywordDelimiter) {
        this(shouldAvoidOverwriteCiteKey,
                shouldWarnBeforeOverwriteCiteKey,
                shouldGenerateCiteKeysBeforeSaving,
                keySuffix,
                keyPatternRegex,
                keyPatternReplacement,
                unwantedCharacters,
                keyPattern,
                defaultPattern,
                new SimpleObjectProperty<>(keywordDelimiter));
    }

    // --- plain getters / setters and property accessors for each preference ---

    public boolean shouldAvoidOverwriteCiteKey() {
        return shouldAvoidOverwriteCiteKey.get();
    }

    public BooleanProperty shouldAvoidOverwriteCiteKeyProperty() {
        return shouldAvoidOverwriteCiteKey;
    }

    public void setAvoidOverwriteCiteKey(boolean shouldAvoidOverwriteCiteKey) {
        this.shouldAvoidOverwriteCiteKey.set(shouldAvoidOverwriteCiteKey);
    }

    public boolean shouldWarnBeforeOverwriteCiteKey() {
        return shouldWarnBeforeOverwriteCiteKey.get();
    }

    public BooleanProperty shouldWarnBeforeOverwriteCiteKeyProperty() {
        return shouldWarnBeforeOverwriteCiteKey;
    }

    public void setWarnBeforeOverwriteCiteKey(boolean shouldWarnBeforeOverwriteCiteKey) {
        this.shouldWarnBeforeOverwriteCiteKey.set(shouldWarnBeforeOverwriteCiteKey);
    }

    public boolean shouldGenerateCiteKeysBeforeSaving() {
        return shouldGenerateCiteKeysBeforeSaving.get();
    }

    public BooleanProperty shouldGenerateCiteKeysBeforeSavingProperty() {
        return shouldGenerateCiteKeysBeforeSaving;
    }

    public void setGenerateCiteKeysBeforeSaving(boolean shouldGenerateCiteKeysBeforeSaving) {
        this.shouldGenerateCiteKeysBeforeSaving.set(shouldGenerateCiteKeysBeforeSaving);
    }

    public KeySuffix getKeySuffix() {
        return keySuffix.get();
    }

    public ObjectProperty<KeySuffix> keySuffixProperty() {
        return keySuffix;
    }

    public void setKeySuffix(KeySuffix keySuffix) {
        this.keySuffix.set(keySuffix);
    }

    public String getKeyPatternRegex() {
        return keyPatternRegex.get();
    }

    public StringProperty keyPatternRegexProperty() {
        return keyPatternRegex;
    }

    public void setKeyPatternRegex(String keyPatternRegex) {
        this.keyPatternRegex.set(keyPatternRegex);
    }

    public String getKeyPatternReplacement() {
        return keyPatternReplacement.get();
    }

    public StringProperty keyPatternReplacementProperty() {
        return keyPatternReplacement;
    }

    public void setKeyPatternReplacement(String keyPatternReplacement) {
        this.keyPatternReplacement.set(keyPatternReplacement);
    }

    public String getUnwantedCharacters() {
        return unwantedCharacters.get();
    }

    public StringProperty unwantedCharactersProperty() {
        return unwantedCharacters;
    }

    public void setUnwantedCharacters(String unwantedCharacters) {
        this.unwantedCharacters.set(unwantedCharacters);
    }

    public GlobalCitationKeyPattern getKeyPattern() {
        return keyPattern.get();
    }

    public ObjectProperty<GlobalCitationKeyPattern> keyPatternProperty() {
        return keyPattern;
    }

    public void setKeyPattern(GlobalCitationKeyPattern keyPattern) {
        this.keyPattern.set(keyPattern);
    }

    public String getDefaultPattern() {
        return defaultPattern;
    }

    public Character getKeywordDelimiter() {
        return keywordDelimiter.get();
    }
}
| 6,997
| 37.032609
| 101
|
java
|
null |
jabref-main/src/main/java/org/jabref/logic/citationkeypattern/DatabaseCitationKeyPattern.java
|
package org.jabref.logic.citationkeypattern;
import java.util.List;
import org.jabref.model.entry.types.EntryType;
/**
 * Citation key pattern scoped to a single database. Lookups that reach the last
 * resolution level are answered by the application-wide (global) pattern.
 */
public class DatabaseCitationKeyPattern extends AbstractCitationKeyPattern {

    // Global fallback consulted for the last resolution level.
    private final GlobalCitationKeyPattern globalPattern;

    public DatabaseCitationKeyPattern(GlobalCitationKeyPattern globalCitationKeyPattern) {
        this.globalPattern = globalCitationKeyPattern;
    }

    @Override
    public List<String> getLastLevelCitationKeyPattern(EntryType entryType) {
        return globalPattern.getValue(entryType);
    }
}
| 591
| 28.6
| 90
|
java
|
null |
jabref-main/src/main/java/org/jabref/logic/citationkeypattern/GlobalCitationKeyPattern.java
|
package org.jabref.logic.citationkeypattern;
import java.util.List;
import org.jabref.model.entry.types.EntryType;
/**
 * Application-wide citation key pattern. At the last resolution level there is
 * nothing to fall back to, so the stored default pattern is always the answer.
 */
public class GlobalCitationKeyPattern extends AbstractCitationKeyPattern {

    public GlobalCitationKeyPattern(List<String> bibtexKeyPattern) {
        defaultPattern = bibtexKeyPattern;
    }

    /**
     * Convenience factory: splits the textual pattern and wraps it.
     *
     * @param pattern the raw pattern string
     */
    public static GlobalCitationKeyPattern fromPattern(String pattern) {
        return new GlobalCitationKeyPattern(split(pattern));
    }

    @Override
    public List<String> getLastLevelCitationKeyPattern(EntryType entryType) {
        // The global level has no per-type override; every type gets the default.
        return defaultPattern;
    }
}
| 585
| 25.636364
| 77
|
java
|
null |
jabref-main/src/main/java/org/jabref/logic/citationstyle/CSLAdapter.java
|
package org.jabref.logic.citationstyle;
import java.io.IOException;
import java.util.Arrays;
import java.util.List;
import java.util.Objects;
import org.jabref.model.database.BibDatabaseContext;
import org.jabref.model.entry.BibEntry;
import org.jabref.model.entry.BibEntryTypesManager;
import de.undercouch.citeproc.CSL;
import de.undercouch.citeproc.DefaultAbbreviationProvider;
import de.undercouch.citeproc.output.Bibliography;
/**
* Provides an adapter class to CSL. It holds a CSL instance under the hood that is only recreated when
* the style changes.
*
* Note on the API: The first call to {@link #makeBibliography} is expensive since the
* CSL instance will be created. As long as the style stays the same, we can reuse this instance. On style-change, the
* engine is re-instantiated. Therefore, the use-case of this class is many calls to {@link #makeBibliography} with the
* same style. Changing the output format is cheap.
*
* Note on the implementation:
* The main function {@link #makeBibliography} will enforce
* synchronized calling. The main CSL engine under the hood is not thread-safe. Since this class is usually called from
* a BackgroundTask, the only other option would be to create several CSL instances which is wasting a lot of resources and very slow.
* In the current scheme, {@link #makeBibliography} can be called as usual
* background task and to the best of my knowledge, concurrent calls will pile up and processed sequentially.
*/
public class CSLAdapter {

    private final JabRefItemDataProvider dataProvider = new JabRefItemDataProvider();
    // Style and format currently loaded into cslInstance; compared against incoming
    // requests to decide whether the (expensive) engine must be re-created.
    private String style;
    private CitationStyleOutputFormat format;
    // Lazily created; recreated only when the requested style changes.
    private CSL cslInstance;

    /**
     * Creates the bibliography of the provided items. This method needs to run synchronized because the underlying
     * CSL engine is not thread-safe.
     *
     * @param databaseContext {@link BibDatabaseContext} is used to be able to resolve fields and their aliases
     */
    public synchronized List<String> makeBibliography(List<BibEntry> bibEntries, String style, CitationStyleOutputFormat outputFormat, BibDatabaseContext databaseContext, BibEntryTypesManager entryTypesManager) throws IOException, IllegalArgumentException {
        dataProvider.setData(bibEntries, databaseContext, entryTypesManager);
        // May rebuild the CSL engine if style/format differ from the cached ones.
        initialize(style, outputFormat);
        cslInstance.registerCitationItems(dataProvider.getIds());
        final Bibliography bibliography = cslInstance.makeBibliography();
        return Arrays.asList(bibliography.getEntries());
    }

    /**
     * Initialized the static CSL instance if needed.
     *
     * @param newStyle  journal style of the output
     * @param newFormat usually HTML or RTF.
     * @throws IOException An error occurred in the underlying JavaScript framework
     */
    private void initialize(String newStyle, CitationStyleOutputFormat newFormat) throws IOException {
        final boolean newCslInstanceNeedsToBeCreated = (cslInstance == null) || !Objects.equals(newStyle, style);
        if (newCslInstanceNeedsToBeCreated) {
            // lang and forceLang are set to the default values of other CSL constructors
            cslInstance = new CSL(dataProvider, new JabRefLocaleProvider(),
                    new DefaultAbbreviationProvider(), newStyle, "en-US");
            style = newStyle;
        }
        // Changing only the output format is cheap; no engine rebuild required.
        if (newCslInstanceNeedsToBeCreated || (!Objects.equals(newFormat, format))) {
            cslInstance.setOutputFormat(newFormat.getFormat());
            format = newFormat;
        }
    }
}
| 3,573
| 46.653333
| 257
|
java
|
null |
jabref-main/src/main/java/org/jabref/logic/citationstyle/CitationStyle.java
|
package org.jabref.logic.citationstyle;
import java.io.IOException;
import java.io.StringReader;
import java.net.URI;
import java.net.URISyntaxException;
import java.net.URL;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.NoSuchFileException;
import java.nio.file.Path;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Objects;
import java.util.Optional;
import java.util.stream.Collectors;
import java.util.stream.Stream;
import javax.xml.parsers.DocumentBuilderFactory;
import javax.xml.parsers.ParserConfigurationException;
import org.jabref.logic.util.StandardFileType;
import de.undercouch.citeproc.helper.CSLUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.w3c.dom.CharacterData;
import org.w3c.dom.Document;
import org.w3c.dom.Element;
import org.w3c.dom.NodeList;
import org.xml.sax.InputSource;
import org.xml.sax.SAXException;
/**
* Representation of a CitationStyle. Stores its name, the file path and the style itself
*/
public class CitationStyle {

    public static final String DEFAULT = "/ieee.csl";
    private static final Logger LOGGER = LoggerFactory.getLogger(CitationStyle.class);
    private static final String STYLES_ROOT = "/csl-styles";
    // Cache of discovered bundled styles; filled once by discoverCitationStyles().
    private static final List<CitationStyle> STYLES = new ArrayList<>();
    private static final DocumentBuilderFactory FACTORY = DocumentBuilderFactory.newInstance();

    static {
        // Harden the parser against XXE: style files can be loaded from outside the
        // classpath (user-supplied paths), so external entity resolution must be off.
        try {
            FACTORY.setFeature("http://xml.org/sax/features/external-general-entities", false);
            FACTORY.setFeature("http://xml.org/sax/features/external-parameter-entities", false);
            FACTORY.setXIncludeAware(false);
            FACTORY.setExpandEntityReferences(false);
        } catch (ParserConfigurationException e) {
            LOGGER.warn("Could not disable external entity resolution on the XML parser", e);
        }
    }

    private final String filePath;
    private final String title;
    private final String source;

    private CitationStyle(final String filename, final String title, final String source) {
        this.filePath = Objects.requireNonNull(filename);
        this.title = Objects.requireNonNull(title);
        this.source = Objects.requireNonNull(source);
    }

    /**
     * Creates an CitationStyle instance out of the style string
     *
     * @param source   the CSL XML content
     * @param filename the file name the style was read from (used as its path)
     * @return the style, or empty if the XML is invalid or lacks a bibliography element
     */
    private static Optional<CitationStyle> createCitationStyleFromSource(final String source, final String filename) {
        if ((filename != null) && !filename.isEmpty() && (source != null) && !source.isEmpty()) {
            try {
                InputSource inputSource = new InputSource();
                inputSource.setCharacterStream(new StringReader(stripInvalidProlog(source)));
                Document doc = FACTORY.newDocumentBuilder().parse(inputSource);
                // See CSL#canFormatBibliographies, checks if the tag exists
                NodeList bibs = doc.getElementsByTagName("bibliography");
                if (bibs.getLength() <= 0) {
                    LOGGER.debug("no bibliography element for file {} ", filename);
                    return Optional.empty();
                }
                NodeList nodes = doc.getElementsByTagName("info");
                NodeList titleNode = ((Element) nodes.item(0)).getElementsByTagName("title");
                String title = ((CharacterData) titleNode.item(0).getFirstChild()).getData();
                return Optional.of(new CitationStyle(filename, title, source));
            } catch (ParserConfigurationException | SAXException | IOException e) {
                LOGGER.error("Error while parsing source", e);
            }
        }
        return Optional.empty();
    }

    /**
     * Drops any junk preceding the first '<' so the XML prolog parses cleanly.
     */
    private static String stripInvalidProlog(String source) {
        int startIndex = source.indexOf("<");
        if (startIndex > 0) {
            return source.substring(startIndex);
        } else {
            return source;
        }
    }

    /**
     * Loads the CitationStyle from the given file
     *
     * @param styleFile a bundled style name (e.g. "ieee.csl") or an external file path
     */
    public static Optional<CitationStyle> createCitationStyleFromFile(final String styleFile) {
        if (!isCitationStyleFile(styleFile)) {
            LOGGER.error("Can only load style files: {}", styleFile);
            return Optional.empty();
        }
        try {
            String text;
            String internalFile = STYLES_ROOT + (styleFile.startsWith("/") ? "" : "/") + styleFile;
            URL url = CitationStyle.class.getResource(internalFile);
            if (url != null) {
                text = CSLUtils.readURLToString(url, StandardCharsets.UTF_8.toString());
            } else {
                // if the url is null then the style is located outside the classpath
                text = Files.readString(Path.of(styleFile), StandardCharsets.UTF_8);
            }
            return createCitationStyleFromSource(text, styleFile);
        } catch (NoSuchFileException e) {
            LOGGER.error("Could not find file: {}", styleFile, e);
        } catch (IOException e) {
            LOGGER.error("Error reading source file", e);
        }
        return Optional.empty();
    }

    /**
     * Provides the default citation style which is currently IEEE
     *
     * @return default citation style
     */
    public static CitationStyle getDefault() {
        return createCitationStyleFromFile(DEFAULT).orElse(new CitationStyle("", "Empty", ""));
    }

    /**
     * Provides the citation styles that come with JabRef.
     *
     * @return list of available citation styles
     */
    public static List<CitationStyle> discoverCitationStyles() {
        // NOTE(review): returns the internal mutable cache; callers should not modify it.
        if (!STYLES.isEmpty()) {
            return STYLES;
        }
        // Locate the styles directory via a known bundled style.
        URL url = CitationStyle.class.getResource(STYLES_ROOT + "/acm-siggraph.csl");
        Objects.requireNonNull(url);
        try {
            URI uri = url.toURI();
            Path path = Path.of(uri).getParent();
            STYLES.addAll(discoverCitationStylesInPath(path));
            return STYLES;
        } catch (URISyntaxException | IOException e) {
            LOGGER.error("something went wrong while searching available CitationStyles", e);
            return Collections.emptyList();
        }
    }

    private static List<CitationStyle> discoverCitationStylesInPath(Path path) throws IOException {
        try (Stream<Path> stream = Files.find(path, 1, (file, attr) -> file.toString().endsWith("csl"))) {
            return stream.map(Path::getFileName)
                         .map(Path::toString)
                         .map(CitationStyle::createCitationStyleFromFile)
                         .filter(Optional::isPresent)
                         .map(Optional::get)
                         .collect(Collectors.toList());
        }
    }

    /**
     * Checks if the given style file is a CitationStyle
     */
    public static boolean isCitationStyleFile(String styleFile) {
        return StandardFileType.CITATION_STYLE.getExtensions().stream().anyMatch(styleFile::endsWith);
    }

    public String getTitle() {
        return title;
    }

    public String getSource() {
        return source;
    }

    public String getFilePath() {
        return filePath;
    }

    @Override
    public String toString() {
        return title;
    }

    @Override
    public boolean equals(Object o) {
        if (this == o) {
            return true;
        }
        if ((o == null) || (getClass() != o.getClass())) {
            return false;
        }
        CitationStyle other = (CitationStyle) o;
        // Identity is defined by the style's XML source, matching hashCode below.
        return Objects.equals(source, other.source);
    }

    @Override
    public int hashCode() {
        return Objects.hash(source);
    }
}
| 7,245
| 33.179245
| 118
|
java
|
null |
jabref-main/src/main/java/org/jabref/logic/citationstyle/CitationStyleCache.java
|
package org.jabref.logic.citationstyle;
import java.util.Objects;
import org.jabref.logic.preview.PreviewLayout;
import org.jabref.model.database.BibDatabaseContext;
import org.jabref.model.database.event.EntriesRemovedEvent;
import org.jabref.model.entry.BibEntry;
import org.jabref.model.entry.event.EntryChangedEvent;
import com.google.common.cache.CacheBuilder;
import com.google.common.cache.CacheLoader;
import com.google.common.cache.LoadingCache;
import com.google.common.eventbus.Subscribe;
/**
* Caches the generated Citations for quicker access
* {@link CitationStyleGenerator} generates the citation with JavaScript which may take some time
*/
public class CitationStyleCache {

    private static final int CACHE_SIZE = 1024;

    // May be null until setCitationStyle() is first called; the cache loader then
    // yields an empty preview.
    private PreviewLayout citationStyle;
    private final LoadingCache<BibEntry, String> citationStyleCache;

    public CitationStyleCache(BibDatabaseContext databaseContext) {
        citationStyleCache = CacheBuilder.newBuilder().maximumSize(CACHE_SIZE).build(new CacheLoader<BibEntry, String>() {
            @Override
            public String load(BibEntry entry) {
                if (citationStyle != null) {
                    return citationStyle.generatePreview(entry, databaseContext);
                } else {
                    return "";
                }
            }
        });
        // Invalidate cached previews when entries change or are removed.
        databaseContext.getDatabase().registerListener(new BibDatabaseEntryListener());
    }

    /**
     * Returns the citation for the given entry.
     */
    public String getCitationFor(BibEntry entry) {
        return citationStyleCache.getUnchecked(entry);
    }

    /**
     * Sets the layout used for rendering and drops all cached previews if it changed.
     *
     * @param citationStyle the new layout; must not be null
     */
    public void setCitationStyle(PreviewLayout citationStyle) {
        Objects.requireNonNull(citationStyle);
        // Use Objects.equals: the field is null before the first style is set, so
        // this.citationStyle.equals(...) would throw a NullPointerException here.
        if (!Objects.equals(this.citationStyle, citationStyle)) {
            this.citationStyle = citationStyle;
            this.citationStyleCache.invalidateAll();
        }
    }

    private class BibDatabaseEntryListener {
        /**
         * removes the outdated citation of the changed entry
         */
        @Subscribe
        public void listen(EntryChangedEvent entryChangedEvent) {
            citationStyleCache.invalidate(entryChangedEvent.getBibEntry());
        }

        /**
         * removes the citation of the removed entries as they are not needed anymore
         */
        @Subscribe
        public void listen(EntriesRemovedEvent entriesRemovedEvent) {
            for (BibEntry entry : entriesRemovedEvent.getBibEntries()) {
                citationStyleCache.invalidate(entry);
            }
        }
    }
}
| 2,567
| 32.789474
| 122
|
java
|
null |
jabref-main/src/main/java/org/jabref/logic/citationstyle/CitationStyleGenerator.java
|
package org.jabref.logic.citationstyle;
import java.io.IOException;
import java.util.Collections;
import java.util.List;
import org.jabref.logic.l10n.Localization;
import org.jabref.model.database.BibDatabaseContext;
import org.jabref.model.entry.BibEntry;
import org.jabref.model.entry.BibEntryTypesManager;
import org.jbibtex.TokenMgrException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* Facade to unify the access to the citation style engine. Use these methods if you need rendered BibTeX item(s) in a
* given journal style. This class uses {@link CSLAdapter} to create output.
*/
public class CitationStyleGenerator {

    private static final Logger LOGGER = LoggerFactory.getLogger(CitationStyleGenerator.class);
    // Shared adapter; its makeBibliography is synchronized, so concurrent callers queue up.
    private static final CSLAdapter CSL_ADAPTER = new CSLAdapter();

    // Utility class: prevent instantiation.
    private CitationStyleGenerator() {
    }

    /**
     * Generates a Citation based on the given entry and style with a default {@link BibDatabaseContext}
     *
     * @implNote the citation is generated using JavaScript which may take some time, better call it from outside the main Thread
     */
    protected static String generateCitation(BibEntry entry, CitationStyle style, BibEntryTypesManager entryTypesManager) {
        return generateCitation(entry, style.getSource(), entryTypesManager);
    }

    /**
     * Generates a Citation based on the given entry and style with a default {@link BibDatabaseContext}
     *
     * @implNote the citation is generated using JavaScript which may take some time, better call it from outside the main Thread
     */
    protected static String generateCitation(BibEntry entry, String style, BibEntryTypesManager entryTypesManager) {
        return generateCitation(entry, style, CitationStyleOutputFormat.HTML, new BibDatabaseContext(), entryTypesManager);
    }

    /**
     * Generates a Citation based on the given entry, style, and output format
     *
     * @implNote the citation is generated using JavaScript which may take some time, better call it from outside the main Thread
     */
    public static String generateCitation(BibEntry entry, String style, CitationStyleOutputFormat outputFormat, BibDatabaseContext databaseContext, BibEntryTypesManager entryTypesManager) {
        return generateCitations(Collections.singletonList(entry), style, outputFormat, databaseContext, entryTypesManager).stream().findFirst().orElse("");
    }

    /**
     * Generates the citation for multiple entries at once.
     *
     * <p>Never throws: on any engine failure a localized fallback message is returned
     * instead of a citation, so previews degrade gracefully.</p>
     *
     * @implNote The citations are generated using JavaScript which may take some time, better call it from outside the main thread.
     */
    public static List<String> generateCitations(List<BibEntry> bibEntries, String style, CitationStyleOutputFormat outputFormat, BibDatabaseContext databaseContext, BibEntryTypesManager entryTypesManager) {
        try {
            return CSL_ADAPTER.makeBibliography(bibEntries, style, outputFormat, databaseContext, entryTypesManager);
        } catch (IllegalArgumentException e) {
            LOGGER.error("Could not generate BibEntry citation. The CSL engine could not create a preview for your item.", e);
            return Collections.singletonList(Localization.lang("Cannot generate preview based on selected citation style."));
        } catch (IOException | ArrayIndexOutOfBoundsException e) {
            LOGGER.error("Could not generate BibEntry citation", e);
            return Collections.singletonList(Localization.lang("Cannot generate preview based on selected citation style."));
        } catch (TokenMgrException e) {
            LOGGER.error("Bad character inside BibEntry", e);
            // sadly one cannot easily retrieve the bad char from the TokenMgrError
            return Collections.singletonList(Localization.lang("Cannot generate preview based on selected citation style.") +
                    outputFormat.getLineSeparator() +
                    Localization.lang("Bad character inside entry") +
                    outputFormat.getLineSeparator() +
                    e.getLocalizedMessage());
        }
    }
}
| 4,093
| 50.175
| 207
|
java
|
null |
jabref-main/src/main/java/org/jabref/logic/citationstyle/CitationStyleOutputFormat.java
|
package org.jabref.logic.citationstyle;
import org.jabref.logic.util.OS;
/**
 * Output formats supported by the CSL engine, each paired with the line
 * separator used when several rendered citations are concatenated.
 */
public enum CitationStyleOutputFormat {

    HTML("html", OS.NEWLINE + "<br>" + OS.NEWLINE),
    TEXT("text", "");

    private final String format;
    private final String lineSeparator;

    CitationStyleOutputFormat(String format, String lineSeparator) {
        this.format = format;
        this.lineSeparator = lineSeparator;
    }

    /**
     * @return separator inserted between rendered citations for this format
     */
    public String getLineSeparator() {
        return lineSeparator;
    }

    /**
     * @return the format identifier handed to the CSL engine
     */
    public String getFormat() {
        return format;
    }

    @Override
    public String toString() {
        return format;
    }
}
| 629
| 19.322581
| 68
|
java
|
null |
jabref-main/src/main/java/org/jabref/logic/citationstyle/CitationStylePreviewLayout.java
|
package org.jabref.logic.citationstyle;
import org.jabref.logic.preview.PreviewLayout;
import org.jabref.model.database.BibDatabaseContext;
import org.jabref.model.entry.BibEntry;
import org.jabref.model.entry.BibEntryTypesManager;
/**
 * A {@link PreviewLayout} that renders an entry by running the CSL engine with a given
 * {@link CitationStyle}. Name, display name and source are all taken from the wrapped style.
 */
public class CitationStylePreviewLayout implements PreviewLayout {

    private final CitationStyle citationStyle;
    private final BibEntryTypesManager bibEntryTypesManager;

    public CitationStylePreviewLayout(CitationStyle citationStyle, BibEntryTypesManager bibEntryTypesManager) {
        this.citationStyle = citationStyle;
        this.bibEntryTypesManager = bibEntryTypesManager;
    }

    @Override
    public String generatePreview(BibEntry entry, BibDatabaseContext databaseContext) {
        // Previews are always rendered as HTML
        return CitationStyleGenerator.generateCitation(entry, citationStyle.getSource(), CitationStyleOutputFormat.HTML, databaseContext, bibEntryTypesManager);
    }

    @Override
    public String getDisplayName() {
        return citationStyle.getTitle();
    }

    @Override
    public String getName() {
        // The internal name equals the display name: both are the style's title
        return getDisplayName();
    }

    public String getSource() {
        return citationStyle.getSource();
    }

    public String getFilePath() {
        return citationStyle.getFilePath();
    }
}
| 1,259
| 30.5
| 160
|
java
|
null |
jabref-main/src/main/java/org/jabref/logic/citationstyle/JabRefItemDataProvider.java
|
package org.jabref.logic.citationstyle;
import java.util.ArrayList;
import java.util.Collection;
import java.util.LinkedHashSet;
import java.util.List;
import java.util.Locale;
import java.util.Optional;
import java.util.Set;
import org.jabref.logic.formatter.bibtexfields.RemoveNewlinesFormatter;
import org.jabref.logic.integrity.PagesChecker;
import org.jabref.model.database.BibDatabaseContext;
import org.jabref.model.database.BibDatabaseMode;
import org.jabref.model.entry.BibEntry;
import org.jabref.model.entry.BibEntryType;
import org.jabref.model.entry.BibEntryTypesManager;
import org.jabref.model.entry.Month;
import org.jabref.model.entry.field.Field;
import org.jabref.model.entry.field.StandardField;
import org.jabref.model.entry.types.StandardEntryType;
import org.jabref.model.strings.LatexToUnicodeAdapter;
import de.undercouch.citeproc.ItemDataProvider;
import de.undercouch.citeproc.bibtex.BibTeXConverter;
import de.undercouch.citeproc.csl.CSLItemData;
import de.undercouch.citeproc.helper.json.StringJsonBuilderFactory;
import org.jbibtex.BibTeXEntry;
import org.jbibtex.DigitStringValue;
import org.jbibtex.Key;
/**
 * Custom {@link ItemDataProvider} that allows to set the data so that we don't have to instantiate a new CSL object
 * every time.
 */
public class JabRefItemDataProvider implements ItemDataProvider {

    private static final BibTeXConverter BIBTEX_CONVERTER = new BibTeXConverter();

    // Entries that retrieveItem/getIds resolve citation keys against
    private final List<BibEntry> data = new ArrayList<>();

    private BibDatabaseContext bibDatabaseContext;
    private BibEntryTypesManager entryTypesManager;

    // NOTE(review): assigned in setData but never read anywhere in this class - confirm
    // whether the field (and the PagesChecker setup in setData) can be removed entirely.
    private PagesChecker pagesChecker;

    // The former StringJsonBuilderFactory field was removed: it was created in the
    // constructor but never used by any method of this class.

    /**
     * Converts the {@link BibEntry} into {@link CSLItemData}.
     *
     * <br>
     * <table>
     * <thead>
     * <tr>
     * <th style="text-align:left">BibTeX</th>
     * <th style="text-align:left">BibLaTeX</th>
     * <th style="text-align:left">EntryPreview/CSL</th>
     * <th style="text-align:left">proposed logic, conditions and info</th>
     * </tr>
     * </thead>
     * <tbody>
     * <tr>
     * <td style="text-align:left">volume</td>
     * <td style="text-align:left">volume</td>
     * <td style="text-align:left">volume</td>
     * <td style="text-align:left"></td>
     * </tr>
     * <tr>
     * <td style="text-align:left">number</td>
     * <td style="text-align:left">issue</td>
     * <td style="text-align:left">issue</td>
     * <td style="text-align:left">For conversion to CSL or BibTeX: BibLaTeX <code>number</code> takes priority and supersedes BibLaTeX <code>issue</code></td>
     * </tr>
     * <tr>
     * <td style="text-align:left">number</td>
     * <td style="text-align:left">number</td>
     * <td style="text-align:left">issue</td>
     * <td style="text-align:left">same as above</td>
     * </tr>
     * <tr>
     * <td style="text-align:left">pages</td>
     * <td style="text-align:left">eid</td>
     * <td style="text-align:left">number</td>
     * <td style="text-align:left">Some journals put the article-number (= eid) into the pages field. If BibLaTeX <code>eid</code> exists, provide csl <code>number</code> to the style. If <code>pages</code> exists, provide csl <code>page</code>. If <code>eid</code> WITHIN the <code>pages</code> field exists, detect the eid and provide csl <code>number</code>. If both <code>eid</code> and <code>pages</code> exists, ideally provide both csl <code>number</code> and csl <code>page</code>. Ideally the citationstyle should be able to flexibly choose the rendering.</td>
     * </tr>
     * <tr>
     * <td style="text-align:left">pages</td>
     * <td style="text-align:left">pages</td>
     * <td style="text-align:left">page</td>
     * <td style="text-align:left">same as above</td>
     * </tr>
     * </tbody>
     * </table>
     */
    private CSLItemData bibEntryToCSLItemData(BibEntry originalBibEntry, BibDatabaseContext bibDatabaseContext, BibEntryTypesManager entryTypesManager) {
        // We need to make a deep copy, because we modify the entry according to the logic presented at
        // https://github.com/JabRef/jabref/issues/8372#issuecomment-1014941935
        BibEntry bibEntry = (BibEntry) originalBibEntry.clone();
        String citeKey = bibEntry.getCitationKey().orElse("");
        BibTeXEntry bibTeXEntry = new BibTeXEntry(new Key(bibEntry.getType().getName()), new Key(citeKey));

        // Not every field is already generated into latex free fields
        RemoveNewlinesFormatter removeNewlinesFormatter = new RemoveNewlinesFormatter();
        Optional<BibEntryType> entryType = entryTypesManager.enrich(bibEntry.getType(), bibDatabaseContext.getMode());

        if (bibEntry.getType().equals(StandardEntryType.Article)) {
            // Patch bibEntry to contain the right BibTeX (not BibLaTeX) fields
            // Note that we do not need to convert from "pages" to "page", because CiteProc already handles it
            // See BibTeXConverter
            if (bibDatabaseContext.isBiblatexMode()) {
                // Map "number" to CSL "issue", unless no number exists
                Optional<String> numberField = bibEntry.getField(StandardField.NUMBER);
                numberField.ifPresent(number -> {
                    bibEntry.setField(StandardField.ISSUE, number);
                    bibEntry.clearField(StandardField.NUMBER);
                });
                // Expose the eid as CSL "number" (cannot clash: "number" was just cleared above)
                bibEntry.getField(StandardField.EID).ifPresent(eid -> {
                    if (!bibEntry.hasField(StandardField.NUMBER)) {
                        bibEntry.setField(StandardField.NUMBER, eid);
                        bibEntry.clearField(StandardField.EID);
                    }
                });
            } else {
                // BibTeX mode
                bibEntry.getField(StandardField.NUMBER).ifPresent(number -> {
                    bibEntry.setField(StandardField.ISSUE, number);
                    bibEntry.clearField(StandardField.NUMBER);
                });
                // Some journals prefix the article number in the pages field with "Article"
                bibEntry.getField(StandardField.PAGES).ifPresent(pages -> {
                    if (pages.toLowerCase(Locale.ROOT).startsWith("article ")) {
                        pages = pages.substring("Article ".length());
                        bibEntry.setField(StandardField.NUMBER, pages);
                    }
                });
                bibEntry.getField(StandardField.EID).ifPresent(eid -> {
                    if (!bibEntry.hasField(StandardField.PAGES)) {
                        bibEntry.setField(StandardField.PAGES, eid);
                        bibEntry.clearField(StandardField.EID);
                    }
                });
            }
        }

        // Start with the entry type's declared fields (keeps a canonical order),
        // then add any additional fields actually present on the entry
        Set<Field> fields = new LinkedHashSet<>(entryType.map(BibEntryType::getAllFields).orElse(bibEntry.getFields()));
        fields.addAll(bibEntry.getFields());
        for (Field key : fields) {
            bibEntry.getResolvedFieldOrAlias(key, bibDatabaseContext.getDatabase())
                    .map(removeNewlinesFormatter::format)
                    .map(LatexToUnicodeAdapter::format)
                    .ifPresent(value -> {
                        if (StandardField.MONTH == key) {
                            // Change month from #mon# to mon because CSL does not support the former format
                            value = bibEntry.getMonth().map(Month::getShortName).orElse(value);
                        }
                        bibTeXEntry.addField(new Key(key.getName()), new DigitStringValue(value));
                    });
        }
        return BIBTEX_CONVERTER.toItemData(bibTeXEntry);
    }

    /**
     * Fills the data with all entries in given bibDatabaseContext
     */
    public void setData(BibDatabaseContext bibDatabaseContext, BibEntryTypesManager entryTypesManager) {
        this.setData(bibDatabaseContext.getEntries(), bibDatabaseContext, entryTypesManager);
    }

    /**
     * Replaces the current data with the given entries and context.
     */
    public void setData(List<BibEntry> data, BibDatabaseContext bibDatabaseContext, BibEntryTypesManager entryTypesManager) {
        this.data.clear();
        this.data.addAll(data);
        this.bibDatabaseContext = bibDatabaseContext;
        this.entryTypesManager = entryTypesManager;
        // Quick solution to always use BibLaTeX mode at the checker to allow pages ranges with single dash, too
        // Example: pages = {1-2}
        BibDatabaseContext ctx = new BibDatabaseContext();
        ctx.setMode(BibDatabaseMode.BIBLATEX);
        this.pagesChecker = new PagesChecker(ctx);
    }

    @Override
    public CSLItemData retrieveItem(String id) {
        // The citation key serves as the CSL item id; returns null when no entry matches
        return data.stream()
                   .filter(entry -> entry.getCitationKey().orElse("").equals(id))
                   .map(entry -> bibEntryToCSLItemData(entry, bibDatabaseContext, entryTypesManager))
                   .findFirst().orElse(null);
    }

    @Override
    public Collection<String> getIds() {
        // Entries without a citation key are reported with an empty id
        return data.stream()
                   .map(entry -> entry.getCitationKey().orElse(""))
                   .toList();
    }
}
| 9,183
| 44.241379
| 570
|
java
|
null |
jabref-main/src/main/java/org/jabref/logic/citationstyle/JabRefLocaleProvider.java
|
package org.jabref.logic.citationstyle;
import java.io.IOException;
import java.io.UncheckedIOException;
import java.net.URL;
import java.util.HashMap;
import java.util.Map;
import de.undercouch.citeproc.LocaleProvider;
import de.undercouch.citeproc.helper.CSLUtils;
/**
 * A {@link LocaleProvider} that loads locales from a directory in the current module.
 * <p>
 * This implementation is only a slight adaption of {@link de.undercouch.citeproc.DefaultLocaleProvider}.
 * Loaded locale XML is cached, so each locale file is read at most once.
 */
public class JabRefLocaleProvider implements LocaleProvider {

    private static final String LOCALES_ROOT = "/csl-locales";

    // Cache: language tag -> locale XML content
    private final Map<String, String> locales = new HashMap<>();

    @Override
    public String retrieveLocale(String lang) {
        return locales.computeIfAbsent(lang, this::loadLocale);
    }

    /**
     * Reads the locale XML for the given language tag from the classpath.
     *
     * @throws IllegalArgumentException if no locale file exists for the tag
     * @throws UncheckedIOException if the file exists but cannot be read
     */
    private String loadLocale(String locale) {
        URL url = getClass().getResource(LOCALES_ROOT + "/locales-" + locale + ".xml");
        if (url == null) {
            throw new IllegalArgumentException("Unable to load locale " + locale);
        }
        try {
            return CSLUtils.readURLToString(url, "UTF-8");
        } catch (IOException e) {
            throw new UncheckedIOException("failed to read locale " + locale, e);
        }
    }
}
| 1,270
| 31.589744
| 105
|
java
|
null |
jabref-main/src/main/java/org/jabref/logic/cleanup/CleanupJob.java
|
package org.jabref.logic.cleanup;
import java.util.List;
import org.jabref.model.FieldChange;
import org.jabref.model.entry.BibEntry;
/**
 * A single cleanup operation that can be applied to a {@link BibEntry}.
 * Implementations may modify the entry in place and report what they changed.
 */
@FunctionalInterface
public interface CleanupJob {

    /**
     * Cleanup the entry.
     *
     * @param entry the entry to clean up
     * @return the list of field changes applied to the entry (empty if nothing changed)
     */
    List<FieldChange> cleanup(BibEntry entry);
}
| 280
| 16.5625
| 46
|
java
|
null |
jabref-main/src/main/java/org/jabref/logic/cleanup/CleanupWorker.java
|
package org.jabref.logic.cleanup;
import java.util.ArrayList;
import java.util.List;
import java.util.Objects;
import org.jabref.logic.preferences.TimestampPreferences;
import org.jabref.model.FieldChange;
import org.jabref.model.database.BibDatabaseContext;
import org.jabref.model.entry.BibEntry;
import org.jabref.preferences.CleanupPreferences;
import org.jabref.preferences.FilePreferences;
/**
 * Runs the cleanup jobs configured in a {@link CleanupPreferences} preset against entries.
 */
public class CleanupWorker {

    private final BibDatabaseContext databaseContext;
    private final FilePreferences filePreferences;
    private final TimestampPreferences timestampPreferences;

    public CleanupWorker(BibDatabaseContext databaseContext, FilePreferences filePreferences, TimestampPreferences timestampPreferences) {
        this.databaseContext = databaseContext;
        this.filePreferences = filePreferences;
        this.timestampPreferences = timestampPreferences;
    }

    /**
     * Applies every job of the given preset to the entry, in order.
     *
     * @return all field changes performed by the jobs
     */
    public List<FieldChange> cleanup(CleanupPreferences preset, BibEntry entry) {
        Objects.requireNonNull(preset);
        Objects.requireNonNull(entry);

        List<FieldChange> allChanges = new ArrayList<>();
        for (CleanupJob job : determineCleanupActions(preset)) {
            allChanges.addAll(job.cleanup(entry));
        }
        return allChanges;
    }

    /** Translates the preset into the concrete list of jobs to execute. */
    private List<CleanupJob> determineCleanupActions(CleanupPreferences preset) {
        List<CleanupJob> jobs = new ArrayList<>();
        preset.getActiveJobs().forEach(step -> jobs.add(toJob(step)));
        if (preset.getFieldFormatterCleanups().isEnabled()) {
            jobs.addAll(preset.getFieldFormatterCleanups().getConfiguredActions());
        }
        return jobs;
    }

    /** Maps a single cleanup step to its implementing job. */
    private CleanupJob toJob(CleanupPreferences.CleanupStep action) {
        return switch (action) {
            case CLEAN_UP_DOI -> new DoiCleanup();
            case CLEANUP_EPRINT -> new EprintCleanup();
            case CLEAN_UP_URL -> new URLCleanup();
            case MAKE_PATHS_RELATIVE -> new RelativePathsCleanup(databaseContext, filePreferences);
            case RENAME_PDF -> new RenamePdfCleanup(false, databaseContext, filePreferences);
            case RENAME_PDF_ONLY_RELATIVE_PATHS -> new RenamePdfCleanup(true, databaseContext, filePreferences);
            case CLEAN_UP_UPGRADE_EXTERNAL_LINKS -> new UpgradePdfPsToFileCleanup();
            case CONVERT_TO_BIBLATEX -> new ConvertToBiblatexCleanup();
            case CONVERT_TO_BIBTEX -> new ConvertToBibtexCleanup();
            case CONVERT_TIMESTAMP_TO_CREATIONDATE -> new TimeStampToCreationDate(timestampPreferences);
            case CONVERT_TIMESTAMP_TO_MODIFICATIONDATE -> new TimeStampToModificationDate(timestampPreferences);
            case MOVE_PDF -> new MoveFilesCleanup(databaseContext, filePreferences);
            case FIX_FILE_LINKS -> new FileLinksCleanup();
            case CLEAN_UP_ISSN -> new ISSNCleanup();
            default -> throw new UnsupportedOperationException(action.name());
        };
    }
}
| 3,359
| 36.752809
| 138
|
java
|
null |
jabref-main/src/main/java/org/jabref/logic/cleanup/ConvertToBiblatexCleanup.java
|
package org.jabref.logic.cleanup;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import org.jabref.model.FieldChange;
import org.jabref.model.entry.BibEntry;
import org.jabref.model.entry.Date;
import org.jabref.model.entry.EntryConverter;
import org.jabref.model.entry.field.Field;
import org.jabref.model.entry.field.StandardField;
import org.jabref.model.strings.StringUtil;
/**
 * Converts the entry to biblatex format: moves BibTeX fields to their BibLaTeX
 * counterparts and merges year/month into the date field where possible.
 */
public class ConvertToBiblatexCleanup implements CleanupJob {

    @Override
    public List<FieldChange> cleanup(BibEntry entry) {
        List<FieldChange> fieldChanges = new ArrayList<>();

        // Copy each BibTeX field to its BibLaTeX counterpart - only when the target is still empty
        EntryConverter.FIELD_ALIASES_BIBTEX_TO_BIBLATEX.forEach((bibtexField, biblatexField) ->
                entry.getField(bibtexField).ifPresent(value -> {
                    if (!value.isEmpty() && entry.getField(biblatexField).isEmpty()) {
                        // There is content in the old field and no value in the new, so just copy
                        entry.setField(biblatexField, value).ifPresent(fieldChanges::add);
                        entry.clearField(bibtexField).ifPresent(fieldChanges::add);
                    }
                }));

        // Dates: create date out of year and month, save it and delete old fields
        // If there already exists a non blank/empty value for the field date, it is not overwritten
        if (StringUtil.isBlank(entry.getField(StandardField.DATE))) {
            entry.getFieldOrAlias(StandardField.DATE).ifPresent(combinedDate -> {
                entry.setField(StandardField.DATE, combinedDate).ifPresent(fieldChanges::add);
                entry.clearField(StandardField.YEAR).ifPresent(fieldChanges::add);
                entry.clearField(StandardField.MONTH).ifPresent(fieldChanges::add);
            });
        } else {
            // A date already exists: drop year/month only when they carry no information
            // beyond what the date field already expresses
            entry.getFieldOrAlias(StandardField.DATE).ifPresent(dateValue -> {
                Optional<Date> parsedDate = Date.parse(dateValue);
                Optional<Date> yearMonthDate = Date.parse(entry.getFieldOrAlias(StandardField.YEAR),
                        entry.getFieldOrAlias(StandardField.MONTH), Optional.empty());
                if (yearMonthDate.equals(parsedDate)) {
                    entry.clearField(StandardField.YEAR).ifPresent(fieldChanges::add);
                    entry.clearField(StandardField.MONTH).ifPresent(fieldChanges::add);
                }
            });
        }
        return fieldChanges;
    }
}
| 2,629
| 43.576271
| 106
|
java
|
null |
jabref-main/src/main/java/org/jabref/logic/cleanup/ConvertToBibtexCleanup.java
|
package org.jabref.logic.cleanup;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import org.jabref.model.FieldChange;
import org.jabref.model.entry.BibEntry;
import org.jabref.model.entry.EntryConverter;
import org.jabref.model.entry.field.Field;
import org.jabref.model.entry.field.StandardField;
import org.jabref.model.strings.StringUtil;
/**
 * Converts the entry to BibTeX format: splits the date field into year/month and moves
 * BibLaTeX fields back to their BibTeX counterparts.
 * (The previous Javadoc wrongly claimed this converts to biblatex.)
 */
public class ConvertToBibtexCleanup implements CleanupJob {

    @Override
    public List<FieldChange> cleanup(BibEntry entry) {
        List<FieldChange> changes = new ArrayList<>();

        // Dates: get date and fill year and month
        // If there already exists a non blank/empty value for the field, then it is not overwritten
        entry.getPublicationDate().ifPresent(date -> {
            if (StringUtil.isBlank(entry.getField(StandardField.YEAR))) {
                date.getYear().flatMap(year -> entry.setField(StandardField.YEAR, year.toString())).ifPresent(changes::add);
            }
            if (StringUtil.isBlank(entry.getField(StandardField.MONTH))) {
                date.getMonth().flatMap(month -> entry.setField(StandardField.MONTH, month.getJabRefFormat())).ifPresent(changes::add);
            }
            // Only drop the date field when information was actually moved into year/month
            if (!changes.isEmpty()) {
                entry.clearField(StandardField.DATE).ifPresent(changes::add);
            }
        });

        // Move each BibLaTeX field back to its BibTeX counterpart, never overwriting content
        for (Map.Entry<Field, Field> alias : EntryConverter.FIELD_ALIASES_BIBTEX_TO_BIBLATEX.entrySet()) {
            Field oldField = alias.getValue();  // BibLaTeX side
            Field newField = alias.getKey();    // BibTeX side
            entry.getField(oldField).ifPresent(oldValue -> {
                if (!oldValue.isEmpty() && entry.getField(newField).isEmpty()) {
                    // There is content in the old field and no value in the new, so just copy
                    entry.setField(newField, oldValue).ifPresent(changes::add);
                    entry.clearField(oldField).ifPresent(changes::add);
                }
            });
        }
        return changes;
    }
}
| 2,066
| 38
| 135
|
java
|
null |
jabref-main/src/main/java/org/jabref/logic/cleanup/DoiCleanup.java
|
package org.jabref.logic.cleanup;
import java.net.URLDecoder;
import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.Optional;
import org.jabref.logic.formatter.bibtexfields.ClearFormatter;
import org.jabref.model.FieldChange;
import org.jabref.model.entry.BibEntry;
import org.jabref.model.entry.field.Field;
import org.jabref.model.entry.field.StandardField;
import org.jabref.model.entry.field.UnknownField;
import org.jabref.model.entry.identifier.DOI;
/**
 * Formats the DOI (e.g. removes http part) and also moves DOIs from note, url or ee field to the doi field.
 */
public class DoiCleanup implements CleanupJob {

    /**
     * Fields to check for DOIs.
     */
    private static final List<Field> FIELDS = Arrays.asList(StandardField.NOTE, StandardField.URL, new UnknownField("ee"));

    @Override
    public List<FieldChange> cleanup(BibEntry entry) {
        List<FieldChange> changes = new ArrayList<>();

        if (entry.hasField(StandardField.DOI)) {
            // DOI is set: normalize it and strip duplicated DOIs from the other fields
            String doiFieldValue = entry.getField(StandardField.DOI).orElse(null);
            // Undo percent-encoding first (e.g. "10.1000%2F182" -> "10.1000/182");
            // the previous code used a pointless temporary variable here
            doiFieldValue = URLDecoder.decode(doiFieldValue, StandardCharsets.UTF_8);

            Optional<DOI> doi = DOI.parse(doiFieldValue);
            if (doi.isPresent()) {
                String newValue = doi.get().getDOI();
                if (!doiFieldValue.equals(newValue)) {
                    entry.setField(StandardField.DOI, newValue);
                    changes.add(new FieldChange(entry, StandardField.DOI, doiFieldValue, newValue));
                }

                // Doi field seems to contain Doi -> cleanup note, url, ee field
                for (Field field : FIELDS) {
                    entry.getField(field).flatMap(DOI::parse)
                         .ifPresent(unused -> removeFieldValue(entry, field, changes));
                }
            }
        } else {
            // As the Doi field is empty we now check if note, url, or ee field contains a Doi
            for (Field field : FIELDS) {
                Optional<DOI> doi = entry.getField(field).flatMap(DOI::parse);
                if (doi.isPresent()) {
                    // Move the DOI into the doi field and clear the source field
                    entry.setField(StandardField.DOI, doi.get().getDOI()).ifPresent(changes::add);
                    removeFieldValue(entry, field, changes);
                }
            }
        }
        return changes;
    }

    /** Clears the given field via a {@link ClearFormatter} so the change is recorded. */
    private void removeFieldValue(BibEntry entry, Field field, List<FieldChange> changes) {
        CleanupJob eraser = new FieldFormatterCleanup(field, new ClearFormatter());
        changes.addAll(eraser.cleanup(entry));
    }
}
| 2,949
| 36.820513
| 123
|
java
|
null |
jabref-main/src/main/java/org/jabref/logic/cleanup/EprintCleanup.java
|
package org.jabref.logic.cleanup;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.Optional;
import org.jabref.model.FieldChange;
import org.jabref.model.entry.BibEntry;
import org.jabref.model.entry.field.Field;
import org.jabref.model.entry.field.StandardField;
import org.jabref.model.entry.identifier.ArXivIdentifier;
/**
 * Moves an arXiv identifier found in the url, journal, journaltitle or note field into the
 * eprint/eprinttype/eprintclass fields and clears the original field.
 * (The class does not handle DOIs; that is done by {@code DoiCleanup}.)
 */
public class EprintCleanup implements CleanupJob {

    @Override
    public List<FieldChange> cleanup(BibEntry entry) {
        List<FieldChange> changes = new ArrayList<>();

        // Fields that are scanned for an arXiv identifier, in priority order
        for (Field field : List.of(StandardField.URL, StandardField.JOURNAL, StandardField.JOURNALTITLE, StandardField.NOTE)) {
            Optional<ArXivIdentifier> identifier = entry.getField(field).flatMap(ArXivIdentifier::parse);
            if (identifier.isEmpty()) {
                continue;
            }

            entry.setField(StandardField.EPRINT, identifier.get().getNormalized())
                 .ifPresent(changes::add);
            entry.setField(StandardField.EPRINTTYPE, "arxiv")
                 .ifPresent(changes::add);
            identifier.get().getClassification().ifPresent(classification ->
                    entry.setField(StandardField.EPRINTCLASS, classification)
                         .ifPresent(changes::add));
            entry.clearField(field)
                 .ifPresent(changes::add);

            if (field.equals(StandardField.URL)) {
                // If we clear the URL field, we should also clear the URL-date field
                entry.clearField(StandardField.URLDATE)
                     .ifPresent(changes::add);
            }
        }
        return changes;
    }
}
| 1,871
| 35
| 133
|
java
|
null |
jabref-main/src/main/java/org/jabref/logic/cleanup/FieldFormatterCleanup.java
|
package org.jabref.logic.cleanup;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashSet;
import java.util.List;
import java.util.Objects;
import java.util.Set;
import org.jabref.model.FieldChange;
import org.jabref.model.entry.BibEntry;
import org.jabref.model.entry.event.EntriesEventSource;
import org.jabref.model.entry.field.Field;
import org.jabref.model.entry.field.FieldFactory;
import org.jabref.model.entry.field.InternalField;
/**
 * Formats a given entry field with the specified formatter.
 * The special fields {@code INTERNAL_ALL_FIELD} and {@code INTERNAL_ALL_TEXT_FIELDS_FIELD}
 * apply the formatter to all (text) fields of the entry instead of a single one.
 */
public class FieldFormatterCleanup implements CleanupJob {

    private final Field field;
    private final Formatter formatter;

    public FieldFormatterCleanup(Field field, Formatter formatter) {
        this.field = field;
        this.formatter = formatter;
    }

    @Override
    public List<FieldChange> cleanup(BibEntry entry) {
        if (InternalField.INTERNAL_ALL_FIELD == field) {
            return cleanupAllFields(entry);
        }
        if (InternalField.INTERNAL_ALL_TEXT_FIELDS_FIELD == field) {
            return cleanupAllTextFields(entry);
        }
        return cleanupSingleField(field, entry);
    }

    /**
     * Runs the formatter on the specified field in the given entry.
     * <p>
     * If the formatter returns an empty string, then the field is removed.
     *
     * @param targetField the field on which to run the formatter
     * @param entry the entry to be cleaned up
     * @return a list of changes of the entry
     */
    private List<FieldChange> cleanupSingleField(Field targetField, BibEntry entry) {
        if (!entry.hasField(targetField)) {
            // Not set -> nothing to do
            return Collections.emptyList();
        }
        String previousValue = entry.getField(targetField).orElse(null);

        String formattedValue = formatter.format(previousValue);
        if (formattedValue.equals(previousValue)) {
            // Formatter did not change anything
            return Collections.emptyList();
        }

        if (formattedValue.isEmpty()) {
            // An empty result means: delete the field
            entry.clearField(targetField);
            formattedValue = null;
        } else {
            entry.setField(targetField, formattedValue, EntriesEventSource.SAVE_ACTION);
        }
        return Collections.singletonList(new FieldChange(entry, targetField, previousValue, formattedValue));
    }

    /** Applies the formatter to every field of the entry except the citation key. */
    private List<FieldChange> cleanupAllFields(BibEntry entry) {
        List<FieldChange> fieldChanges = new ArrayList<>();
        for (Field currentField : entry.getFields()) {
            if (!currentField.equals(InternalField.KEY_FIELD)) {
                fieldChanges.addAll(cleanupSingleField(currentField, entry));
            }
        }
        return fieldChanges;
    }

    /** Applies the formatter to every text field of the entry except the citation key. */
    private List<FieldChange> cleanupAllTextFields(BibEntry entry) {
        List<FieldChange> fieldChanges = new ArrayList<>();
        Set<Field> textFields = new HashSet<>(entry.getFields());
        textFields.removeAll(FieldFactory.getNotTextFieldNames());
        for (Field currentField : textFields) {
            if (!currentField.equals(InternalField.KEY_FIELD)) {
                fieldChanges.addAll(cleanupSingleField(currentField, entry));
            }
        }
        return fieldChanges;
    }

    public Field getField() {
        return field;
    }

    public Formatter getFormatter() {
        return formatter;
    }

    @Override
    public boolean equals(Object obj) {
        if (this == obj) {
            return true;
        }
        return (obj instanceof FieldFormatterCleanup that)
                && Objects.equals(field, that.field)
                && Objects.equals(formatter, that.formatter);
    }

    @Override
    public int hashCode() {
        return Objects.hash(field, formatter);
    }

    @Override
    public String toString() {
        return field + ": " + formatter.getName();
    }
}
| 3,896
| 29.445313
| 98
|
java
|
null |
jabref-main/src/main/java/org/jabref/logic/cleanup/FieldFormatterCleanups.java
|
package org.jabref.logic.cleanup;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.StringJoiner;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import org.jabref.logic.formatter.Formatters;
import org.jabref.logic.formatter.IdentityFormatter;
import org.jabref.logic.formatter.bibtexfields.HtmlToLatexFormatter;
import org.jabref.logic.formatter.bibtexfields.HtmlToUnicodeFormatter;
import org.jabref.logic.formatter.bibtexfields.NormalizeDateFormatter;
import org.jabref.logic.formatter.bibtexfields.NormalizeMonthFormatter;
import org.jabref.logic.formatter.bibtexfields.NormalizePagesFormatter;
import org.jabref.logic.formatter.bibtexfields.OrdinalsToSuperscriptFormatter;
import org.jabref.logic.formatter.bibtexfields.UnicodeToLatexFormatter;
import org.jabref.logic.layout.format.LatexToUnicodeFormatter;
import org.jabref.logic.layout.format.ReplaceUnicodeLigaturesFormatter;
import org.jabref.model.FieldChange;
import org.jabref.model.entry.BibEntry;
import org.jabref.model.entry.field.Field;
import org.jabref.model.entry.field.FieldFactory;
import org.jabref.model.entry.field.InternalField;
import org.jabref.model.entry.field.StandardField;
import org.jabref.model.strings.StringUtil;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
public class FieldFormatterCleanups {
public static final List<FieldFormatterCleanup> DEFAULT_SAVE_ACTIONS;
public static final List<FieldFormatterCleanup> RECOMMEND_BIBTEX_ACTIONS;
public static final List<FieldFormatterCleanup> RECOMMEND_BIBLATEX_ACTIONS;
public static final String ENABLED = "enabled";
public static final String DISABLED = "disabled";
private static final Logger LOGGER = LoggerFactory.getLogger(FieldFormatterCleanups.class);
/**
* This parses the key/list map of fields and clean up actions for the field.
* <p>
* General format for one key/list map: <code>...[...]</code> - <code>field[formatter1,formatter2,...]</code>
* Multiple are written as <code>...[...]...[...]...[...]</code>
* <code>field1[formatter1,formatter2,...]field2[formatter3,formatter4,...]</code>
* <p>
* The idea is that characters are field names until <code>[</code> is reached and that formatter lists are terminated by <code>]</code>
* <p>
* Example: <code>pages[normalize_page_numbers]title[escapeAmpersands,escapeDollarSign,escapeUnderscores,latex_cleanup]</code>
*/
private static final Pattern FIELD_FORMATTER_CLEANUP_PATTERN = Pattern.compile("([^\\[]+)\\[([^]]+)]");
static {
DEFAULT_SAVE_ACTIONS = List.of(
new FieldFormatterCleanup(StandardField.PAGES, new NormalizePagesFormatter()),
new FieldFormatterCleanup(StandardField.DATE, new NormalizeDateFormatter()),
new FieldFormatterCleanup(StandardField.MONTH, new NormalizeMonthFormatter()),
new FieldFormatterCleanup(InternalField.INTERNAL_ALL_TEXT_FIELDS_FIELD, new ReplaceUnicodeLigaturesFormatter()));
List<FieldFormatterCleanup> recommendedBibtexFormatters = new ArrayList<>(DEFAULT_SAVE_ACTIONS);
recommendedBibtexFormatters.addAll(List.of(
new FieldFormatterCleanup(InternalField.INTERNAL_ALL_TEXT_FIELDS_FIELD, new HtmlToLatexFormatter()),
new FieldFormatterCleanup(InternalField.INTERNAL_ALL_TEXT_FIELDS_FIELD, new UnicodeToLatexFormatter()),
new FieldFormatterCleanup(InternalField.INTERNAL_ALL_TEXT_FIELDS_FIELD, new OrdinalsToSuperscriptFormatter())));
RECOMMEND_BIBTEX_ACTIONS = Collections.unmodifiableList(recommendedBibtexFormatters);
List<FieldFormatterCleanup> recommendedBiblatexFormatters = new ArrayList<>(DEFAULT_SAVE_ACTIONS);
recommendedBiblatexFormatters.addAll(List.of(
new FieldFormatterCleanup(StandardField.TITLE, new HtmlToUnicodeFormatter()),
new FieldFormatterCleanup(InternalField.INTERNAL_ALL_TEXT_FIELDS_FIELD, new LatexToUnicodeFormatter())));
// DO NOT ADD OrdinalsToSuperscriptFormatter here, because this causes issues. See https://github.com/JabRef/jabref/issues/2596.
RECOMMEND_BIBLATEX_ACTIONS = Collections.unmodifiableList(recommendedBiblatexFormatters);
}
private final boolean enabled;
private final List<FieldFormatterCleanup> actions;
public FieldFormatterCleanups(boolean enabled, List<FieldFormatterCleanup> actions) {
this.enabled = enabled;
this.actions = Objects.requireNonNull(actions);
}
/**
* Note: String parsing is done at {@link FieldFormatterCleanups#parse(String)}
*/
public static String getMetaDataString(List<FieldFormatterCleanup> actionList, String newLineSeparator) {
// First, group all formatters by the field for which they apply
// Order of the list should be kept
Map<Field, List<String>> groupedByField = new LinkedHashMap<>();
for (FieldFormatterCleanup cleanup : actionList) {
Field key = cleanup.getField();
// add new list into the hashmap if needed
if (!groupedByField.containsKey(key)) {
groupedByField.put(key, new ArrayList<>());
}
// add the formatter to the map if it is not already there
List<String> formattersForKey = groupedByField.get(key);
if (!formattersForKey.contains(cleanup.getFormatter().getKey())) {
formattersForKey.add(cleanup.getFormatter().getKey());
}
}
// convert the contents of the hashmap into the correct serialization
StringBuilder result = new StringBuilder();
for (Map.Entry<Field, List<String>> entry : groupedByField.entrySet()) {
result.append(entry.getKey().getName());
StringJoiner joiner = new StringJoiner(",", "[", "]" + newLineSeparator);
entry.getValue().forEach(joiner::add);
result.append(joiner);
}
return result.toString();
}
public boolean isEnabled() {
return enabled;
}
public List<FieldFormatterCleanup> getConfiguredActions() {
return Collections.unmodifiableList(actions);
}
public List<FieldChange> applySaveActions(BibEntry entry) {
if (enabled) {
return applyAllActions(entry);
} else {
return Collections.emptyList();
}
}
private List<FieldChange> applyAllActions(BibEntry entry) {
List<FieldChange> result = new ArrayList<>();
for (FieldFormatterCleanup action : actions) {
result.addAll(action.cleanup(entry));
}
return result;
}
// ToDo: This should reside in MetaDataSerializer
public List<String> getAsStringList(String delimiter) {
List<String> stringRepresentation = new ArrayList<>();
if (enabled) {
stringRepresentation.add(ENABLED);
} else {
stringRepresentation.add(DISABLED);
}
String formatterString = getMetaDataString(actions, delimiter);
stringRepresentation.add(formatterString);
return stringRepresentation;
}
public static List<FieldFormatterCleanup> parse(String formatterString) {
if ((formatterString == null) || formatterString.isEmpty()) {
// no save actions defined in the meta data
return Collections.emptyList();
}
List<FieldFormatterCleanup> result = new ArrayList<>();
// first remove all newlines for easier parsing
String formatterStringWithoutLineBreaks = StringUtil.unifyLineBreaks(formatterString, "");
Matcher matcher = FIELD_FORMATTER_CLEANUP_PATTERN.matcher(formatterStringWithoutLineBreaks);
while (matcher.find()) {
String fieldKey = matcher.group(1);
Field field = FieldFactory.parseField(fieldKey);
String fieldString = matcher.group(2);
List<FieldFormatterCleanup> fieldFormatterCleanups = Arrays.stream(fieldString.split(","))
.map(FieldFormatterCleanups::getFormatterFromString)
.map(formatter -> new FieldFormatterCleanup(field, formatter))
.toList();
result.addAll(fieldFormatterCleanups);
}
return result;
}
// ToDo: This should reside in MetaDataParser
public static FieldFormatterCleanups parse(List<String> formatterMetaList) {
if ((formatterMetaList != null) && (formatterMetaList.size() >= 2)) {
boolean enablementStatus = FieldFormatterCleanups.ENABLED.equals(formatterMetaList.get(0));
String formatterString = formatterMetaList.get(1);
return new FieldFormatterCleanups(enablementStatus, parse(formatterString));
} else {
// return default actions
return new FieldFormatterCleanups(false, DEFAULT_SAVE_ACTIONS);
}
}
static Formatter getFormatterFromString(String formatterName) {
for (Formatter formatter : Formatters.getAll()) {
if (formatterName.equals(formatter.getKey())) {
return formatter;
}
}
LOGGER.info("Formatter {} not found.", formatterName);
return new IdentityFormatter();
}
    @Override
    public int hashCode() {
        // Consistent with equals: based on the configured actions and the enablement flag.
        return Objects.hash(actions, enabled);
    }
@Override
public boolean equals(Object obj) {
if (this == obj) {
return true;
}
if (obj instanceof FieldFormatterCleanups other) {
return Objects.equals(actions, other.actions) && (enabled == other.enabled);
}
return false;
}
    /**
     * Debug-friendly representation showing the enablement flag and the configured actions.
     */
    @Override
    public String toString() {
        return "FieldFormatterCleanups{" +
                "enabled=" + enabled + "," +
                "actions=" + actions +
                "}";
    }
}
| 10,191
| 41.466667
| 140
|
java
|
null |
jabref-main/src/main/java/org/jabref/logic/cleanup/FileLinksCleanup.java
|
package org.jabref.logic.cleanup;
import java.util.Collections;
import java.util.List;
import java.util.Optional;
import org.jabref.logic.bibtex.FileFieldWriter;
import org.jabref.model.FieldChange;
import org.jabref.model.entry.BibEntry;
import org.jabref.model.entry.LinkedFile;
import org.jabref.model.entry.field.StandardField;
/**
 * Fixes the format of the file field. For example, if the file link is empty but the description wrongly contains the path.
 */
public class FileLinksCleanup implements CleanupJob {

    /**
     * Re-serializes the parsed file list back into the file field, normalizing its format.
     *
     * @param entry the entry whose file field is normalized
     * @return a single field change when the serialized form differs; empty otherwise
     */
    @Override
    public List<FieldChange> cleanup(BibEntry entry) {
        Optional<String> oldFileField = entry.getField(StandardField.FILE);
        if (oldFileField.isEmpty()) {
            return Collections.emptyList();
        }
        List<LinkedFile> parsedFiles = entry.getFiles();
        // Parsing automatically moves a single description to link, so writing the parsed
        // list back again is enough to normalize the field.
        String normalized = FileFieldWriter.getStringRepresentation(parsedFiles);
        if (oldFileField.get().equals(normalized)) {
            return Collections.emptyList();
        }
        entry.setField(StandardField.FILE, normalized);
        FieldChange change = new FieldChange(entry, StandardField.FILE, oldFileField.get(), normalized);
        return Collections.singletonList(change);
    }
}
| 1,327
| 34.891892
| 124
|
java
|
null |
jabref-main/src/main/java/org/jabref/logic/cleanup/Formatter.java
|
package org.jabref.logic.cleanup;
/**
 * The Formatter is used for a Filter design-pattern. Extending classes have to accept a String and returned a formatted
 * version of it. Implementations have to reside in the logic package.
 * <p>
 * Example:
 * <p>
 * "John von Neumann" => "von Neumann, John"
 */
public abstract class Formatter {

    /**
     * Human readable name of the formatter, usable e.g. in the GUI.
     *
     * @return the name of the formatter, always not null
     */
    public abstract String getName();

    /**
     * Unique key identifying this formatter. Also the basis for equality and hashing.
     *
     * @return the key of the formatter, always not null
     */
    public abstract String getKey();

    /**
     * Applies this formatter's transformation to a field value.
     * <p>
     * Calling this method with a null argument results in a NullPointerException.
     *
     * @param value the input String
     * @return the formatted output String
     */
    public abstract String format(String value);

    /**
     * Human readable description of what this formatter does.
     *
     * @return the description string, always non empty
     */
    public abstract String getDescription();

    /**
     * Example input demonstrating this formatter's effect (fed to {@link #format(String)} in the UI).
     *
     * @return the example input string, always non empty
     */
    public abstract String getExampleInput();

    /**
     * Hash code derived solely from the formatter key, consistent with {@link #equals(Object)}.
     */
    @Override
    public int hashCode() {
        return getKey().hashCode();
    }

    /**
     * Two formatters are considered equal iff they share the same key.
     *
     * @param obj the object to compare the formatter to
     * @return true if the object is a formatter with the same key
     */
    @Override
    public boolean equals(Object obj) {
        return (obj instanceof Formatter other) && getKey().equals(other.getKey());
    }
}
| 2,205
| 27.649351
| 120
|
java
|
null |
jabref-main/src/main/java/org/jabref/logic/cleanup/ISSNCleanup.java
|
package org.jabref.logic.cleanup;
import java.util.Collections;
import java.util.List;
import java.util.Optional;
import org.jabref.model.FieldChange;
import org.jabref.model.entry.BibEntry;
import org.jabref.model.entry.field.StandardField;
import org.jabref.model.entry.identifier.ISSN;
/**
 * Normalizes the ISSN field (e.g. inserts the dash) when the stored value can be cleaned.
 */
public class ISSNCleanup implements CleanupJob {

    @Override
    public List<FieldChange> cleanup(BibEntry entry) {
        Optional<String> issnValue = entry.getField(StandardField.ISSN);
        if (issnValue.isEmpty()) {
            return Collections.emptyList();
        }
        ISSN issn = new ISSN(issnValue.get());
        if (!issn.isCanBeCleaned()) {
            return Collections.emptyList();
        }
        String cleaned = issn.getCleanedISSN();
        FieldChange change = new FieldChange(entry, StandardField.ISSN, issnValue.get(), cleaned);
        entry.setField(StandardField.ISSN, cleaned);
        return Collections.singletonList(change);
    }
}
| 992
| 31.032258
| 104
|
java
|
null |
jabref-main/src/main/java/org/jabref/logic/cleanup/MoveFieldCleanup.java
|
package org.jabref.logic.cleanup;
import java.util.List;
import java.util.Optional;
import org.jabref.model.FieldChange;
import org.jabref.model.entry.BibEntry;
import org.jabref.model.entry.field.Field;
import org.jabref.model.util.OptionalUtil;
/**
 * Moves the content of one field to another field.
 */
public class MoveFieldCleanup implements CleanupJob {

    // Both fields are fixed at construction time; made final to document immutability.
    private final Field sourceField;
    private final Field targetField;

    /**
     * @param sourceField the field whose content is moved (and cleared afterwards)
     * @param targetField the field receiving the content
     */
    public MoveFieldCleanup(Field sourceField, Field targetField) {
        this.sourceField = sourceField;
        this.targetField = targetField;
    }

    /**
     * Copies the source field's value (if present) to the target field, then clears the source.
     *
     * @return the set and clear changes that actually occurred (zero, one, or two entries)
     */
    @Override
    public List<FieldChange> cleanup(BibEntry entry) {
        Optional<FieldChange> setFieldChange = entry.getField(sourceField).flatMap(
                value -> entry.setField(targetField, value));
        Optional<FieldChange> clearFieldChange = entry.clearField(sourceField);
        return OptionalUtil.toList(setFieldChange, clearFieldChange);
    }
}
| 956
| 28.90625
| 83
|
java
|
null |
jabref-main/src/main/java/org/jabref/logic/cleanup/MoveFilesCleanup.java
|
package org.jabref.logic.cleanup;
import java.io.IOException;
import java.util.Collections;
import java.util.List;
import java.util.Objects;
import java.util.Optional;
import org.jabref.logic.externalfiles.LinkedFileHandler;
import org.jabref.model.FieldChange;
import org.jabref.model.database.BibDatabaseContext;
import org.jabref.model.entry.BibEntry;
import org.jabref.model.entry.LinkedFile;
import org.jabref.model.util.OptionalUtil;
import org.jabref.preferences.FilePreferences;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * Moves all linked files of an entry into the default file directory.
 */
public class MoveFilesCleanup implements CleanupJob {

    private static final Logger LOGGER = LoggerFactory.getLogger(MoveFilesCleanup.class);

    private final BibDatabaseContext databaseContext;
    private final FilePreferences filePreferences;

    public MoveFilesCleanup(BibDatabaseContext databaseContext, FilePreferences filePreferences) {
        this.databaseContext = Objects.requireNonNull(databaseContext);
        this.filePreferences = Objects.requireNonNull(filePreferences);
    }

    /**
     * Tries to move every linked file to the default directory; failures for individual files
     * are logged and do not abort the remaining moves.
     */
    @Override
    public List<FieldChange> cleanup(BibEntry entry) {
        List<LinkedFile> linkedFiles = entry.getFiles();
        boolean anyFileMoved = false;
        for (LinkedFile linkedFile : linkedFiles) {
            LinkedFileHandler handler = new LinkedFileHandler(linkedFile, entry, databaseContext, filePreferences);
            try {
                anyFileMoved |= handler.moveToDefaultDirectory();
            } catch (IOException exception) {
                LOGGER.error("Error while moving file {}", linkedFile.getLink(), exception);
            }
        }
        if (!anyFileMoved) {
            return Collections.emptyList();
        }
        Optional<FieldChange> fileFieldChange = entry.setFiles(linkedFiles);
        return OptionalUtil.toList(fileFieldChange);
    }
}
| 1,897
| 32.298246
| 113
|
java
|
null |
jabref-main/src/main/java/org/jabref/logic/cleanup/RelativePathsCleanup.java
|
package org.jabref.logic.cleanup;
import java.nio.file.Path;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Objects;
import java.util.Optional;
import org.jabref.logic.util.io.FileUtil;
import org.jabref.model.FieldChange;
import org.jabref.model.database.BibDatabaseContext;
import org.jabref.model.entry.BibEntry;
import org.jabref.model.entry.LinkedFile;
import org.jabref.preferences.FilePreferences;
/**
 * Rewrites local file links of an entry as paths relative to the configured file directories.
 * Online links are left untouched.
 */
public class RelativePathsCleanup implements CleanupJob {

    private final BibDatabaseContext databaseContext;
    private final FilePreferences filePreferences;

    public RelativePathsCleanup(BibDatabaseContext databaseContext, FilePreferences filePreferences) {
        this.databaseContext = Objects.requireNonNull(databaseContext);
        this.filePreferences = Objects.requireNonNull(filePreferences);
    }

    @Override
    public List<FieldChange> cleanup(BibEntry entry) {
        List<LinkedFile> rewrittenFiles = new ArrayList<>();
        boolean anyPathChanged = false;
        for (LinkedFile linkedFile : entry.getFiles()) {
            String currentLink = linkedFile.getLink();
            String newLink;
            if (linkedFile.isOnlineLink()) {
                // keep online link untouched
                newLink = currentLink;
            } else {
                // only try to transform local file path to relative one
                newLink = FileUtil
                        .relativize(Path.of(currentLink), databaseContext.getFileDirectories(filePreferences))
                        .toString();
            }
            if (currentLink.equals(newLink)) {
                rewrittenFiles.add(linkedFile);
            } else {
                rewrittenFiles.add(new LinkedFile(linkedFile.getDescription(), Path.of(newLink), linkedFile.getFileType()));
                anyPathChanged = true;
            }
        }
        if (!anyPathChanged) {
            return Collections.emptyList();
        }
        // setFiles may report no change; mirror that as an empty result
        return entry.setFiles(rewrittenFiles)
                    .map(Collections::singletonList)
                    .orElse(Collections.emptyList());
    }
}
| 2,324
| 34.769231
| 121
|
java
|
null |
jabref-main/src/main/java/org/jabref/logic/cleanup/RenamePdfCleanup.java
|
package org.jabref.logic.cleanup;
import java.io.IOException;
import java.nio.file.Path;
import java.util.Collections;
import java.util.List;
import java.util.Objects;
import java.util.Optional;
import org.jabref.logic.externalfiles.LinkedFileHandler;
import org.jabref.model.FieldChange;
import org.jabref.model.database.BibDatabaseContext;
import org.jabref.model.entry.BibEntry;
import org.jabref.model.entry.LinkedFile;
import org.jabref.model.util.OptionalUtil;
import org.jabref.preferences.FilePreferences;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * Renames linked files to the suggested (citation-key based) name.
 */
public class RenamePdfCleanup implements CleanupJob {

    private static final Logger LOGGER = LoggerFactory.getLogger(RenamePdfCleanup.class);

    private final BibDatabaseContext databaseContext;
    // When true, files with absolute paths are skipped and only relatively linked files are renamed.
    private final boolean onlyRelativePaths;
    private final FilePreferences filePreferences;

    public RenamePdfCleanup(boolean onlyRelativePaths, BibDatabaseContext databaseContext, FilePreferences filePreferences) {
        this.databaseContext = Objects.requireNonNull(databaseContext);
        this.onlyRelativePaths = onlyRelativePaths;
        // Fail fast on null, consistent with the databaseContext check above and with MoveFilesCleanup.
        this.filePreferences = Objects.requireNonNull(filePreferences);
    }

    /**
     * Renames each eligible linked file to its suggested name; failures for individual files
     * are logged and do not abort the remaining renames.
     *
     * @return the file-field change when at least one file was renamed; empty otherwise
     */
    @Override
    public List<FieldChange> cleanup(BibEntry entry) {
        List<LinkedFile> files = entry.getFiles();
        boolean changed = false;
        for (LinkedFile file : files) {
            if (onlyRelativePaths && Path.of(file.getLink()).isAbsolute()) {
                continue;
            }
            LinkedFileHandler fileHandler = new LinkedFileHandler(file, entry, databaseContext, filePreferences);
            try {
                boolean changedFile = fileHandler.renameToSuggestedName();
                if (changedFile) {
                    changed = true;
                }
            } catch (IOException exception) {
                LOGGER.error("Error while renaming file {}", file.getLink(), exception);
            }
        }
        if (changed) {
            Optional<FieldChange> changes = entry.setFiles(files);
            return OptionalUtil.toList(changes);
        }
        return Collections.emptyList();
    }
}
| 2,142
| 33.015873
| 125
|
java
|
null |
jabref-main/src/main/java/org/jabref/logic/cleanup/TimeStampToCreationDate.java
|
package org.jabref.logic.cleanup;
import java.time.LocalDateTime;
import java.time.format.DateTimeFormatter;
import java.time.temporal.ChronoUnit;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Optional;
import org.jabref.logic.preferences.TimestampPreferences;
import org.jabref.model.FieldChange;
import org.jabref.model.entry.BibEntry;
import org.jabref.model.entry.Date;
import org.jabref.model.entry.event.EntriesEventSource;
import org.jabref.model.entry.field.Field;
import org.jabref.model.entry.field.StandardField;
/**
 * This class handles the migration from timestamp field to creationdate and modificationdate fields.
 * <p>
 * If the old updateTimestamp setting is enabled, the timestamp field for each entry are migrated to the date-modified field.
 * Otherwise it is migrated to the date-added field.
 */
public class TimeStampToCreationDate implements CleanupJob {

    private final Field timeStampField;

    public TimeStampToCreationDate(TimestampPreferences timestampPreferences) {
        timeStampField = timestampPreferences.getTimestampField();
    }

    /**
     * Formats the time stamp into the local date and time format.
     * If the existing timestamp could not be parsed, the day/month/year "1" is used.
     * For the time portion 00:00:00 is used.
     */
    private Optional<String> formatTimeStamp(String timeStamp) {
        Optional<Date> parsedDate = Date.parse(timeStamp);
        if (parsedDate.isEmpty()) {
            // In case the given timestamp could not be parsed
            return Optional.empty();
        } else {
            Date date = parsedDate.get();
            int year = date.getYear().orElse(1);
            int month = getMonth(date);
            int day = date.getDay().orElse(1);
            LocalDateTime localDateTime = LocalDateTime.of(year, month, day, 0, 0);
            // Remove any time units smaller than seconds.
            // Bug fix: LocalDateTime is immutable; the previous code discarded the
            // truncatedTo result, so the truncation never took effect.
            localDateTime = localDateTime.truncatedTo(ChronoUnit.SECONDS);
            return Optional.of(localDateTime.format(DateTimeFormatter.ISO_LOCAL_DATE_TIME));
        }
    }

    /**
     * Returns the month value of the passed date if available.
     * Otherwise returns January (1) as fallback.
     */
    private int getMonth(Date date) {
        if (date.getMonth().isPresent()) {
            return date.getMonth().get().getNumber();
        }
        return 1;
    }

    /**
     * Migrates the legacy timestamp field into the creationdate field.
     *
     * @return the removal of the timestamp field and the new creationdate value;
     *         empty when the entry has no timestamp or it could not be parsed
     */
    @Override
    public List<FieldChange> cleanup(BibEntry entry) {
        // Query entries for their timestamp field entries
        if (entry.getField(timeStampField).isPresent()) {
            Optional<String> formattedTimeStamp = formatTimeStamp(entry.getField(timeStampField).get());
            if (formattedTimeStamp.isEmpty()) {
                // In case the timestamp could not be parsed, do nothing to not lose data
                return Collections.emptyList();
            }
            // Setting the EventSource is necessary to circumvent the update of the modification date during timestamp migration
            entry.clearField(timeStampField, EntriesEventSource.CLEANUP_TIMESTAMP);
            List<FieldChange> changeList = new ArrayList<>();
            FieldChange changeTo;
            // Add removal of timestamp field
            changeList.add(new FieldChange(entry, StandardField.TIMESTAMP, formattedTimeStamp.get(), ""));
            entry.setField(StandardField.CREATIONDATE, formattedTimeStamp.get(), EntriesEventSource.CLEANUP_TIMESTAMP);
            changeTo = new FieldChange(entry, StandardField.CREATIONDATE, entry.getField(StandardField.CREATIONDATE).orElse(""), formattedTimeStamp.get());
            changeList.add(changeTo);
            return changeList;
        }
        return Collections.emptyList();
    }
}
| 3,764
| 41.303371
| 155
|
java
|
null |
jabref-main/src/main/java/org/jabref/logic/cleanup/TimeStampToModificationDate.java
|
package org.jabref.logic.cleanup;
import java.time.LocalDateTime;
import java.time.format.DateTimeFormatter;
import java.time.temporal.ChronoUnit;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Optional;
import org.jabref.logic.preferences.TimestampPreferences;
import org.jabref.model.FieldChange;
import org.jabref.model.entry.BibEntry;
import org.jabref.model.entry.Date;
import org.jabref.model.entry.event.EntriesEventSource;
import org.jabref.model.entry.field.Field;
import org.jabref.model.entry.field.StandardField;
/**
 * This class handles the migration from timestamp field to creationdate and modificationdate fields.
 * <p>
 * If the old updateTimestamp setting is enabled, the timestamp field for each entry are migrated to the date-modified field.
 * Otherwise it is migrated to the date-added field.
 */
public class TimeStampToModificationDate implements CleanupJob {

    private final Field timeStampField;

    public TimeStampToModificationDate(TimestampPreferences timestampPreferences) {
        timeStampField = timestampPreferences.getTimestampField();
    }

    /**
     * Formats the time stamp into the local date and time format.
     * If the existing timestamp could not be parsed, the day/month/year "1" is used.
     * For the time portion 00:00:00 is used.
     */
    private Optional<String> formatTimeStamp(String timeStamp) {
        Optional<Date> parsedDate = Date.parse(timeStamp);
        if (parsedDate.isEmpty()) {
            // In case the given timestamp could not be parsed
            return Optional.empty();
        } else {
            Date date = parsedDate.get();
            int year = date.getYear().orElse(1);
            int month = getMonth(date);
            int day = date.getDay().orElse(1);
            LocalDateTime localDateTime = LocalDateTime.of(year, month, day, 0, 0);
            // Remove any time units smaller than seconds.
            // Bug fix: LocalDateTime is immutable; the previous code discarded the
            // truncatedTo result, so the truncation never took effect.
            localDateTime = localDateTime.truncatedTo(ChronoUnit.SECONDS);
            return Optional.of(localDateTime.format(DateTimeFormatter.ISO_LOCAL_DATE_TIME));
        }
    }

    /**
     * Returns the month value of the passed date if available.
     * Otherwise returns January (1) as fallback.
     */
    private int getMonth(Date date) {
        if (date.getMonth().isPresent()) {
            return date.getMonth().get().getNumber();
        }
        return 1;
    }

    /**
     * Migrates the legacy timestamp field into the modificationdate field.
     *
     * @return the removal of the timestamp field and the new modificationdate value;
     *         empty when the entry has no timestamp or it could not be parsed
     */
    @Override
    public List<FieldChange> cleanup(BibEntry entry) {
        // Query entries for their timestamp field entries
        if (entry.getField(timeStampField).isPresent()) {
            Optional<String> formattedTimeStamp = formatTimeStamp(entry.getField(timeStampField).get());
            if (formattedTimeStamp.isEmpty()) {
                // In case the timestamp could not be parsed, do nothing to not lose data
                return Collections.emptyList();
            }
            // Setting the EventSource is necessary to circumvent the update of the modification date during timestamp migration
            entry.clearField(timeStampField, EntriesEventSource.CLEANUP_TIMESTAMP);
            List<FieldChange> changeList = new ArrayList<>();
            FieldChange changeTo;
            // Add removal of timestamp field
            changeList.add(new FieldChange(entry, StandardField.TIMESTAMP, formattedTimeStamp.get(), ""));
            entry.setField(StandardField.MODIFICATIONDATE, formattedTimeStamp.get(), EntriesEventSource.CLEANUP_TIMESTAMP);
            changeTo = new FieldChange(entry, StandardField.MODIFICATIONDATE, entry.getField(StandardField.MODIFICATIONDATE).orElse(""), formattedTimeStamp.get());
            changeList.add(changeTo);
            return changeList;
        }
        return Collections.emptyList();
    }
}
| 3,784
| 41.52809
| 163
|
java
|
null |
jabref-main/src/main/java/org/jabref/logic/cleanup/URLCleanup.java
|
package org.jabref.logic.cleanup;
import java.util.ArrayList;
import java.util.List;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import org.jabref.logic.formatter.bibtexfields.NormalizeDateFormatter;
import org.jabref.model.FieldChange;
import org.jabref.model.entry.BibEntry;
import org.jabref.model.entry.Date;
import org.jabref.model.entry.field.Field;
import org.jabref.model.entry.field.StandardField;
/**
 * Checks whether URL exists in note field, and stores it under url field.
 */
public class URLCleanup implements CleanupJob {

    /*
     * The urlRegex was originally fetched from a suggested solution in
     * https://stackoverflow.com/questions/28185064/python-infinite-loop-in-regex-to-match-url.
     * In order to be functional, we made the necessary adjustments regarding Java
     * features (mainly doubled backslashes).
     */
    public static final String URL_REGEX = "(?i)\\b((?:https?://|www\\d{0,3}[.]|[a-z0-9.\\-]+[.]"
            + "[a-z]{2,4}/)(?:[^\\s()<>\\\\]+|\\(([^\\s()<>\\\\]+|(\\([^\\s()"
            + "<>\\\\]+\\)))*\\))+(?:\\(([^\\s()<>\\\\]+|(\\([^\\s()<>\\\\]+\\"
            + ")))*\\)|[^\\s`!()\\[\\]{};:'\".,<>?«»“”‘’]))";

    // Phrases that typically precede an access date in the note field.
    public static final String DATE_TERMS_REGEX = "accessed on|visited on|retrieved on|viewed on";

    private static final Field NOTE_FIELD = StandardField.NOTE;
    private static final Field URL_FIELD = StandardField.URL;
    private static final Field URLDATE_FIELD = StandardField.URLDATE;

    final Pattern urlPattern = Pattern.compile(URL_REGEX, Pattern.CASE_INSENSITIVE);
    final Pattern dateTermsPattern = Pattern.compile(DATE_TERMS_REGEX, Pattern.CASE_INSENSITIVE);
    final Pattern datePattern = Pattern.compile(Date.DATE_REGEX, Pattern.CASE_INSENSITIVE);

    private NormalizeDateFormatter formatter = new NormalizeDateFormatter();

    /**
     * Extracts a URL (and, if present, an access date) from the note field and moves them
     * to the url and urldate fields respectively.
     */
    @Override
    public List<FieldChange> cleanup(BibEntry entry) {
        List<FieldChange> changes = new ArrayList<>();
        String noteFieldValue = entry.getField(NOTE_FIELD).orElse(null);
        if (noteFieldValue == null) {
            // Bug fix: entries without a note field previously caused a NullPointerException
            // when the matchers below were created on the null value.
            return changes;
        }
        final Matcher urlMatcher = urlPattern.matcher(noteFieldValue);
        final Matcher dateTermsMatcher = dateTermsPattern.matcher(noteFieldValue);
        final Matcher dateMatcher = datePattern.matcher(noteFieldValue);
        if (urlMatcher.find()) {
            String url = urlMatcher.group();
            // Remove the URL from the NoteFieldValue
            String newNoteFieldValue = noteFieldValue
                    .replace(url, "")
                    /*
                     * The following regex erases unnecessary remaining
                     * content in note field. Explanation:
                     * <ul>
                     * <li>"(, )?": Matches an optional comma followed by a space</li>
                     * <li>"\\?": Matches an optional backslash</li>
                     * <li>"url\{\}": Matches the literal string "url{}"</li>
                     * </ul>
                     * Note that the backslashes are doubled as Java requirement
                     */
                    .replaceAll("(, )?\\\\?url\\{\\}(, )?", "");
            /*
             * In case the url and note fields hold the same URL, then we just
             * remove it from the note field, and no other action is performed.
             */
            if (entry.hasField(URL_FIELD)) {
                String urlFieldValue = entry.getField(URL_FIELD).orElse(null);
                if (urlFieldValue.equals(url)) {
                    entry.setField(NOTE_FIELD, newNoteFieldValue).ifPresent(changes::add);
                }
            } else {
                entry.setField(NOTE_FIELD, newNoteFieldValue).ifPresent(changes::add);
                entry.setField(URL_FIELD, url).ifPresent(changes::add);
            }
            if (dateTermsMatcher.find()) {
                String term = dateTermsMatcher.group();
                newNoteFieldValue = newNoteFieldValue
                        .replace(term, "");
                if (dateMatcher.find()) {
                    String date = dateMatcher.group();
                    String formattedDate = formatter.format(date);
                    newNoteFieldValue = newNoteFieldValue
                            .replace(date, "").trim()
                            .replaceAll("^,|,$", "").trim(); // either starts or ends with a comma
                    // Same approach with the URL cleanup.
                    if (entry.hasField(URLDATE_FIELD)) {
                        String urlDateFieldValue = entry.getField(URLDATE_FIELD).orElse(null);
                        if (urlDateFieldValue.equals(formattedDate)) {
                            entry.setField(NOTE_FIELD, newNoteFieldValue).ifPresent(changes::add);
                        }
                    } else {
                        entry.setField(NOTE_FIELD, newNoteFieldValue).ifPresent(changes::add);
                        entry.setField(URLDATE_FIELD, formattedDate).ifPresent(changes::add);
                    }
                }
            }
        }
        return changes;
    }
}
| 5,087
| 44.026549
| 98
|
java
|
null |
jabref-main/src/main/java/org/jabref/logic/cleanup/UpgradePdfPsToFileCleanup.java
|
package org.jabref.logic.cleanup;
import java.nio.file.Path;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.jabref.logic.bibtex.FileFieldWriter;
import org.jabref.model.FieldChange;
import org.jabref.model.entry.BibEntry;
import org.jabref.model.entry.LinkedFile;
import org.jabref.model.entry.field.Field;
import org.jabref.model.entry.field.StandardField;
/**
 * Collects file links from the ps and pdf fields, and add them to the list contained in the file field.
 */
public class UpgradePdfPsToFileCleanup implements CleanupJob {

    // Field name and file type name (from ExternalFileTypes)
    private final Map<Field, String> fields = new HashMap<>();

    public UpgradePdfPsToFileCleanup() {
        fields.put(StandardField.PDF, "PDF");
        fields.put(StandardField.PS, "PostScript");
    }

    /**
     * Moves the content of the legacy pdf and ps fields into the file field,
     * appending to any links already present there.
     *
     * @return one removal change per migrated legacy field, plus the file-field update (if any)
     */
    @Override
    public List<FieldChange> cleanup(BibEntry entry) {
        List<FieldChange> changes = new ArrayList<>();
        // If there are already links in the file field, keep those on top:
        String oldFileContent = entry.getField(StandardField.FILE).orElse(null);
        List<LinkedFile> fileList = new ArrayList<>(entry.getFiles());
        int oldItemCount = fileList.size();
        for (Map.Entry<Field, String> field : fields.entrySet()) {
            // The lambda appends to the captured fileList and records the legacy field's removal.
            entry.getField(field.getKey()).ifPresent(fieldContent -> {
                if (fieldContent.trim().isEmpty()) {
                    // ignore whitespace-only legacy fields
                    return;
                }
                // Use the plain file name as description; the map value is the file type name.
                Path path = Path.of(fieldContent);
                LinkedFile flEntry = new LinkedFile(path.getFileName().toString(), path, field.getValue());
                fileList.add(flEntry);
                entry.clearField(field.getKey());
                // New value null signals that the field was cleared.
                changes.add(new FieldChange(entry, field.getKey(), fieldContent, null));
            });
        }
        // Only rewrite the file field when at least one legacy link was migrated.
        if (fileList.size() != oldItemCount) {
            String newValue = FileFieldWriter.getStringRepresentation(fileList);
            entry.setField(StandardField.FILE, newValue);
            changes.add(new FieldChange(entry, StandardField.FILE, oldFileContent, newValue));
        }
        return changes;
    }
}
| 2,197
| 35.032787
| 107
|
java
|
null |
jabref-main/src/main/java/org/jabref/logic/crawler/Crawler.java
|
package org.jabref.logic.crawler;
import java.io.IOException;
import java.nio.file.Path;
import java.util.List;
import org.jabref.logic.exporter.SaveException;
import org.jabref.logic.git.SlrGitHandler;
import org.jabref.logic.importer.ParseException;
import org.jabref.model.entry.BibEntryTypesManager;
import org.jabref.model.study.QueryResult;
import org.jabref.model.util.FileUpdateMonitor;
import org.jabref.preferences.PreferencesService;
import org.eclipse.jgit.api.errors.GitAPIException;
/**
 * This class provides a service for SLR support by conducting an automated search and persistance
 * of studies using the queries and E-Libraries specified in the provided study definition file.
 *
 * It composes a StudyRepository for repository management,
 * and a StudyFetcher that manages the crawling over the selected E-Libraries.
 */
public class Crawler {
    public static final String FILENAME_STUDY_RESULT_BIB = "studyResult.bib";

    // Manages the git-backed study repository (study definition, branches, result persistence).
    private final StudyRepository studyRepository;
    // Executes the search queries against the active E-Library fetchers.
    private final StudyFetcher studyFetcher;

    /**
     * Creates a crawler for retrieving studies from E-Libraries
     *
     * @param studyRepositoryRoot The path to the study repository
     * @throws IOException    if the study repository cannot be accessed
     * @throws ParseException if the study definition file cannot be parsed
     */
    public Crawler(Path studyRepositoryRoot,
                   SlrGitHandler gitHandler,
                   PreferencesService preferencesService,
                   BibEntryTypesManager bibEntryTypesManager,
                   FileUpdateMonitor fileUpdateMonitor) throws IllegalArgumentException, IOException, ParseException {
        this.studyRepository = new StudyRepository(
                studyRepositoryRoot,
                gitHandler,
                preferencesService,
                fileUpdateMonitor,
                bibEntryTypesManager);
        // Only libraries enabled in the study definition are converted into fetchers.
        StudyCatalogToFetcherConverter studyCatalogToFetcherConverter = new StudyCatalogToFetcherConverter(
                studyRepository.getActiveLibraryEntries(),
                preferencesService.getImportFormatPreferences(),
                preferencesService.getImporterPreferences());
        this.studyFetcher = new StudyFetcher(
                studyCatalogToFetcherConverter.getActiveFetchers(),
                studyRepository.getSearchQueryStrings());
    }

    /**
     * This methods performs the crawling of the active libraries defined in the study definition file.
     * This method also persists the results in the same folder the study definition file is stored in.
     *
     * The whole process works as follows:
     * <ol>
     * <li>Then the search is executed</li>
     * <li>The repository changes to the search branch</li>
     * <li>Afterwards, the results are persisted on the search branch.</li>
     * <li>Finally, the changes are merged into the work branch</li>
     * </ol>
     *
     * @throws IOException Thrown if a problem occurred during the persistence of the result.
     */
    public void performCrawl() throws IOException, GitAPIException, SaveException {
        List<QueryResult> results = studyFetcher.crawl();
        studyRepository.persist(results);
    }
}
| 3,087
| 40.72973
| 118
|
java
|
null |
jabref-main/src/main/java/org/jabref/logic/crawler/StudyCatalogToFetcherConverter.java
|
package org.jabref.logic.crawler;
import java.util.List;
import java.util.Objects;
import java.util.Set;
import java.util.stream.Collectors;
import org.jabref.logic.importer.ImportFormatPreferences;
import org.jabref.logic.importer.ImporterPreferences;
import org.jabref.logic.importer.SearchBasedFetcher;
import org.jabref.logic.importer.WebFetchers;
import org.jabref.model.study.StudyDatabase;
/**
 * Converts library entries from the given study into their corresponding fetchers.
 */
class StudyCatalogToFetcherConverter {

    private final List<StudyDatabase> libraryEntries;
    private final ImportFormatPreferences importFormatPreferences;
    private final ImporterPreferences importerPreferences;

    public StudyCatalogToFetcherConverter(List<StudyDatabase> libraryEntries,
                                          ImportFormatPreferences importFormatPreferences,
                                          ImporterPreferences importerPreferences) {
        this.libraryEntries = libraryEntries;
        this.importFormatPreferences = importFormatPreferences;
        this.importerPreferences = importerPreferences;
    }

    /**
     * Returns a list of instances of all active library fetchers.
     *
     * A fetcher is considered active if there exists an library entry of the library the fetcher is
     * associated with that is enabled. Entries whose library has no supported fetcher are dropped.
     *
     * @return Instances of all active fetchers defined in the study definition.
     */
    public List<SearchBasedFetcher> getActiveFetchers() {
        return libraryEntries.parallelStream()
                             .map(this::createFetcherFromLibraryEntry)
                             .filter(Objects::nonNull)
                             .collect(Collectors.toList());
    }

    /**
     * Resolves a library entry to the SearchBasedFetcher with the matching (case-insensitive) name.
     *
     * @param studyDatabase the entry that will be converted
     * @return the matching fetcher instance, or null if the library is not supported
     */
    private SearchBasedFetcher createFetcherFromLibraryEntry(StudyDatabase studyDatabase) {
        Set<SearchBasedFetcher> availableFetchers = WebFetchers.getSearchBasedFetchers(importFormatPreferences, importerPreferences);
        String libraryName = studyDatabase.getName();
        return availableFetchers.stream()
                                .filter(fetcher -> fetcher.getName().equalsIgnoreCase(libraryName))
                                .findAny()
                                .orElse(null);
    }
}
| 2,993
| 42.391304
| 137
|
java
|
null |
jabref-main/src/main/java/org/jabref/logic/crawler/StudyFetcher.java
|
package org.jabref.logic.crawler;
import java.util.ArrayList;
import java.util.List;
import java.util.Objects;
import java.util.stream.Collectors;
import org.jabref.logic.importer.FetcherException;
import org.jabref.logic.importer.PagedSearchBasedFetcher;
import org.jabref.logic.importer.SearchBasedFetcher;
import org.jabref.model.database.BibDatabase;
import org.jabref.model.entry.BibEntry;
import org.jabref.model.study.FetchResult;
import org.jabref.model.study.QueryResult;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * Delegates the search of the provided set of targeted E-Libraries with the provided queries to the E-Library specific fetchers,
 * and aggregates the results returned by the fetchers by query and E-Library.
 */
class StudyFetcher {
    private static final Logger LOGGER = LoggerFactory.getLogger(StudyFetcher.class);
    // Upper bound of entries fetched per catalog and query (paged fetchers are queried page by page up to this bound).
    private static final int MAX_AMOUNT_OF_RESULTS_PER_FETCHER = 100;

    private final List<SearchBasedFetcher> activeFetchers;
    private final List<String> searchQueries;

    StudyFetcher(List<SearchBasedFetcher> activeFetchers, List<String> searchQueries) throws IllegalArgumentException {
        this.searchQueries = searchQueries;
        this.activeFetchers = activeFetchers;
    }

    /**
     * Each Map Entry contains the results for one search term for all libraries.
     * Each entry of the internal map contains the results for a given library.
     * If any library API is not available, its corresponding entry is missing from the internal map.
     */
    public List<QueryResult> crawl() {
        return searchQueries.parallelStream()
                            .map(this::getQueryResult)
                            .collect(Collectors.toList());
    }

    private QueryResult getQueryResult(String searchQuery) {
        return new QueryResult(searchQuery, performSearchOnQuery(searchQuery));
    }

    /**
     * Queries all catalogs on the given searchQuery.
     *
     * @param searchQuery The query the search is performed for.
     * @return Mapping of each fetcher by name and all their retrieved publications as a BibDatabase
     */
    private List<FetchResult> performSearchOnQuery(String searchQuery) {
        return activeFetchers.parallelStream()
                             .map(fetcher -> performSearchOnQueryForFetcher(searchQuery, fetcher))
                             .filter(Objects::nonNull)
                             .collect(Collectors.toList());
    }

    /**
     * Runs one query against one fetcher. API failures are logged and reported as null
     * so that a single unavailable library does not abort the whole crawl.
     */
    private FetchResult performSearchOnQueryForFetcher(String searchQuery, SearchBasedFetcher fetcher) {
        try {
            List<BibEntry> fetchResult = new ArrayList<>();
            if (fetcher instanceof PagedSearchBasedFetcher basedFetcher) {
                // Fetch page by page until MAX_AMOUNT_OF_RESULTS_PER_FETCHER entries are covered.
                int pages = (int) Math.ceil(((double) MAX_AMOUNT_OF_RESULTS_PER_FETCHER) / basedFetcher.getPageSize());
                for (int page = 0; page < pages; page++) {
                    fetchResult.addAll(basedFetcher.performSearchPaged(searchQuery, page).getContent());
                }
            } else {
                fetchResult = fetcher.performSearch(searchQuery);
            }
            return new FetchResult(fetcher.getName(), new BibDatabase(fetchResult));
        } catch (FetcherException e) {
            // SLF4J parameterized logging instead of eager String.format; the trailing
            // throwable argument is logged with its stack trace.
            LOGGER.warn("{} API request failed", fetcher.getName(), e);
            return null;
        }
    }
}
| 3,389
| 40.851852
| 129
|
java
|
null |
jabref-main/src/main/java/org/jabref/logic/crawler/StudyRepository.java
|
package org.jabref.logic.crawler;
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.nio.charset.UnsupportedCharsetException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.time.LocalDateTime;
import java.time.temporal.ChronoUnit;
import java.util.List;
import java.util.regex.Pattern;
import java.util.stream.Collectors;
import org.jabref.logic.citationkeypattern.CitationKeyGenerator;
import org.jabref.logic.database.DatabaseMerger;
import org.jabref.logic.exporter.AtomicFileWriter;
import org.jabref.logic.exporter.BibWriter;
import org.jabref.logic.exporter.BibtexDatabaseWriter;
import org.jabref.logic.exporter.SaveConfiguration;
import org.jabref.logic.exporter.SaveException;
import org.jabref.logic.git.SlrGitHandler;
import org.jabref.logic.importer.OpenDatabase;
import org.jabref.logic.importer.SearchBasedFetcher;
import org.jabref.logic.l10n.Localization;
import org.jabref.logic.util.OS;
import org.jabref.logic.util.io.FileNameCleaner;
import org.jabref.model.database.BibDatabase;
import org.jabref.model.database.BibDatabaseContext;
import org.jabref.model.entry.BibEntryTypesManager;
import org.jabref.model.study.FetchResult;
import org.jabref.model.study.QueryResult;
import org.jabref.model.study.Study;
import org.jabref.model.study.StudyDatabase;
import org.jabref.model.study.StudyQuery;
import org.jabref.model.util.FileUpdateMonitor;
import org.jabref.preferences.PreferencesService;
import org.eclipse.jgit.api.errors.GitAPIException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * This class manages all aspects of the study process related to the repository.
 *
 * It includes the parsing of the study definition file (study.yml) into a Study instance,
 * the structured persistence of the crawling results for the study within the file based repository,
 * as well as the sharing, and versioning of results using git.
 */
public class StudyRepository {
    // Tests work with study.yml
    public static final String STUDY_DEFINITION_FILE_NAME = "study.yml";

    private static final Logger LOGGER = LoggerFactory.getLogger(StudyRepository.class);

    // Used by trimNameAndAddID to turn query strings into legal folder names
    private static final Pattern MATCH_COLON = Pattern.compile(":");
    private static final Pattern MATCH_ILLEGAL_CHARACTERS = Pattern.compile("[^A-Za-z0-9_.\\s=-]");

    // Currently we make assumptions about the configuration: the remotes, work and search branch names
    private static final String REMOTE = "origin";
    private static final String WORK_BRANCH = "work";
    private static final String SEARCH_BRANCH = "search";

    private final Path repositoryPath;
    private final Path studyDefinitionFile;
    private final SlrGitHandler gitHandler;
    private final Study study;
    private final PreferencesService preferencesService;
    private final FileUpdateMonitor fileUpdateMonitor;
    private final BibEntryTypesManager bibEntryTypesManager;

    /**
     * Creates a study repository.
     *
     * @param pathToRepository Where the repository root is located.
     * @param gitHandler       The git handler that manages any interaction with the remote repository
     * @throws IOException Thrown if the given repository does not exist, or the study definition file
     *                     does not exist
     */
    public StudyRepository(Path pathToRepository,
                           SlrGitHandler gitHandler,
                           PreferencesService preferencesService,
                           FileUpdateMonitor fileUpdateMonitor,
                           BibEntryTypesManager bibEntryTypesManager) throws IOException {
        this.repositoryPath = pathToRepository;
        this.gitHandler = gitHandler;
        this.preferencesService = preferencesService;
        this.fileUpdateMonitor = fileUpdateMonitor;
        // Path#resolve is the idiomatic form of Path.of(parent.toString(), child)
        this.studyDefinitionFile = repositoryPath.resolve(STUDY_DEFINITION_FILE_NAME);
        this.bibEntryTypesManager = bibEntryTypesManager;

        if (Files.notExists(repositoryPath)) {
            throw new IOException("The given repository does not exist.");
        }
        try {
            // Preserve any uncommitted user changes before switching branches
            gitHandler.createCommitOnCurrentBranch("Save changes before searching.", false);
            gitHandler.checkoutBranch(WORK_BRANCH);
            updateWorkAndSearchBranch();
        } catch (GitAPIException e) {
            LOGGER.error("Could not checkout work branch", e);
        }
        if (Files.notExists(studyDefinitionFile)) {
            throw new IOException("The study definition file does not exist in the given repository.");
        }
        study = parseStudyFile();
        try {
            final String updateRepositoryStructureMessage = "Update repository structure";
            // Update repository structure on work branch in case of changes
            setUpRepositoryStructureForQueriesAndFetchers();
            gitHandler.createCommitOnCurrentBranch(updateRepositoryStructureMessage, false);
            gitHandler.checkoutBranch(SEARCH_BRANCH);
            // If study definition does not exist on this branch or was changed on work branch, copy it from work
            boolean studyDefinitionDoesNotExistOrChanged = !(Files.exists(studyDefinitionFile) && new StudyYamlParser().parseStudyYamlFile(studyDefinitionFile).equals(study));
            if (studyDefinitionDoesNotExistOrChanged) {
                new StudyYamlParser().writeStudyYamlFile(study, studyDefinitionFile);
            }
            setUpRepositoryStructureForQueriesAndFetchers();
            gitHandler.createCommitOnCurrentBranch(updateRepositoryStructureMessage, false);
        } catch (GitAPIException e) {
            LOGGER.error("Could not checkout search branch.", e);
        }
        try {
            gitHandler.checkoutBranch(WORK_BRANCH);
        } catch (GitAPIException e) {
            LOGGER.error("Could not checkout work branch", e);
        }
    }

    /**
     * Returns entries stored in the repository for a certain query and fetcher
     */
    public BibDatabaseContext getFetcherResultEntries(String query, String fetcherName) throws IOException {
        Path fetcherResultFile = getPathToFetcherResultFile(query, fetcherName);
        if (Files.exists(fetcherResultFile)) {
            return OpenDatabase.loadDatabase(fetcherResultFile,
                    preferencesService.getImportFormatPreferences(),
                    fileUpdateMonitor).getDatabaseContext();
        }
        return new BibDatabaseContext();
    }

    /**
     * Returns the merged entries stored in the repository for a certain query
     */
    public BibDatabaseContext getQueryResultEntries(String query) throws IOException {
        Path queryResultFile = getPathToQueryResultFile(query);
        if (Files.exists(queryResultFile)) {
            return OpenDatabase.loadDatabase(queryResultFile,
                    preferencesService.getImportFormatPreferences(),
                    fileUpdateMonitor).getDatabaseContext();
        }
        return new BibDatabaseContext();
    }

    /**
     * Returns the merged entries stored in the repository for all queries
     */
    public BibDatabaseContext getStudyResultEntries() throws IOException {
        Path studyResultFile = getPathToStudyResultFile();
        if (Files.exists(studyResultFile)) {
            return OpenDatabase.loadDatabase(studyResultFile,
                    preferencesService.getImportFormatPreferences(),
                    fileUpdateMonitor).getDatabaseContext();
        }
        return new BibDatabaseContext();
    }

    /**
     * The study definition file contains all the definitions of a study. This method extracts this study from the yaml study definition file
     *
     * @return Returns the Study parsed from the study definition file.
     * @throws IOException Problem opening the input stream.
     */
    private Study parseStudyFile() throws IOException {
        return new StudyYamlParser().parseStudyYamlFile(studyDefinitionFile);
    }

    /**
     * Returns all query strings of the study definition
     *
     * @return List of all queries as Strings.
     */
    public List<String> getSearchQueryStrings() {
        return study.getQueries()
                    .parallelStream()
                    .map(StudyQuery::getQuery)
                    .collect(Collectors.toList());
    }

    /**
     * Extracts all active fetchers from the library entries.
     *
     * @return List of enabled study databases
     * @throws IllegalArgumentException If a transformation from Library entry to LibraryDefinition fails
     */
    public List<StudyDatabase> getActiveLibraryEntries() throws IllegalArgumentException {
        return study.getDatabases()
                    .parallelStream()
                    .filter(StudyDatabase::isEnabled)
                    .collect(Collectors.toList());
    }

    public Study getStudy() {
        return study;
    }

    /**
     * Persists the result locally and remotely by following the steps:
     * Precondition: Currently checking out work branch
     * <ol>
     * <li>Update the work and search branch</li>
     * <li>Persist the results on the search branch</li>
     * <li>Manually patch the diff of the search branch onto the work branch (as the merging will not work in
     * certain cases without a conflict as it is context sensitive. But for this use case we do not need it to be
     * context sensitive. So we can just prepend the patch without checking the "context" lines.</li>
     * <li>Update the remote tracking branches of the work and search branch</li>
     * </ol>
     */
    public void persist(List<QueryResult> crawlResults) throws IOException, GitAPIException, SaveException {
        updateWorkAndSearchBranch();
        gitHandler.checkoutBranch(SEARCH_BRANCH);
        persistResults(crawlResults);
        try {
            // First commit changes to search branch and update remote
            String commitMessage = "Conducted search: " + LocalDateTime.now().truncatedTo(ChronoUnit.SECONDS);
            boolean newSearchResults = gitHandler.createCommitOnCurrentBranch(commitMessage, false);
            gitHandler.checkoutBranch(WORK_BRANCH);
            if (!newSearchResults) {
                // Nothing new was found; no patch and no push needed
                return;
            }
            // Patch new results into work branch
            gitHandler.appendLatestSearchResultsOntoCurrentBranch(commitMessage + " - Patch", SEARCH_BRANCH);
            // Update both remote tracked branches
            updateRemoteSearchAndWorkBranch();
        } catch (GitAPIException e) {
            LOGGER.error("Updating remote repository failed", e);
        }
    }

    /**
     * Update the remote tracking branches of the work and search branches
     * The currently checked out branch is not changed if the method is executed successfully
     */
    private void updateRemoteSearchAndWorkBranch() throws IOException, GitAPIException {
        String currentBranch = gitHandler.getCurrentlyCheckedOutBranch();

        // update remote search branch
        gitHandler.checkoutBranch(SEARCH_BRANCH);
        gitHandler.pushCommitsToRemoteRepository();

        // update remote work branch
        gitHandler.checkoutBranch(WORK_BRANCH);
        gitHandler.pushCommitsToRemoteRepository();

        gitHandler.checkoutBranch(currentBranch);
    }

    /**
     * Updates the local work and search branches with changes from their tracking remote branches
     * The currently checked out branch is not changed if the method is executed successfully
     */
    private void updateWorkAndSearchBranch() throws IOException, GitAPIException {
        String currentBranch = gitHandler.getCurrentlyCheckedOutBranch();

        // update search branch
        gitHandler.checkoutBranch(SEARCH_BRANCH);
        gitHandler.pullOnCurrentBranch();

        // update work branch
        gitHandler.checkoutBranch(WORK_BRANCH);
        gitHandler.pullOnCurrentBranch();

        gitHandler.checkoutBranch(currentBranch);
    }

    /**
     * Create for each query a folder, and for each fetcher a bib file in the query folder to store its results.
     */
    private void setUpRepositoryStructureForQueriesAndFetchers() throws IOException {
        // Cannot use stream here since IOException has to be thrown
        StudyCatalogToFetcherConverter converter = new StudyCatalogToFetcherConverter(
                this.getActiveLibraryEntries(),
                preferencesService.getImportFormatPreferences(),
                preferencesService.getImporterPreferences());
        for (String query : this.getSearchQueryStrings()) {
            createQueryResultFolder(query);
            converter.getActiveFetchers()
                     .forEach(searchBasedFetcher -> createFetcherResultFile(query, searchBasedFetcher));
            createQueryResultFile(query);
        }
        createStudyResultFile();
    }

    /**
     * Creates a folder using the query and its corresponding query id.
     * This folder name is unique for each query, as long as the query id in the study definition is unique for each query.
     *
     * @param query The query the folder is created for
     */
    private void createQueryResultFolder(String query) throws IOException {
        Path queryResultFolder = getPathToQueryDirectory(query);
        createFolder(queryResultFolder);
    }

    private void createFolder(Path folder) throws IOException {
        if (Files.notExists(folder)) {
            Files.createDirectory(folder);
        }
    }

    private void createFetcherResultFile(String query, SearchBasedFetcher searchBasedFetcher) {
        String fetcherName = searchBasedFetcher.getName();
        Path fetcherResultFile = getPathToFetcherResultFile(query, fetcherName);
        createBibFile(fetcherResultFile);
    }

    private void createQueryResultFile(String query) {
        // "result" yields the aggregated result.bib inside the query folder
        Path queryResultFile = getPathToFetcherResultFile(query, "result");
        createBibFile(queryResultFile);
    }

    private void createStudyResultFile() {
        createBibFile(getPathToStudyResultFile());
    }

    private void createBibFile(Path file) {
        if (Files.notExists(file)) {
            try {
                Files.createFile(file);
            } catch (IOException e) {
                throw new IllegalStateException("Error during creation of repository structure.", e);
            }
        }
    }

    /**
     * Returns a string that can be used as a folder name.
     * This removes all characters from the query that are illegal for directory names.
     * Structure: ID-trimmed query
     *
     * Examples:
     * Input: '(title: test-title AND abstract: Test)' as a query entry with id 12345678
     * Output: '12345678 - title= test-title AND abstract= Test'
     *
     * Input: 'abstract: Test*' as a query entry with id 87654321
     * Output: '87654321 - abstract= Test'
     *
     * Input: '"test driven"' as a query entry with id 12348765
     * Output: '12348765 - test driven'
     *
     * Note that this method might be similar to {@link org.jabref.logic.util.io.FileUtil#getValidFileName(String)} or {@link org.jabref.logic.util.io.FileNameCleaner#cleanFileName(String)}
     *
     * @param query that is trimmed and combined with its query id
     * @return a unique folder name for any query.
     */
    private String trimNameAndAddID(String query) {
        // Replace all field: with field= for folder name
        String trimmedNamed = MATCH_COLON.matcher(query).replaceAll("=");
        trimmedNamed = MATCH_ILLEGAL_CHARACTERS.matcher(trimmedNamed).replaceAll("");
        String id = computeIDForQuery(query);
        // Whole path has to be shorter than 260
        int remainingPathLength = 220 - studyDefinitionFile.toString().length() - id.length();
        // Bug fix: truncate the SANITIZED name. The previous code truncated the raw query,
        // which re-introduced the illegal characters that were just stripped out.
        if (trimmedNamed.length() > remainingPathLength) {
            trimmedNamed = trimmedNamed.substring(0, remainingPathLength);
        }
        return id + " - " + trimmedNamed;
    }

    /**
     * Helper to compute the query id for folder name creation.
     */
    private String computeIDForQuery(String query) {
        return String.valueOf(query.hashCode());
    }

    /**
     * Persists the crawling results in the local file based repository.
     *
     * @param crawlResults The results that shall be persisted.
     */
    private void persistResults(List<QueryResult> crawlResults) throws IOException, SaveException {
        DatabaseMerger merger = new DatabaseMerger(preferencesService.getBibEntryPreferences().getKeywordSeparator());
        BibDatabase newStudyResultEntries = new BibDatabase();

        for (QueryResult result : crawlResults) {
            BibDatabase queryResultEntries = new BibDatabase();
            for (FetchResult fetcherResult : result.getResultsPerFetcher()) {
                BibDatabase fetcherEntries = fetcherResult.getFetchResult();
                BibDatabaseContext existingFetcherResult = getFetcherResultEntries(result.getQuery(), fetcherResult.getFetcherName());

                // Merge new entries into fetcher result file
                merger.merge(existingFetcherResult.getDatabase(), fetcherEntries);

                // Create citation keys for all entries that do not have one
                generateCiteKeys(existingFetcherResult, fetcherEntries);

                // Aggregate each fetcher result into the query result
                merger.merge(queryResultEntries, fetcherEntries);

                writeResultToFile(getPathToFetcherResultFile(result.getQuery(), fetcherResult.getFetcherName()), existingFetcherResult.getDatabase());
            }
            BibDatabase existingQueryEntries = getQueryResultEntries(result.getQuery()).getDatabase();

            // Merge new entries into query result file
            merger.merge(existingQueryEntries, queryResultEntries);

            // Aggregate all new entries for every query into the study result
            merger.merge(newStudyResultEntries, queryResultEntries);

            writeResultToFile(getPathToQueryResultFile(result.getQuery()), existingQueryEntries);
        }
        BibDatabase existingStudyResultEntries = getStudyResultEntries().getDatabase();

        // Merge new entries into study result file
        merger.merge(existingStudyResultEntries, newStudyResultEntries);

        writeResultToFile(getPathToStudyResultFile(), existingStudyResultEntries);
    }

    private void generateCiteKeys(BibDatabaseContext existingEntries, BibDatabase targetEntries) {
        CitationKeyGenerator citationKeyGenerator = new CitationKeyGenerator(existingEntries,
                preferencesService.getCitationKeyPatternPreferences());
        targetEntries.getEntries().stream().filter(bibEntry -> !bibEntry.hasCitationKey()).forEach(citationKeyGenerator::generateAndSetKey);
    }

    private void writeResultToFile(Path pathToFile, BibDatabase entries) throws SaveException {
        try (AtomicFileWriter fileWriter = new AtomicFileWriter(pathToFile, StandardCharsets.UTF_8)) {
            SaveConfiguration saveConfiguration = new SaveConfiguration()
                    .withMetadataSaveOrder(true)
                    .withReformatOnSave(preferencesService.getLibraryPreferences().shouldAlwaysReformatOnSave());
            BibWriter bibWriter = new BibWriter(fileWriter, OS.NEWLINE);
            BibtexDatabaseWriter databaseWriter = new BibtexDatabaseWriter(
                    bibWriter,
                    saveConfiguration,
                    preferencesService.getFieldPreferences(),
                    preferencesService.getCitationKeyPatternPreferences(),
                    bibEntryTypesManager);
            databaseWriter.saveDatabase(new BibDatabaseContext(entries));
        } catch (UnsupportedCharsetException ex) {
            // Bug fix: previously `ex` was passed as a formatting ARGUMENT to Localization.lang,
            // dropping the cause entirely. Attach it as the exception cause instead.
            throw new SaveException(Localization.lang("Character encoding UTF-8 is not supported."), ex);
        } catch (IOException ex) {
            throw new SaveException("Problems saving", ex);
        }
    }

    private Path getPathToFetcherResultFile(String query, String fetcherName) {
        return repositoryPath.resolve(trimNameAndAddID(query)).resolve(FileNameCleaner.cleanFileName(fetcherName) + ".bib");
    }

    private Path getPathToQueryResultFile(String query) {
        return repositoryPath.resolve(trimNameAndAddID(query)).resolve("result.bib");
    }

    private Path getPathToStudyResultFile() {
        return repositoryPath.resolve(Crawler.FILENAME_STUDY_RESULT_BIB);
    }

    private Path getPathToQueryDirectory(String query) {
        return repositoryPath.resolve(trimNameAndAddID(query));
    }
}
| 20,743
| 43.900433
| 189
|
java
|
null |
jabref-main/src/main/java/org/jabref/logic/crawler/StudyYamlParser.java
|
package org.jabref.logic.crawler;
import java.io.FileInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.nio.file.Path;
import org.jabref.model.study.Study;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.dataformat.yaml.YAMLFactory;
import com.fasterxml.jackson.dataformat.yaml.YAMLGenerator;
/**
 * Example use: <code>new StudyYamlParser().parseStudyYamlFile(studyDefinitionFile);</code>
 */
public class StudyYamlParser {

    /**
     * Reads the YAML study definition at the given path and maps it onto a {@link Study} instance.
     *
     * @throws IOException if the file cannot be read or does not map onto {@link Study}
     */
    public Study parseStudyYamlFile(Path studyYamlFile) throws IOException {
        ObjectMapper mapper = new ObjectMapper(new YAMLFactory());
        try (InputStream input = new FileInputStream(studyYamlFile.toFile())) {
            return mapper.readValue(input, Study.class);
        }
    }

    /**
     * Serializes the given {@link Study} instance into a YAML file at the given path.
     *
     * @throws IOException if the file cannot be written
     */
    public void writeStudyYamlFile(Study study, Path studyYamlFile) throws IOException {
        // Configure the factory step by step instead of chaining:
        // no "---" document start marker, and quote values only when required
        YAMLFactory yamlFactory = new YAMLFactory();
        yamlFactory.disable(YAMLGenerator.Feature.WRITE_DOC_START_MARKER);
        yamlFactory.enable(YAMLGenerator.Feature.MINIMIZE_QUOTES);
        ObjectMapper mapper = new ObjectMapper(yamlFactory);
        mapper.writeValue(studyYamlFile.toFile(), study);
    }
}
| 1,396
| 35.763158
| 122
|
java
|
null |
jabref-main/src/main/java/org/jabref/logic/database/DatabaseMerger.java
|
package org.jabref.logic.database;
import java.util.List;
import java.util.Objects;
import java.util.stream.Collectors;
import org.jabref.model.database.BibDatabase;
import org.jabref.model.database.BibDatabaseContext;
import org.jabref.model.database.BibDatabaseModeDetection;
import org.jabref.model.entry.BibEntry;
import org.jabref.model.entry.BibEntryTypesManager;
import org.jabref.model.entry.BibtexString;
import org.jabref.model.groups.AllEntriesGroup;
import org.jabref.model.groups.ExplicitGroup;
import org.jabref.model.groups.GroupHierarchyType;
import org.jabref.model.metadata.ContentSelector;
import org.jabref.model.metadata.MetaData;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * Merges the entries, strings, and (optionally) the metadata — groups and content
 * selectors — of one library into another, skipping duplicates.
 */
public class DatabaseMerger {
    private static final Logger LOGGER = LoggerFactory.getLogger(DatabaseMerger.class);

    // Keyword separator handed to newly created groups during metadata merging
    private final char keywordDelimiter;

    public DatabaseMerger(char keywordDelimiter) {
        this.keywordDelimiter = keywordDelimiter;
    }

    /**
     * Merges all entries and strings of the other database into the target database. Any duplicates are ignored.
     * In case a string has a different content, it is added with a new unique name.
     * The unique name is generated by suffix "_i", where i runs from 1 onwards.
     *
     * @param target The database that is modified in place
     * @param other  The other databases that is merged into this database
     */
    public synchronized void merge(BibDatabase target, BibDatabase other) {
        mergeEntries(target, other);
        mergeStrings(target, other);
    }

    /**
     * Merges all entries, strings, and metaData of the other database context into the target database context. Any duplicates are ignored.
     * In case a string has a different content, it is added with a new unique name.
     * The unique name is generated by suffix "_i", where i runs from 1 onwards.
     *
     * @param target        The context that is modified in place
     * @param other         The other databases that is merged into this database
     * @param otherFileName The filename of the other library, used to label its imported group
     */
    public synchronized void merge(BibDatabaseContext target, BibDatabaseContext other, String otherFileName) {
        mergeEntries(target.getDatabase(), other.getDatabase());
        mergeStrings(target.getDatabase(), other.getDatabase());
        mergeMetaData(target.getMetaData(), other.getMetaData(), otherFileName, other.getEntries());
    }

    // Inserts every entry from `other` that the duplicate check does not find in `target`
    private void mergeEntries(BibDatabase target, BibDatabase other) {
        DuplicateCheck duplicateCheck = new DuplicateCheck(new BibEntryTypesManager());
        List<BibEntry> newEntries = other.getEntries().stream()
                // Remove all entries that are already part of the database (duplicate)
                .filter(entry -> duplicateCheck.containsDuplicate(target, entry, BibDatabaseModeDetection.inferMode(target)).isEmpty())
                .collect(Collectors.toList());
        target.insertEntries(newEntries);
    }

    // Copies BibTeX @string definitions; name clashes with differing content get a "_i" suffix
    public void mergeStrings(BibDatabase target, BibDatabase other) {
        for (BibtexString bibtexString : other.getStringValues()) {
            String bibtexStringName = bibtexString.getName();
            if (target.hasStringByName(bibtexStringName)) {
                String importedContent = bibtexString.getContent();
                String existingContent = target.getStringByName(bibtexStringName).get().getContent();
                if (!importedContent.equals(existingContent)) {
                    LOGGER.info("String contents differ for {}: {} != {}", bibtexStringName, importedContent, existingContent);
                    // Find the first free "name_i" suffix
                    int suffix = 1;
                    String newName = bibtexStringName + "_" + suffix;
                    while (target.hasStringByName(newName)) {
                        suffix++;
                        newName = bibtexStringName + "_" + suffix;
                    }
                    BibtexString newBibtexString = new BibtexString(newName, importedContent);
                    // TODO undo/redo
                    target.addString(newBibtexString);
                    LOGGER.info("New string added: {} = {}", newBibtexString.getName(), newBibtexString.getContent());
                }
                // Identical content: nothing to do, the target already has the string
            } else {
                // TODO undo/redo
                target.addString(bibtexString);
            }
        }
    }

    /**
     * @param target          the metaData that is the merge target
     * @param other           the metaData to merge into the target
     * @param otherFilename   the filename of the other library. Pass "unknown" if not known.
     * @param allOtherEntries all entries of the other library; added to the renamed imported root group
     */
    public void mergeMetaData(MetaData target, MetaData other, String otherFilename, List<BibEntry> allOtherEntries) {
        Objects.requireNonNull(other);
        Objects.requireNonNull(otherFilename);
        Objects.requireNonNull(allOtherEntries);
        mergeGroups(target, other, otherFilename, allOtherEntries);
        mergeContentSelectors(target, other);
    }

    private void mergeGroups(MetaData target, MetaData other, String otherFilename, List<BibEntry> allOtherEntries) {
        // Adds the specified node as a child of the current root. The group contained in <b>newGroups</b> must not be of
        // type AllEntriesGroup, since every tree has exactly one AllEntriesGroup (its root). The <b>newGroups</b> are
        // inserted directly, i.e. they are not deepCopy()'d.
        other.getGroups().ifPresent(newGroups -> {
            // ensure that there is always only one AllEntriesGroup in the resulting database
            // "Rename" the AllEntriesGroup of the imported database to "Imported"
            if (newGroups.getGroup() instanceof AllEntriesGroup) {
                // create a dummy group
                try {
                    // This will cause a bug if the group already exists
                    // There will be group where the two groups are merged
                    String newGroupName = otherFilename;
                    ExplicitGroup group = new ExplicitGroup(
                            "Imported " + newGroupName,
                            GroupHierarchyType.INDEPENDENT,
                            keywordDelimiter);
                    newGroups.setGroup(group);
                    group.add(allOtherEntries);
                } catch (IllegalArgumentException e) {
                    LOGGER.error("Problem appending entries to group", e);
                }
            }
            target.getGroups().ifPresentOrElse(
                    newGroups::moveTo,
                    // target does not contain any groups, so we can just use the new groups
                    () -> target.setGroups(newGroups));
        });
    }

    // Copies every content selector from the other metadata into the target
    private void mergeContentSelectors(MetaData target, MetaData other) {
        for (ContentSelector selector : other.getContentSelectorList()) {
            target.addContentSelector(selector);
        }
    }
}
| 6,811
| 47.312057
| 160
|
java
|
null |
jabref-main/src/main/java/org/jabref/logic/database/DuplicateCheck.java
|
package org.jabref.logic.database;
import java.util.Collection;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Locale;
import java.util.Map;
import java.util.Objects;
import java.util.Optional;
import java.util.Set;
import java.util.stream.Collectors;
import org.jabref.logic.util.OS;
import org.jabref.logic.util.strings.StringSimilarity;
import org.jabref.model.database.BibDatabase;
import org.jabref.model.database.BibDatabaseMode;
import org.jabref.model.entry.AuthorList;
import org.jabref.model.entry.BibEntry;
import org.jabref.model.entry.BibEntryType;
import org.jabref.model.entry.BibEntryTypesManager;
import org.jabref.model.entry.field.BibField;
import org.jabref.model.entry.field.Field;
import org.jabref.model.entry.field.FieldFactory;
import org.jabref.model.entry.field.FieldProperty;
import org.jabref.model.entry.field.OrFields;
import org.jabref.model.entry.field.StandardField;
import org.jabref.model.entry.identifier.DOI;
import org.jabref.model.entry.identifier.ISBN;
import org.jabref.model.strings.StringUtil;
import com.google.common.collect.Sets;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* This class contains utility method for duplicate checking of entries.
*/
public class DuplicateCheck {
private static final double DUPLICATE_THRESHOLD = 0.75; // The overall threshold to signal a duplicate pair
private static final Logger LOGGER = LoggerFactory.getLogger(DuplicateCheck.class);
/*
* Integer values for indicating result of duplicate check (for entries):
*/
private static final int NOT_EQUAL = 0;
private static final int EQUAL = 1;
private static final int EMPTY_IN_ONE = 2;
private static final int EMPTY_IN_TWO = 3;
private static final int EMPTY_IN_BOTH = 4;
// Non-required fields are investigated only if the required fields give a value within
// the doubt range of the threshold:
private static final double DOUBT_RANGE = 0.05;
private static final double REQUIRED_WEIGHT = 3; // Weighting of all required fields
// Extra weighting of those fields that are most likely to provide correct duplicate detection:
    private static final Map<Field, Double> FIELD_WEIGHTS = new HashMap<>();

    static {
        // Identifier-like fields (DOI) and the title carry the strongest duplicate signal;
        // free-text note/comment fields should barely influence the verdict.
        // Fields not listed here implicitly get weight 1.0 (see compareFieldSet).
        DuplicateCheck.FIELD_WEIGHTS.put(StandardField.AUTHOR, 2.5);
        DuplicateCheck.FIELD_WEIGHTS.put(StandardField.EDITOR, 2.5);
        DuplicateCheck.FIELD_WEIGHTS.put(StandardField.TITLE, 3.);
        DuplicateCheck.FIELD_WEIGHTS.put(StandardField.JOURNAL, 2.);
        DuplicateCheck.FIELD_WEIGHTS.put(StandardField.NOTE, 0.1);
        DuplicateCheck.FIELD_WEIGHTS.put(StandardField.COMMENT, 0.1);
        DuplicateCheck.FIELD_WEIGHTS.put(StandardField.DOI, 3.);
    }
private final BibEntryTypesManager entryTypesManager;
    /**
     * @param entryTypesManager stored for resolving entry-type definitions during comparison
     */
    public DuplicateCheck(BibEntryTypesManager entryTypesManager) {
        this.entryTypesManager = entryTypesManager;
    }
private static boolean haveSameIdentifier(final BibEntry one, final BibEntry two) {
for (final Field name : FieldFactory.getIdentifierFieldNames()) {
if (one.getField(name).isPresent() && one.getField(name).equals(two.getField(name))) {
return true;
}
}
return false;
}
private static boolean haveDifferentEntryType(final BibEntry one, final BibEntry two) {
return !one.getType().equals(two.getType());
}
private static boolean haveDifferentEditions(final BibEntry one, final BibEntry two) {
final Optional<String> editionOne = one.getField(StandardField.EDITION);
final Optional<String> editionTwo = two.getField(StandardField.EDITION);
return editionOne.isPresent() &&
editionTwo.isPresent() &&
!editionOne.get().equals(editionTwo.get());
}
    /**
     * Detects two entries that describe the same work (equal author and title) but
     * different parts of it: a differing chapter or a differing page range.
     */
    private static boolean haveDifferentChaptersOrPagesOfTheSameBook(final BibEntry one, final BibEntry two) {
        return (compareSingleField(StandardField.AUTHOR, one, two) == EQUAL) &&
                (compareSingleField(StandardField.TITLE, one, two) == EQUAL) &&
                ((compareSingleField(StandardField.CHAPTER, one, two) == NOT_EQUAL) ||
                        (compareSingleField(StandardField.PAGES, one, two) == NOT_EQUAL));
    }
    /**
     * Compares the required fields of the given entry type across both entries.
     *
     * @return a two-element array as produced by {@link #compareFieldSet}:
     *         [0] = weighted fraction of matching fields, [1] = total weight compared;
     *         {0, 0} when the type declares no required fields
     */
    private static double[] compareRequiredFields(final BibEntryType type, final BibEntry one, final BibEntry two) {
        final Set<OrFields> requiredFields = type.getRequiredFields();
        return requiredFields.isEmpty()
                ? new double[] {0., 0.}
                // Only the primary alternative of each OR-group of required fields is compared
                : DuplicateCheck.compareFieldSet(requiredFields.stream().map(OrFields::getPrimary).collect(Collectors.toSet()), one, two);
    }
    /**
     * Returns true when the similarity value lies more than DOUBT_RANGE above
     * DUPLICATE_THRESHOLD.
     *
     * NOTE(review): despite the name, values far BELOW the threshold return false here
     * (no Math.abs is applied). The DOUBT_RANGE comment above suggests symmetric
     * behaviour may have been intended — verify against the caller before changing.
     */
    private static boolean isFarFromThreshold(double value) {
        if (value < 0.0) {
            LOGGER.debug("Value {} is below zero. Should not happen", value);
        }
        return value - DuplicateCheck.DUPLICATE_THRESHOLD > DuplicateCheck.DOUBT_RANGE;
    }
    /**
     * Folds the optional-field comparison into the required-field result and decides
     * whether the combination reaches the duplicate threshold.
     *
     * @param req result of compareRequiredFields: [0] = matched fraction, [1] = total weight
     * @return true when the weighted combination reaches DUPLICATE_THRESHOLD
     */
    private static boolean compareOptionalFields(final BibEntryType type,
                                                 final BibEntry one,
                                                 final BibEntry two,
                                                 final double[] req) {
        final Set<BibField> optionalFields = type.getOptionalFields();
        if (optionalFields.isEmpty()) {
            // No optional fields declared: decide on the required-field score alone
            return req[0] >= DuplicateCheck.DUPLICATE_THRESHOLD;
        }
        final double[] opt = DuplicateCheck.compareFieldSet(optionalFields.stream().map(BibField::field).collect(Collectors.toSet()), one, two);
        // Weighted average: the required-field score counts REQUIRED_WEIGHT times as much,
        // and each side is additionally scaled by the total weight of fields actually present
        final double numerator = (DuplicateCheck.REQUIRED_WEIGHT * req[0] * req[1]) + (opt[0] * opt[1]);
        final double denominator = (req[1] * DuplicateCheck.REQUIRED_WEIGHT) + opt[1];
        final double totValue = numerator / denominator;
        return totValue >= DuplicateCheck.DUPLICATE_THRESHOLD;
    }
    /**
     * Compares the given fields of both entries using the per-field weights.
     *
     * @return [0] = weight-fraction of compared fields that matched (0..1),
     *         [1] = total weight of fields present in at least one entry;
     *         {0, 0} when there was nothing to compare
     */
    private static double[] compareFieldSet(final Collection<Field> fields, final BibEntry one, final BibEntry two) {
        if (fields.isEmpty()) {
            return new double[] {0.0, 0.0};
        }
        double equalWeights = 0;
        double totalWeights = 0.;
        for (final Field field : fields) {
            // Fields without an explicit entry in FIELD_WEIGHTS count with weight 1.0
            final double currentWeight = DuplicateCheck.FIELD_WEIGHTS.getOrDefault(field, 1.0);
            totalWeights += currentWeight;
            int result = DuplicateCheck.compareSingleField(field, one, two);
            if (result == EQUAL) {
                equalWeights += currentWeight;
            } else if (result == EMPTY_IN_BOTH) {
                // A field missing from both entries carries no information; exclude its weight
                totalWeights -= currentWeight;
            }
        }
        if (totalWeights > 0) {
            return new double[] {equalWeights / totalWeights, totalWeights};
        }
        // all fields are empty in both --> have no difference at all
        return new double[] {0.0, 0.0};
    }
private static int compareSingleField(final Field field, final BibEntry one, final BibEntry two) {
final Optional<String> optionalStringOne = one.getField(field);
final Optional<String> optionalStringTwo = two.getField(field);
if (!optionalStringOne.isPresent()) {
if (!optionalStringTwo.isPresent()) {
return EMPTY_IN_BOTH;
}
return EMPTY_IN_ONE;
} else if (!optionalStringTwo.isPresent()) {
return EMPTY_IN_TWO;
}
// Both strings present
final String stringOne = optionalStringOne.get();
final String stringTwo = optionalStringTwo.get();
if (field.getProperties().contains(FieldProperty.PERSON_NAMES)) {
return compareAuthorField(stringOne, stringTwo);
} else if (StandardField.PAGES == field) {
return comparePagesField(stringOne, stringTwo);
} else if (StandardField.JOURNAL == field) {
return compareJournalField(stringOne, stringTwo);
} else if (StandardField.CHAPTER == field) {
return compareChapterField(stringOne, stringTwo);
}
return compareField(stringOne, stringTwo);
}
private static int compareAuthorField(final String stringOne, final String stringTwo) {
// Specific for name fields.
// Harmonise case:
final String authorOne = AuthorList.fixAuthorLastNameOnlyCommas(stringOne, false).replace(" and ", " ").toLowerCase(Locale.ROOT);
final String authorTwo = AuthorList.fixAuthorLastNameOnlyCommas(stringTwo, false).replace(" and ", " ").toLowerCase(Locale.ROOT);
final double similarity = DuplicateCheck.correlateByWords(authorOne, authorTwo);
if (similarity > 0.8) {
return EQUAL;
}
return NOT_EQUAL;
}
/**
* Pages can be given with a variety of delimiters, "-", "--", " - ", " -- ".
* We do a replace to harmonize these to a simple "-"
* After this, a simple test for equality should be enough
*/
private static int comparePagesField(final String stringOne, final String stringTwo) {
final String processedStringOne = stringOne.replaceAll("[- ]+", "-");
final String processedStringTwo = stringTwo.replaceAll("[- ]+", "-");
if (processedStringOne.equals(processedStringTwo)) {
return EQUAL;
}
return NOT_EQUAL;
}
/**
* We do not attempt to harmonize abbreviation state of the journal names,
* but we remove periods from the names in case they are abbreviated with and without dots:
*/
private static int compareJournalField(final String stringOne, final String stringTwo) {
final String processedStringOne = stringOne.replace(".", "").toLowerCase(Locale.ROOT);
final String processedStringTwo = stringTwo.replace(".", "").toLowerCase(Locale.ROOT);
final double similarity = DuplicateCheck.correlateByWords(processedStringOne, processedStringTwo);
if (similarity > 0.8) {
return EQUAL;
}
return NOT_EQUAL;
}
private static int compareChapterField(final String stringOne, final String stringTwo) {
final String processedStringOne = stringOne.replaceAll("(?i)chapter", "").trim();
final String processedStringTwo = stringTwo.replaceAll("(?i)chapter", "").trim();
return compareField(processedStringOne, processedStringTwo);
}
private static int compareField(final String stringOne, final String stringTwo) {
final String processedStringOne = StringUtil.unifyLineBreaks(stringOne.toLowerCase(Locale.ROOT).trim(), OS.NEWLINE);
final String processedStringTwo = StringUtil.unifyLineBreaks(stringTwo.toLowerCase(Locale.ROOT).trim(), OS.NEWLINE);
final double similarity = DuplicateCheck.correlateByWords(processedStringOne, processedStringTwo);
if (similarity > 0.8) {
return EQUAL;
}
return NOT_EQUAL;
}
public static double compareEntriesStrictly(BibEntry one, BibEntry two) {
final Set<Field> allFields = new HashSet<>();
allFields.addAll(one.getFields());
allFields.addAll(two.getFields());
int score = 0;
for (final Field field : allFields) {
if (isSingleFieldEqual(one, two, field)) {
score++;
}
}
if (score == allFields.size()) {
return 1.01; // Just to make sure we can use score > 1 without trouble.
}
return (double) score / allFields.size();
}
private static boolean isSingleFieldEqual(BibEntry one, BibEntry two, Field field) {
final Optional<String> stringOne = one.getField(field);
final Optional<String> stringTwo = two.getField(field);
if (stringOne.isEmpty() && stringTwo.isEmpty()) {
return true;
}
if (stringOne.isEmpty() || stringTwo.isEmpty()) {
return false;
}
return StringUtil.unifyLineBreaks(stringOne.get(), OS.NEWLINE).equals(
StringUtil.unifyLineBreaks(stringTwo.get(), OS.NEWLINE));
}
/**
* Compare two strings on the basis of word-by-word correlation analysis.
*
* @param s1 The first string
* @param s2 The second string
* @return a value in the interval [0, 1] indicating the degree of match.
*/
public static double correlateByWords(final String s1, final String s2) {
final String[] w1 = s1.split("\\s");
final String[] w2 = s2.split("\\s");
final int n = Math.min(w1.length, w2.length);
int misses = 0;
for (int i = 0; i < n; i++) {
double corr = similarity(w1[i], w2[i]);
if (corr < 0.75) {
misses++;
}
}
final double missRate = (double) misses / (double) n;
return 1 - missRate;
}
/**
* Calculates the similarity (a number within 0 and 1) between two strings.
* http://stackoverflow.com/questions/955110/similarity-string-comparison-in-java
*/
private static double similarity(final String first, final String second) {
final String longer;
final String shorter;
if (first.length() < second.length()) {
longer = second;
shorter = first;
} else {
longer = first;
shorter = second;
}
final int longerLength = longer.length();
// both strings are zero length
if (longerLength == 0) {
return 1.0;
}
final double distanceIgnoredCase = new StringSimilarity().editDistanceIgnoreCase(longer, shorter);
final double similarity = (longerLength - distanceIgnoredCase) / longerLength;
LOGGER.debug("Longer string: {} Shorter string: {} Similarity: {}", longer, shorter, similarity);
return similarity;
}
    /**
     * Checks if the two entries represent the same publication.
     * <p>
     * Decision cascade (order matters):
     * <ol>
     * <li>shared identifier check (see {@code haveSameIdentifier})</li>
     * <li>if both entries carry a DOI (or, failing that, an ISBN), that identifier alone decides</li>
     * <li>entries with different types, editions, or different chapters/pages of the same book are never duplicates</li>
     * <li>otherwise a weighted field comparison against {@code DUPLICATE_THRESHOLD} decides</li>
     * </ol>
     *
     * @param one             the first entry
     * @param two             the second entry
     * @param bibDatabaseMode BibTeX or biblatex mode; determines which entry type definition is looked up
     * @return true if the entries are considered duplicates
     */
    public boolean isDuplicate(final BibEntry one, final BibEntry two, final BibDatabaseMode bibDatabaseMode) {
        if (haveSameIdentifier(one, two)) {
            return true;
        }
        // check DOI: when both entries have one, the DOI comparison is authoritative
        Optional<DOI> oneDOI = one.getDOI();
        Optional<DOI> twoDOI = two.getDOI();
        if (oneDOI.isPresent() && twoDOI.isPresent()) {
            return Objects.equals(oneDOI, twoDOI);
        }
        // check ISBN: same idea as the DOI check above
        Optional<ISBN> oneISBN = one.getISBN();
        Optional<ISBN> twoISBN = two.getISBN();
        if (oneISBN.isPresent() && twoISBN.isPresent()) {
            return Objects.equals(oneISBN, twoISBN);
        }
        // obvious non-duplicates: different type, edition, or different parts of the same book
        if (haveDifferentEntryType(one, two) ||
                haveDifferentEditions(one, two) ||
                haveDifferentChaptersOrPagesOfTheSameBook(one, two)) {
            return false;
        }
        final Optional<BibEntryType> type = entryTypesManager.enrich(one.getType(), bibDatabaseMode);
        if (type.isPresent()) {
            BibEntryType entryType = type.get();
            final double[] reqCmpResult = compareRequiredFields(entryType, one, two);
            if (isFarFromThreshold(reqCmpResult[0])) {
                // Far from the threshold value, so we base our decision on the required fields only
                return reqCmpResult[0] >= DuplicateCheck.DUPLICATE_THRESHOLD;
            }
            // Close to the threshold value, so we take a look at the optional fields, if any:
            if (compareOptionalFields(type.get(), one, two, reqCmpResult)) {
                return true;
            }
        }
        // If the type is not known, simply compare all fields without any distinction between optional/required.
        // In case required and optional fields did not decide above, we also use this fallback.
        return compareFieldSet(Sets.union(one.getFields(), two.getFields()), one, two)[0] >= DuplicateCheck.DUPLICATE_THRESHOLD;
    }
/**
* Goes through all entries in the given database, and if at least one of
* them is a duplicate of the given entry, as per
* Util.isDuplicate(BibEntry, BibEntry), the duplicate is returned.
* The search is terminated when the first duplicate is found.
*
* @param database The database to search.
* @param entry The entry of which we are looking for duplicates.
* @return The first duplicate entry found. Empty Optional if no duplicates are found.
*/
public Optional<BibEntry> containsDuplicate(final BibDatabase database,
final BibEntry entry,
final BibDatabaseMode bibDatabaseMode) {
return database.getEntries().stream().filter(other -> isDuplicate(entry, other, bibDatabaseMode)).findFirst();
}
}
| 16,694
| 42.476563
| 144
|
java
|
null |
jabref-main/src/main/java/org/jabref/logic/exporter/AtomicFileOutputStream.java
|
package org.jabref.logic.exporter;
import java.io.FileOutputStream;
import java.io.FilterOutputStream;
import java.io.IOException;
import java.io.OutputStream;
import java.nio.channels.FileLock;
import java.nio.channels.OverlappingFileLockException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.StandardCopyOption;
import java.nio.file.attribute.PosixFilePermission;
import java.util.EnumSet;
import java.util.Set;
import org.jabref.logic.util.BackupFileType;
import org.jabref.logic.util.io.FileUtil;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * A file output stream that is similar to the standard {@link FileOutputStream}, except that all writes are first
 * redirected to a temporary file. When the stream is closed, the temporary file (atomically) replaces the target file.
 *
 * <p>
 * In detail, the strategy is to:
 * <ol>
 * <li>Write to a temporary file (with .tmp suffix) in the same directory as the destination file.</li>
 * <li>Create a backup (with .bak suffix) of the original file (if it exists) in the same directory.</li>
 * <li>Move the temporary file to the correct place, overwriting any file that already exists at that location.</li>
 * <li>Delete the backup file (if configured to do so).</li>
 * </ol>
 * If all goes well, no temporary or backup files will remain on disk after closing the stream.
 * <p>
 * Errors are handled as follows:
 * <ol>
 * <li>If anything goes wrong while writing to the temporary file, the temporary file will be deleted (leaving the
 * original file untouched).</li>
 * <li>If anything goes wrong while copying the temporary file to the target file, the backup of the original file is
 * kept.</li>
 * </ol>
 * <p>
 * Implementation inspired by code from <a href="https://github.com/martylamb/atomicfileoutputstream/blob/master/src/main/java/com/martiansoftware/io/AtomicFileOutputStream.java">Marty
 * Lamb</a> and <a href="https://github.com/apache/zookeeper/blob/master/src/java/main/org/apache/zookeeper/common/AtomicFileOutputStream.java">Apache</a>.
 */
public class AtomicFileOutputStream extends FilterOutputStream {

    private static final Logger LOGGER = LoggerFactory.getLogger(AtomicFileOutputStream.class);

    private static final String TEMPORARY_EXTENSION = ".tmp";
    private static final String SAVE_EXTENSION = "." + BackupFileType.SAVE.getExtensions().get(0);

    /**
     * The file we want to create/replace.
     */
    private final Path targetFile;

    /**
     * The file to which writes are redirected to.
     */
    private final Path temporaryFile;

    private final FileLock temporaryFileLock;

    /**
     * A backup of the target file (if it exists), created when the stream is closed
     */
    private final Path backupFile;

    private final boolean keepBackup;

    /**
     * Set as soon as any write or flush fails; {@link #close()} then refuses to replace the target file.
     */
    private boolean errorDuringWrite = false;

    /**
     * Creates a new output stream to write to or replace the file at the specified path.
     *
     * @param path the path of the file to write to or replace
     * @param keepBackup whether to keep the backup file (.sav) after a successful write process
     */
    public AtomicFileOutputStream(Path path, boolean keepBackup) throws IOException {
        // Files.newOutputStream(getPathOfTemporaryFile(path)) leads to a "sun.nio.ch.ChannelOutputStream", which does not offer "lock"
        this(path, getPathOfTemporaryFile(path), new FileOutputStream(getPathOfTemporaryFile(path).toFile()), keepBackup);
    }

    /**
     * Creates a new output stream to write to or replace the file at the specified path.
     * The backup file (.sav) is deleted when write was successful.
     *
     * @param path the path of the file to write to or replace
     */
    public AtomicFileOutputStream(Path path) throws IOException {
        this(path, false);
    }

    /**
     * Required for proper testing
     */
    AtomicFileOutputStream(Path path, Path pathOfTemporaryFile, OutputStream temporaryFileOutputStream, boolean keepBackup) throws IOException {
        super(temporaryFileOutputStream);
        this.targetFile = path;
        this.temporaryFile = pathOfTemporaryFile;
        this.backupFile = getPathOfSaveBackupFile(path);
        this.keepBackup = keepBackup;

        try {
            // Lock files (so that at least not another JabRef instance writes at the same time to the same tmp file)
            if (out instanceof FileOutputStream stream) {
                temporaryFileLock = stream.getChannel().lock();
            } else {
                temporaryFileLock = null;
            }
        } catch (OverlappingFileLockException exception) {
            throw new IOException("Could not obtain write access to " + temporaryFile + ". Maybe another instance of JabRef is currently writing to the same file?", exception);
        }
    }

    private static Path getPathOfTemporaryFile(Path targetFile) {
        return FileUtil.addExtension(targetFile, TEMPORARY_EXTENSION);
    }

    private static Path getPathOfSaveBackupFile(Path targetFile) {
        return FileUtil.addExtension(targetFile, SAVE_EXTENSION);
    }

    /**
     * Returns the path of the backup copy of the original file (may not exist)
     */
    public Path getBackup() {
        return backupFile;
    }

    /**
     * Overridden because of cleanup actions in case of an error
     */
    @Override
    public void write(byte[] b, int off, int len) throws IOException {
        try {
            out.write(b, off, len);
        } catch (IOException exception) {
            // Mark the stream as failed first so that close() will not commit partial content
            errorDuringWrite = true;
            cleanup();
            throw exception;
        }
    }

    /**
     * Closes the write process to the temporary file but does not commit to the target file.
     */
    public void abort() {
        errorDuringWrite = true;
        try {
            super.close();
            Files.deleteIfExists(temporaryFile);
            Files.deleteIfExists(backupFile);
        } catch (IOException exception) {
            LOGGER.debug("Unable to abort writing to file {}", temporaryFile, exception);
        }
    }

    /**
     * Releases the lock on the temporary file (if any) and removes the temporary file itself.
     */
    private void cleanup() {
        try {
            if (temporaryFileLock != null) {
                temporaryFileLock.release();
            }
        } catch (IOException exception) {
            // Currently, we always get the exception:
            // Unable to release lock on file C:\Users\koppor\AppData\Local\Temp\junit11976839611279549873\error-during-save.txt.tmp: java.nio.channels.ClosedChannelException
            LOGGER.debug("Unable to release lock on file {}", temporaryFile, exception);
        }
        try {
            Files.deleteIfExists(temporaryFile);
        } catch (IOException exception) {
            LOGGER.debug("Unable to delete file {}", temporaryFile, exception);
        }
    }

    /**
     * perform the final operations to move the temporary file to its final destination
     */
    @Override
    public void close() throws IOException {
        try {
            try {
                // Make sure we have written everything to the temporary file
                flush();
                if (out instanceof FileOutputStream stream) {
                    stream.getFD().sync();
                }
            } catch (IOException exception) {
                // Try to close nonetheless
                super.close();
                throw exception;
            }
            super.close();

            if (errorDuringWrite) {
                // in case there was an error during write, we do not replace the original file
                return;
            }

            // We successfully wrote everything to the temporary file, lets copy it to the correct place
            // First, make backup of original file and try to save file permissions to restore them later (by default: 664)
            Set<PosixFilePermission> oldFilePermissions = EnumSet.of(PosixFilePermission.OWNER_READ,
                    PosixFilePermission.OWNER_WRITE,
                    PosixFilePermission.GROUP_READ,
                    PosixFilePermission.GROUP_WRITE,
                    PosixFilePermission.OTHERS_READ);
            if (Files.exists(targetFile)) {
                try {
                    Files.copy(targetFile, backupFile, StandardCopyOption.REPLACE_EXISTING);
                } catch (Exception e) {
                    LOGGER.warn("Could not create backup file {}", backupFile);
                }
                if (FileUtil.IS_POSIX_COMPLIANT) {
                    try {
                        oldFilePermissions = Files.getPosixFilePermissions(targetFile);
                    } catch (IOException exception) {
                        LOGGER.warn("Error getting file permissions for file {}.", targetFile, exception);
                    }
                }
            }
            try {
                // Move temporary file (replace original if it exists)
                Files.move(temporaryFile, targetFile, StandardCopyOption.ATOMIC_MOVE, StandardCopyOption.REPLACE_EXISTING);
            } catch (Exception e) {
                LOGGER.warn("Could not move temporary file", e);
                throw e;
            }

            // Restore file permissions
            if (FileUtil.IS_POSIX_COMPLIANT) {
                try {
                    Files.setPosixFilePermissions(targetFile, oldFilePermissions);
                } catch (IOException exception) {
                    LOGGER.warn("Error writing file permissions to file {}.", targetFile, exception);
                }
            }

            if (!keepBackup) {
                // Remove backup file for saving
                Files.deleteIfExists(backupFile);
            }
        } finally {
            // Remove temporary file (but not the backup!)
            cleanup();
        }
    }

    @Override
    public void flush() throws IOException {
        try {
            super.flush();
        } catch (IOException exception) {
            // Previously, only write(byte[], int, int) set errorDuringWrite; a failed flush must
            // likewise prevent close() from replacing the target file with incomplete content
            errorDuringWrite = true;
            cleanup();
            throw exception;
        }
    }

    @Override
    public void write(int b) throws IOException {
        try {
            super.write(b);
        } catch (IOException exception) {
            // Same consistency fix as in flush(): a failed single-byte write marks the stream as failed
            errorDuringWrite = true;
            cleanup();
            throw exception;
        }
    }
}
| 10,254
| 36.702206
| 184
|
java
|
null |
jabref-main/src/main/java/org/jabref/logic/exporter/AtomicFileWriter.java
|
package org.jabref.logic.exporter;
import java.io.IOException;
import java.io.OutputStreamWriter;
import java.nio.charset.Charset;
import java.nio.charset.CharsetEncoder;
import java.nio.file.Path;
import java.util.Collections;
import java.util.Set;
import java.util.TreeSet;
/**
 * Writer similar to the built-in {@link java.io.FileWriter}, but backed by an {@link AtomicFileOutputStream} so that
 * errors during the write process cannot destroy the contents of the target file.
 * In addition, this writer verifies that the chosen encoding can represent all text that is written. Characters
 * whose encoding was problematic can be retrieved via {@link #getEncodingProblems()}.
 */
public class AtomicFileWriter extends OutputStreamWriter {

    private final CharsetEncoder encoder;

    // Collects (sorted, without duplicates) every character the chosen charset could not encode
    private final Set<Character> problemCharacters = new TreeSet<>();

    public AtomicFileWriter(Path file, Charset encoding) throws IOException {
        this(file, encoding, false);
    }

    public AtomicFileWriter(Path file, Charset encoding, boolean keepBackup) throws IOException {
        super(new AtomicFileOutputStream(file, keepBackup), encoding);
        encoder = encoding.newEncoder();
    }

    @Override
    public void write(String str) throws IOException {
        super.write(str);
        if (encoder.canEncode(str)) {
            return;
        }
        // The string as a whole is not encodable: record each offending character individually
        for (char character : str.toCharArray()) {
            if (!encoder.canEncode(character)) {
                problemCharacters.add(character);
            }
        }
    }

    public boolean hasEncodingProblems() {
        return !problemCharacters.isEmpty();
    }

    public Set<Character> getEncodingProblems() {
        return Collections.unmodifiableSet(problemCharacters);
    }
}
| 1,856
| 33.388889
| 116
|
java
|
null |
jabref-main/src/main/java/org/jabref/logic/exporter/BibDatabaseWriter.java
|
package org.jabref.logic.exporter;
import java.io.IOException;
import java.nio.charset.Charset;
import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Comparator;
import java.util.HashMap;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.Optional;
import java.util.SortedSet;
import java.util.TreeSet;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import java.util.stream.Stream;
import org.jabref.logic.bibtex.comparator.BibtexStringComparator;
import org.jabref.logic.bibtex.comparator.CrossRefEntryComparator;
import org.jabref.logic.bibtex.comparator.FieldComparator;
import org.jabref.logic.bibtex.comparator.FieldComparatorStack;
import org.jabref.logic.bibtex.comparator.IdComparator;
import org.jabref.logic.citationkeypattern.CitationKeyGenerator;
import org.jabref.logic.citationkeypattern.CitationKeyPatternPreferences;
import org.jabref.logic.citationkeypattern.GlobalCitationKeyPattern;
import org.jabref.logic.cleanup.FieldFormatterCleanup;
import org.jabref.logic.cleanup.FieldFormatterCleanups;
import org.jabref.logic.formatter.bibtexfields.TrimWhitespaceFormatter;
import org.jabref.model.FieldChange;
import org.jabref.model.database.BibDatabase;
import org.jabref.model.database.BibDatabaseContext;
import org.jabref.model.database.BibDatabaseMode;
import org.jabref.model.entry.BibEntry;
import org.jabref.model.entry.BibEntryType;
import org.jabref.model.entry.BibEntryTypesManager;
import org.jabref.model.entry.BibtexString;
import org.jabref.model.entry.field.InternalField;
import org.jabref.model.metadata.MetaData;
import org.jabref.model.metadata.SaveOrder;
import org.jabref.model.strings.StringUtil;
import org.jooq.lambda.Unchecked;
/**
 * A generic writer for our database. This is independent of the concrete serialization format.
 * For instance, we could also write out YAML or XML by subclassing this class.
 * <p>
 * Currently, {@link BibtexDatabaseWriter} is the only subclass of this class (and that class writes a .bib file)
 * <p>
 * The opposite class is {@link org.jabref.logic.importer.fileformat.BibtexParser}
 */
public abstract class BibDatabaseWriter {

    // ALL: entries plus prolog/metadata/type definitions; PLAIN_BIBTEX: entries only
    public enum SaveType { ALL, PLAIN_BIBTEX }

    private static final Pattern REFERENCE_PATTERN = Pattern.compile("(#[A-Za-z]+#)"); // Used to detect string references in strings

    protected final BibWriter bibWriter;
    protected final SaveConfiguration saveConfiguration;
    protected final CitationKeyPatternPreferences keyPatternPreferences;
    // Records every change applied by save actions/cleanups so callers can report them afterwards
    protected final List<FieldChange> saveActionsFieldChanges = new ArrayList<>();
    protected final BibEntryTypesManager entryTypesManager;

    public BibDatabaseWriter(BibWriter bibWriter,
                             SaveConfiguration saveConfiguration,
                             CitationKeyPatternPreferences keyPatternPreferences,
                             BibEntryTypesManager entryTypesManager) {
        this.bibWriter = Objects.requireNonNull(bibWriter);
        this.saveConfiguration = saveConfiguration;
        this.keyPatternPreferences = keyPatternPreferences;
        this.entryTypesManager = entryTypesManager;
    }

    /**
     * Applies the save actions configured in the given metadata (plus the standard whitespace
     * trimming cleanup) to every entry and returns the resulting field changes.
     */
    private static List<FieldChange> applySaveActions(List<BibEntry> toChange, MetaData metaData) {
        List<FieldChange> changes = new ArrayList<>();

        Optional<FieldFormatterCleanups> saveActions = metaData.getSaveActions();
        saveActions.ifPresent(actions -> {
            // save actions defined -> apply for every entry
            for (BibEntry entry : toChange) {
                changes.addAll(actions.applySaveActions(entry));
            }
        });

        // Run standard cleanups
        List<FieldFormatterCleanup> preSaveCleanups =
                Stream.of(new TrimWhitespaceFormatter())
                      .map(formatter -> new FieldFormatterCleanup(InternalField.INTERNAL_ALL_FIELD, formatter))
                      .toList();
        for (FieldFormatterCleanup formatter : preSaveCleanups) {
            for (BibEntry entry : toChange) {
                changes.addAll(formatter.cleanup(entry));
            }
        }

        return changes;
    }

    /**
     * Convenience overload of {@link #applySaveActions(List, MetaData)} for a single entry.
     */
    public static List<FieldChange> applySaveActions(BibEntry entry, MetaData metaData) {
        return applySaveActions(Collections.singletonList(entry), metaData);
    }

    /**
     * Builds the comparator chain used to order entries on save: cross-reference order first,
     * then either the internal ID order or the configured sort criteria plus the citation key.
     */
    private static List<Comparator<BibEntry>> getSaveComparators(MetaData metaData, SaveConfiguration preferences) {
        List<Comparator<BibEntry>> comparators = new ArrayList<>();
        Optional<SaveOrder> saveOrder = getSaveOrder(metaData, preferences);

        // Take care, using CrossRefEntry-Comparator, that referred entries occur after referring
        // ones. This is a necessary requirement for BibTeX to be able to resolve referenced entries correctly.
        comparators.add(new CrossRefEntryComparator());

        if (saveOrder.isEmpty() || saveOrder.get().getOrderType() == SaveOrder.OrderType.ORIGINAL) {
            // entries will be sorted based on their internal IDs
            comparators.add(new IdComparator());
        } else {
            // use configured sorting strategy
            List<FieldComparator> fieldComparators = saveOrder.get()
                                                              .getSortCriteria().stream()
                                                              .map(FieldComparator::new)
                                                              .toList();
            comparators.addAll(fieldComparators);
            comparators.add(new FieldComparator(InternalField.KEY_FIELD));
        }

        return comparators;
    }

    /**
     * We have begun to use getSortedEntries() for both database save operations and non-database save operations. In a
     * non-database save operation (such as the exportDatabase call), we do not wish to use the global preference of
     * saving in standard order.
     */
    public static List<BibEntry> getSortedEntries(BibDatabaseContext bibDatabaseContext, List<BibEntry> entriesToSort, SaveConfiguration preferences) {
        Objects.requireNonNull(bibDatabaseContext);
        Objects.requireNonNull(entriesToSort);

        // if no meta data are present, simply return in original order
        if (bibDatabaseContext.getMetaData() == null) {
            return new LinkedList<>(entriesToSort);
        }

        List<Comparator<BibEntry>> comparators = getSaveComparators(bibDatabaseContext.getMetaData(), preferences);
        FieldComparatorStack<BibEntry> comparatorStack = new FieldComparatorStack<>(comparators);

        List<BibEntry> sorted = new ArrayList<>(entriesToSort);
        sorted.sort(comparatorStack);
        return sorted;
    }

    /**
     * Determines which save order to use; an empty Optional means "keep the original order".
     */
    private static Optional<SaveOrder> getSaveOrder(MetaData metaData, SaveConfiguration saveConfiguration) {
        /* two options:
         * 1. order specified in metaData
         * 2. original order
         */
        if (saveConfiguration.useMetadataSaveOrder()) {
            return metaData.getSaveOrderConfig();
        }

        if (saveConfiguration.getSaveOrder().getOrderType() == SaveOrder.OrderType.ORIGINAL) {
            return Optional.empty();
        }

        return Optional.ofNullable(saveConfiguration.getSaveOrder());
    }

    /**
     * Returns all field changes applied by save actions during the last save, read-only.
     */
    public List<FieldChange> getSaveActionsFieldChanges() {
        return Collections.unmodifiableList(saveActionsFieldChanges);
    }

    /**
     * Saves the complete database.
     */
    public void saveDatabase(BibDatabaseContext bibDatabaseContext) throws IOException {
        List<BibEntry> entries = bibDatabaseContext.getDatabase().getEntries()
                                                   .stream()
                                                   .filter(entry -> !entry.isEmpty())
                                                   .toList();
        savePartOfDatabase(bibDatabaseContext, entries);
    }

    /**
     * Saves the database, including only the specified entries.
     *
     * @param entries A list of entries to save. The list itself is not modified in this code
     */
    public void savePartOfDatabase(BibDatabaseContext bibDatabaseContext, List<BibEntry> entries) throws IOException {
        Optional<String> sharedDatabaseIDOptional = bibDatabaseContext.getDatabase().getSharedDatabaseID();
        sharedDatabaseIDOptional.ifPresent(Unchecked.consumer(id -> writeDatabaseID(id)));

        // Some file formats write something at the start of the file (like the encoding)
        if (saveConfiguration.getSaveType() != SaveType.PLAIN_BIBTEX) {
            Charset charset = bibDatabaseContext.getMetaData().getEncoding().orElse(StandardCharsets.UTF_8);
            writeProlog(bibDatabaseContext, charset);
        }

        bibWriter.finishBlock();

        // Write preamble if there is one.
        writePreamble(bibDatabaseContext.getDatabase().getPreamble().orElse(""));

        // Write strings if there are any.
        writeStrings(bibDatabaseContext.getDatabase());

        // Write database entries.
        List<BibEntry> sortedEntries = getSortedEntries(bibDatabaseContext, entries, saveConfiguration);
        List<FieldChange> saveActionChanges = applySaveActions(sortedEntries, bibDatabaseContext.getMetaData());
        saveActionsFieldChanges.addAll(saveActionChanges);
        if (keyPatternPreferences.shouldGenerateCiteKeysBeforeSaving()) {
            List<FieldChange> keyChanges = generateCitationKeys(bibDatabaseContext, sortedEntries);
            saveActionsFieldChanges.addAll(keyChanges);
        }

        // Map to collect entry type definitions that we must save along with entries using them.
        SortedSet<BibEntryType> typesToWrite = new TreeSet<>();

        for (BibEntry entry : sortedEntries) {
            // Check if we must write the type definition for this
            // entry, as well. Our criterion is that all non-standard
            // types (*not* all customized standard types) must be written.
            if (entryTypesManager.isCustomType(entry.getType(), bibDatabaseContext.getMode())) {
                // If user-defined entry type, then add it
                // Otherwise (enrich returns empty optional) it is a completely unknown entry type, so ignore it
                entryTypesManager.enrich(entry.getType(), bibDatabaseContext.getMode()).ifPresent(typesToWrite::add);
            }

            writeEntry(entry, bibDatabaseContext.getMode());
        }

        if (saveConfiguration.getSaveType() != SaveType.PLAIN_BIBTEX) {
            // Write meta data.
            writeMetaData(bibDatabaseContext.getMetaData(), keyPatternPreferences.getKeyPattern());

            // Write type definitions, if any:
            writeEntryTypeDefinitions(typesToWrite);
        }

        // finally write whatever remains of the file, but at least a concluding newline
        writeEpilogue(bibDatabaseContext.getDatabase().getEpilog());
    }

    // Format-specific hook: writes content at the top of the file (e.g. the encoding marker)
    protected abstract void writeProlog(BibDatabaseContext bibDatabaseContext, Charset encoding) throws IOException;

    // Format-specific hook: serializes a single entry
    protected abstract void writeEntry(BibEntry entry, BibDatabaseMode mode) throws IOException;

    // Format-specific hook: writes the trailing file content (at least a concluding newline)
    protected abstract void writeEpilogue(String epilogue) throws IOException;

    /**
     * Writes all data to the specified writer, using each object's toString() method.
     */
    protected void writeMetaData(MetaData metaData, GlobalCitationKeyPattern globalCiteKeyPattern) throws IOException {
        Objects.requireNonNull(metaData);

        Map<String, String> serializedMetaData = MetaDataSerializer.getSerializedStringMap(metaData,
                globalCiteKeyPattern);

        for (Map.Entry<String, String> metaItem : serializedMetaData.entrySet()) {
            writeMetaDataItem(metaItem);
        }
    }

    // Format-specific hook: writes one serialized metadata key/value pair
    protected abstract void writeMetaDataItem(Map.Entry<String, String> metaItem) throws IOException;

    // Format-specific hook: writes the preamble block
    protected abstract void writePreamble(String preamble) throws IOException;

    // Format-specific hook: writes the shared-database ID marker
    protected abstract void writeDatabaseID(String sharedDatabaseID) throws IOException;

    /**
     * Write all strings in alphabetical order, modified to produce a safe (for BibTeX) order of the strings if they
     * reference each other.
     *
     * @param database The database whose strings we should write.
     */
    private void writeStrings(BibDatabase database) throws IOException {
        List<BibtexString> strings = database.getStringKeySet()
                                             .stream()
                                             .map(database::getString)
                                             .sorted(new BibtexStringComparator(true))
                                             .toList();
        // First, make a Map of all entries:
        Map<String, BibtexString> remaining = new HashMap<>();
        int maxKeyLength = 0;
        for (BibtexString string : strings) {
            remaining.put(string.getName(), string);
            maxKeyLength = Math.max(maxKeyLength, string.getName().length());
        }

        for (BibtexString.Type t : BibtexString.Type.values()) {
            for (BibtexString bs : strings) {
                if (remaining.containsKey(bs.getName()) && (bs.getType() == t)) {
                    writeString(bs, remaining, maxKeyLength);
                }
            }
        }
        bibWriter.finishBlock();
    }

    /**
     * Writes one string constant, recursively writing any string it references first so that
     * the resulting order is resolvable by BibTeX.
     */
    protected void writeString(BibtexString bibtexString, Map<String, BibtexString> remaining, int maxKeyLength)
            throws IOException {
        // First remove this from the "remaining" list so it can't cause problem with circular refs:
        remaining.remove(bibtexString.getName());

        // Then we go through the string looking for references to other strings. If we find references
        // to strings that we will write, but still haven't, we write those before proceeding. This ensures
        // that the string order will be acceptable for BibTeX.
        String content = bibtexString.getContent();
        Matcher m;
        while ((m = REFERENCE_PATTERN.matcher(content)).find()) {
            String foundLabel = m.group(1);
            int restIndex = content.indexOf(foundLabel) + foundLabel.length();
            content = content.substring(restIndex);
            String label = foundLabel.substring(1, foundLabel.length() - 1);

            // If the label we found exists as a key in the "remaining" Map, we go on and write it now:
            if (remaining.containsKey(label)) {
                BibtexString referred = remaining.get(label);
                writeString(referred, remaining, maxKeyLength);
            }
        }

        writeString(bibtexString, maxKeyLength);
    }

    // Format-specific hook: serializes a single string constant
    protected abstract void writeString(BibtexString bibtexString, int maxKeyLength)
            throws IOException;

    // Writes all collected custom entry type definitions via the format-specific hook
    protected void writeEntryTypeDefinitions(SortedSet<BibEntryType> types) throws IOException {
        for (BibEntryType type : types) {
            writeEntryTypeDefinition(type);
        }
    }

    // Format-specific hook: serializes one custom entry type definition
    protected abstract void writeEntryTypeDefinition(BibEntryType customType) throws IOException;

    /**
     * Generate keys for all entries that are lacking keys.
     */
    protected List<FieldChange> generateCitationKeys(BibDatabaseContext databaseContext, List<BibEntry> entries) {
        List<FieldChange> changes = new ArrayList<>();

        CitationKeyGenerator keyGenerator = new CitationKeyGenerator(databaseContext, keyPatternPreferences);
        for (BibEntry bes : entries) {
            Optional<String> oldKey = bes.getCitationKey();
            if (StringUtil.isBlank(oldKey)) {
                Optional<FieldChange> change = keyGenerator.generateAndSetKey(bes);
                change.ifPresent(changes::add);
            }
        }

        return changes;
    }
}
| 15,856
| 43.92068
| 151
|
java
|
null |
jabref-main/src/main/java/org/jabref/logic/exporter/BibWriter.java
|
package org.jabref.logic.exporter;
import java.io.IOException;
import java.io.Writer;
import org.jabref.model.strings.StringUtil;
/**
 * Writes BibTeX source to a {@link Writer}, normalizing line breaks and keeping
 * track of block boundaries. Used by {@link BibtexDatabaseWriter}.
 */
public class BibWriter {

    private final String newLineSeparator;
    private final Writer writer;

    // After a finished block, one separating blank line is emitted before the next write.
    private boolean precedingNewLineRequired = false;
    // Whether anything has been written since the last finished block.
    private boolean somethingWasWritten = false;
    // Whether the most recently written text ended with a line break.
    private boolean lastWriteWasNewline = false;

    /**
     * @param newLineSeparator the string used for a line break
     */
    public BibWriter(Writer writer, String newLineSeparator) {
        this.writer = writer;
        this.newLineSeparator = newLineSeparator;
    }

    /**
     * Writes the given string; any line breaks it contains are converted to the
     * separator configured for this writer.
     */
    public void write(String string) throws IOException {
        if (precedingNewLineRequired) {
            writer.write(newLineSeparator);
            precedingNewLineRequired = false;
        }
        String normalized = StringUtil.unifyLineBreaks(string, newLineSeparator);
        writer.write(normalized);
        lastWriteWasNewline = normalized.endsWith(newLineSeparator);
        somethingWasWritten = true;
    }

    /**
     * Writes the given string followed by a line break.
     */
    public void writeLine(String string) throws IOException {
        write(string);
        finishLine();
    }

    /**
     * Ensures the current line is terminated with a line break.
     */
    public void finishLine() throws IOException {
        if (!lastWriteWasNewline) {
            write(newLineSeparator);
        }
    }

    /**
     * Ends the current block: terminates the line if necessary and requests a
     * separating blank line before the next output. A no-op if nothing has been
     * written since the previous block ended.
     */
    public void finishBlock() throws IOException {
        if (!somethingWasWritten) {
            return;
        }
        if (!lastWriteWasNewline) {
            finishLine();
        }
        somethingWasWritten = false;
        precedingNewLineRequired = true;
    }
}
| 1,958
| 25.835616
| 112
|
java
|
null |
jabref-main/src/main/java/org/jabref/logic/exporter/BibtexDatabaseWriter.java
|
package org.jabref.logic.exporter;
import java.io.IOException;
import java.io.Writer;
import java.nio.charset.Charset;
import java.nio.charset.StandardCharsets;
import java.util.Map;
import org.jabref.logic.bibtex.BibEntryWriter;
import org.jabref.logic.bibtex.FieldPreferences;
import org.jabref.logic.bibtex.FieldWriter;
import org.jabref.logic.bibtex.InvalidFieldValueException;
import org.jabref.logic.citationkeypattern.CitationKeyPatternPreferences;
import org.jabref.model.database.BibDatabaseContext;
import org.jabref.model.database.BibDatabaseMode;
import org.jabref.model.entry.BibEntry;
import org.jabref.model.entry.BibEntryType;
import org.jabref.model.entry.BibEntryTypesManager;
import org.jabref.model.entry.BibtexString;
import org.jabref.model.entry.field.InternalField;
import org.jabref.model.metadata.MetaData;
import org.jabref.model.strings.StringUtil;
/**
 * Writes a .bib file following the BibTeX / BibLaTeX format using the provided {@link BibWriter}
 */
public class BibtexDatabaseWriter extends BibDatabaseWriter {

    // Prefix used to mark the shared-database ID inside a "%"-comment line
    public static final String DATABASE_ID_PREFIX = "DBID:";

    private static final String COMMENT_PREFIX = "@Comment";
    private static final String PREAMBLE_PREFIX = "@Preamble";
    private static final String STRING_PREFIX = "@String";

    private final FieldPreferences fieldPreferences;

    public BibtexDatabaseWriter(BibWriter bibWriter,
                                SaveConfiguration saveConfiguration,
                                FieldPreferences fieldPreferences,
                                CitationKeyPatternPreferences citationKeyPatternPreferences,
                                BibEntryTypesManager entryTypesManager) {
        super(bibWriter,
                saveConfiguration,
                citationKeyPatternPreferences,
                entryTypesManager);
        this.fieldPreferences = fieldPreferences;
    }

    /**
     * Convenience constructor that wraps the given writer in a {@link BibWriter}
     * using the given newline string.
     */
    public BibtexDatabaseWriter(Writer writer,
                                String newline,
                                SaveConfiguration saveConfiguration,
                                FieldPreferences fieldPreferences,
                                CitationKeyPatternPreferences citationKeyPatternPreferences,
                                BibEntryTypesManager entryTypesManager) {
        super(new BibWriter(writer, newline),
                saveConfiguration,
                citationKeyPatternPreferences,
                entryTypesManager);
        this.fieldPreferences = fieldPreferences;
    }

    /**
     * Writes the free text trailing the last entry of the file, if any.
     */
    @Override
    protected void writeEpilogue(String epilogue) throws IOException {
        if (!StringUtil.isNullOrEmpty(epilogue)) {
            bibWriter.write(epilogue);
            bibWriter.finishBlock();
        }
    }

    /**
     * Serializes one metadata key/value pair as an "@Comment" block,
     * prefixed with {@link MetaData#META_FLAG} so it can be recognized on reading.
     */
    @Override
    protected void writeMetaDataItem(Map.Entry<String, String> metaItem) throws IOException {
        bibWriter.write(COMMENT_PREFIX + "{");
        bibWriter.write(MetaData.META_FLAG);
        bibWriter.write(metaItem.getKey());
        bibWriter.write(":");
        bibWriter.write(metaItem.getValue());
        bibWriter.write("}");
        bibWriter.finishBlock();
    }

    /**
     * Writes the preamble as an "@Preamble{...}" block; skipped when empty.
     */
    @Override
    protected void writePreamble(String preamble) throws IOException {
        if (!StringUtil.isNullOrEmpty(preamble)) {
            bibWriter.write(PREAMBLE_PREFIX + "{");
            bibWriter.write(preamble);
            bibWriter.writeLine("}");
            bibWriter.finishBlock();
        }
    }

    /**
     * Writes one "@String" constant. Unmodified strings are written back using
     * their original serialization (unless reformatting was requested); names are
     * padded to {@code maxKeyLength} so the "=" signs line up.
     */
    @Override
    protected void writeString(BibtexString bibtexString, int maxKeyLength) throws IOException {
        // If the string has not been modified, write it back as it was
        if (!saveConfiguration.shouldReformatFile() && !bibtexString.hasChanged()) {
            bibWriter.write(bibtexString.getParsedSerialization());
            return;
        }
        // Write user comments
        String userComments = bibtexString.getUserComments();
        if (!userComments.isEmpty()) {
            bibWriter.writeLine(userComments);
        }
        bibWriter.write(STRING_PREFIX + "{" + bibtexString.getName() + StringUtil
                .repeatSpaces(maxKeyLength - bibtexString.getName().length()) + " = ");
        if (bibtexString.getContent().isEmpty()) {
            bibWriter.write("{}");
        } else {
            try {
                // FieldWriter takes care of brace/quote formatting of the content
                String formatted = new FieldWriter(fieldPreferences)
                        .write(InternalField.BIBTEX_STRING, bibtexString.getContent());
                bibWriter.write(formatted);
            } catch (InvalidFieldValueException ex) {
                throw new IOException(ex);
            }
        }
        bibWriter.writeLine("}");
    }

    /**
     * Writes a custom entry type definition wrapped in an "@Comment" block.
     */
    @Override
    protected void writeEntryTypeDefinition(BibEntryType customType) throws IOException {
        bibWriter.write(COMMENT_PREFIX + "{");
        bibWriter.write(MetaDataSerializer.serializeCustomEntryTypes(customType));
        bibWriter.writeLine("}");
        bibWriter.finishBlock();
    }

    /**
     * Writes the "% Encoding: ..." header line.
     */
    @Override
    protected void writeProlog(BibDatabaseContext bibDatabaseContext, Charset encoding) throws IOException {
        // We write the encoding if
        //   - it is provided (!= null)
        //   - explicitly set in the .bib file OR not equal to UTF_8
        // Otherwise, we do not write anything and return
        if ((encoding == null) || (!bibDatabaseContext.getMetaData().getEncodingExplicitlySupplied() && (encoding.equals(StandardCharsets.UTF_8)))) {
            return;
        }
        // Writes the file encoding information.
        bibWriter.write("% ");
        bibWriter.writeLine(SaveConfiguration.ENCODING_PREFIX + encoding);
    }

    /**
     * Writes the shared-database ID as a "% DBID: ..." comment line.
     */
    @Override
    protected void writeDatabaseID(String sharedDatabaseID) throws IOException {
        bibWriter.write("% ");
        bibWriter.write(DATABASE_ID_PREFIX);
        bibWriter.write(" ");
        bibWriter.writeLine(sharedDatabaseID);
    }

    /**
     * Writes a single entry, delegating the actual field serialization to {@link BibEntryWriter}.
     */
    @Override
    protected void writeEntry(BibEntry entry, BibDatabaseMode mode) throws IOException {
        BibEntryWriter bibtexEntryWriter = new BibEntryWriter(new FieldWriter(fieldPreferences), entryTypesManager);
        bibtexEntryWriter.write(entry, bibWriter, mode, saveConfiguration.shouldReformatFile());
    }
}
| 6,233
| 37.720497
| 149
|
java
|
null |
jabref-main/src/main/java/org/jabref/logic/exporter/BlankLineBehaviour.java
|
package org.jabref.logic.exporter;
/**
 * This enum represents the behaviour for blank lines in {@link TemplateExporter}
 */
public enum BlankLineBehaviour {
    /** Keep blank lines in the generated output. */
    KEEP_BLANKS,
    /** Remove blank lines from the generated output. */
    DELETE_BLANKS
}
| 196
| 18.7
| 81
|
java
|
null |
jabref-main/src/main/java/org/jabref/logic/exporter/EmbeddedBibFilePdfExporter.java
|
package org.jabref.logic.exporter;
import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.StringWriter;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import org.jabref.logic.bibtex.BibEntryWriter;
import org.jabref.logic.bibtex.FieldPreferences;
import org.jabref.logic.bibtex.FieldWriter;
import org.jabref.logic.l10n.Localization;
import org.jabref.logic.util.OS;
import org.jabref.logic.util.StandardFileType;
import org.jabref.logic.util.io.FileUtil;
import org.jabref.logic.xmp.XmpUtilWriter;
import org.jabref.model.database.BibDatabaseContext;
import org.jabref.model.database.BibDatabaseMode;
import org.jabref.model.entry.BibEntry;
import org.jabref.model.entry.BibEntryTypesManager;
import org.apache.pdfbox.Loader;
import org.apache.pdfbox.pdmodel.PDDocument;
import org.apache.pdfbox.pdmodel.PDDocumentNameDictionary;
import org.apache.pdfbox.pdmodel.PDEmbeddedFilesNameTreeNode;
import org.apache.pdfbox.pdmodel.common.filespecification.PDComplexFileSpecification;
import org.apache.pdfbox.pdmodel.common.filespecification.PDEmbeddedFile;
/**
 * A custom exporter to write bib entries to an embedded bib file.
 * The entries are serialized to BibTeX and attached to the PDF as an
 * embedded file named {@code main.bib}.
 */
public class EmbeddedBibFilePdfExporter extends Exporter {

    public static String EMBEDDED_FILE_NAME = "main.bib";

    private final BibDatabaseMode bibDatabaseMode;
    private final BibEntryTypesManager bibEntryTypesManager;
    private final FieldPreferences fieldPreferences;

    public EmbeddedBibFilePdfExporter(BibDatabaseMode bibDatabaseMode, BibEntryTypesManager bibEntryTypesManager, FieldPreferences fieldPreferences) {
        super("bib", "Embedded BibTeX", StandardFileType.PDF);
        this.bibDatabaseMode = bibDatabaseMode;
        this.bibEntryTypesManager = bibEntryTypesManager;
        this.fieldPreferences = fieldPreferences;
    }

    /**
     * @param databaseContext the database to export from
     * @param file            the PDF file to embed the BibTeX into
     * @param entries         a list containing all entries that should be exported
     */
    @Override
    public void export(BibDatabaseContext databaseContext, Path file, List<BibEntry> entries) throws Exception {
        Objects.requireNonNull(databaseContext);
        Objects.requireNonNull(file);
        Objects.requireNonNull(entries);
        String bibString = getBibString(entries);
        embedBibTex(bibString, file);
    }

    /**
     * Embeds the given BibTeX source into the PDF at {@code path}, replacing a
     * previously embedded file of the same name if present. A no-op when the
     * path does not exist or is not a PDF.
     * <p>
     * Similar method: {@link XmpUtilWriter#writeXmp(Path, BibEntry, org.jabref.model.database.BibDatabase)}
     */
    private void embedBibTex(String bibTeX, Path path) throws IOException {
        if (!Files.exists(path) || !FileUtil.isPDFFile(path)) {
            return;
        }

        // Save to another file first.
        // Reason: Apache PDFBox does not support writing while the file is opened
        // See https://issues.apache.org/jira/browse/PDFBOX-4028
        Path newFile = Files.createTempFile("JabRef", "pdf");
        try {
            try (PDDocument document = Loader.loadPDF(path.toFile())) {
                PDDocumentNameDictionary nameDictionary = document.getDocumentCatalog().getNames();
                PDEmbeddedFilesNameTreeNode efTree;
                Map<String, PDComplexFileSpecification> names;
                if (nameDictionary == null) {
                    efTree = new PDEmbeddedFilesNameTreeNode();
                    names = new HashMap<>();
                    nameDictionary = new PDDocumentNameDictionary(document.getDocumentCatalog());
                    nameDictionary.setEmbeddedFiles(efTree);
                    document.getDocumentCatalog().setNames(nameDictionary);
                } else {
                    efTree = nameDictionary.getEmbeddedFiles();
                    if (efTree == null) {
                        efTree = new PDEmbeddedFilesNameTreeNode();
                        nameDictionary.setEmbeddedFiles(efTree);
                    }
                    names = efTree.getNames();
                    if (names == null) {
                        names = new HashMap<>();
                        efTree.setNames(names);
                    }
                }
                // efTree is guaranteed non-null here (assigned on every branch above),
                // so the former "if (efTree != null)" guard was dropped.

                PDComplexFileSpecification fileSpecification;
                if (names.containsKey(EMBEDDED_FILE_NAME)) {
                    fileSpecification = names.get(EMBEDDED_FILE_NAME);
                } else {
                    fileSpecification = new PDComplexFileSpecification();
                }

                // BUGFIX: the embedded file size must be the byte length of the UTF-8
                // stream, not the UTF-16 char count (they differ for non-ASCII content).
                byte[] bibTeXBytes = bibTeX.getBytes(StandardCharsets.UTF_8);
                InputStream inputStream = new ByteArrayInputStream(bibTeXBytes);
                fileSpecification.setFile(EMBEDDED_FILE_NAME);
                PDEmbeddedFile embeddedFile = new PDEmbeddedFile(document, inputStream);
                embeddedFile.setSubtype("text/x-bibtex");
                embeddedFile.setSize(bibTeXBytes.length);
                fileSpecification.setEmbeddedFile(embeddedFile);
                if (!names.containsKey(EMBEDDED_FILE_NAME)) {
                    try {
                        names.put(EMBEDDED_FILE_NAME, fileSpecification);
                    } catch (UnsupportedOperationException e) {
                        throw new IOException(Localization.lang("File '%0' is write protected.", path.toString()));
                    }
                }
                efTree.setNames(names);
                nameDictionary.setEmbeddedFiles(efTree);
                document.getDocumentCatalog().setNames(nameDictionary);

                document.save(newFile.toFile());
                FileUtil.copyFile(newFile, path, true);
            }
        } finally {
            // BUGFIX: delete the temp file even when loading/saving throws
            // (the old code only deleted it on the success path)
            Files.deleteIfExists(newFile);
        }
    }

    /**
     * Serializes the given entries to one BibTeX string using JabRef's standard newline.
     */
    private String getBibString(List<BibEntry> entries) throws IOException {
        StringWriter stringWriter = new StringWriter();
        BibWriter bibWriter = new BibWriter(stringWriter, OS.NEWLINE);
        FieldWriter fieldWriter = FieldWriter.buildIgnoreHashes(fieldPreferences);
        BibEntryWriter bibEntryWriter = new BibEntryWriter(fieldWriter, bibEntryTypesManager);
        for (BibEntry entry : entries) {
            bibEntryWriter.write(entry, bibWriter, bibDatabaseMode);
        }
        return stringWriter.toString();
    }
}
| 6,289
| 41.789116
| 150
|
java
|
null |
jabref-main/src/main/java/org/jabref/logic/exporter/Exporter.java
|
package org.jabref.logic.exporter;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.Collections;
import java.util.List;
import java.util.Optional;
import org.jabref.logic.journals.JournalAbbreviationRepository;
import org.jabref.logic.util.FileType;
import org.jabref.model.database.BibDatabase;
import org.jabref.model.database.BibDatabaseContext;
import org.jabref.model.entry.BibEntry;
import org.jabref.model.entry.LinkedFile;
import org.jabref.preferences.FilePreferences;
/**
 * Common base class of all export formats. An exporter is identified by a short
 * console id, carries a human-readable display name, and produces files of one
 * specific {@link FileType}.
 */
public abstract class Exporter {

    private final String id;
    private final String displayName;
    private final FileType fileType;

    public Exporter(String id, String displayName, FileType extension) {
        this.id = id;
        this.displayName = displayName;
        this.fileType = extension;
    }

    /**
     * Returns the one-word ID (used, for example, to identify the exporter in the console).
     */
    public String getId() {
        return id;
    }

    /**
     * Returns the name of the exporter (to display to the user).
     */
    public String getName() {
        return displayName;
    }

    /**
     * Returns the type of files this exporter creates.
     */
    public FileType getFileType() {
        return fileType;
    }

    @Override
    public String toString() {
        return displayName;
    }

    /**
     * Performs the export.
     *
     * @param databaseContext the database to export from
     * @param file            the file to write to
     * @param entries         a list containing all entries that should be exported
     */
    public abstract void export(BibDatabaseContext databaseContext, Path file, List<BibEntry> entries) throws Exception;

    /**
     * Performs the export; the default implementation ignores the extra arguments and
     * delegates to {@link #export(BibDatabaseContext, Path, List)}.
     */
    public void export(BibDatabaseContext databaseContext, Path file, List<BibEntry> entries, List<Path> fileDirForDatabase, JournalAbbreviationRepository abbreviationRepository) throws Exception {
        export(databaseContext, file, entries);
    }

    /**
     * Exports to all files linked to a given entry whose file type matches this exporter.
     *
     * @param databaseContext        the database to export from
     * @param filePreferences        the filePreferences to use for resolving paths
     * @param entryToWriteOn         the entry for which we want to write on all linked pdfs
     * @param entriesToWrite         the content that we want to export to the pdfs
     * @param abbreviationRepository the opened repository of journal abbreviations
     * @return whether any file was written on
     * @throws Exception if the writing fails
     */
    public boolean exportToAllFilesOfEntry(BibDatabaseContext databaseContext,
                                           FilePreferences filePreferences,
                                           BibEntry entryToWriteOn,
                                           List<BibEntry> entriesToWrite,
                                           JournalAbbreviationRepository abbreviationRepository) throws Exception {
        boolean writtenToAFile = false;
        for (LinkedFile linkedFile : entryToWriteOn.getFiles()) {
            // Only consider linked files of the type this exporter produces
            if (!linkedFile.getFileType().equals(fileType.getName())) {
                continue;
            }
            Optional<Path> resolvedPath = linkedFile.findIn(databaseContext, filePreferences);
            if (resolvedPath.isEmpty()) {
                continue;
            }
            export(databaseContext, resolvedPath.get(), entriesToWrite, Collections.emptyList(), abbreviationRepository);
            writtenToAFile = true;
        }
        return writtenToAFile;
    }

    /**
     * Exports bib-entries a file is linked to.
     * Behaviour in case the file is linked to different bib-entries depends on the implementation of {@link #export}:
     * if it overwrites existing information, only the last found bib-entry is effectively exported
     * (the previous exports are overwritten); if it extends existing information, all found bib-entries are exported.
     *
     * @param databaseContext        the database-context to export from
     * @param dataBase               the database to export from
     * @param filePreferences        the filePreferences to use for resolving paths
     * @param filePath               the path to the file we want to write on
     * @param abbreviationRepository the opened repository of journal abbreviations
     * @return whether the file was written on at least once
     * @throws Exception if the writing fails
     */
    public boolean exportToFileByPath(BibDatabaseContext databaseContext,
                                      BibDatabase dataBase,
                                      FilePreferences filePreferences,
                                      Path filePath,
                                      JournalAbbreviationRepository abbreviationRepository) throws Exception {
        if (!Files.exists(filePath)) {
            return false;
        }
        boolean writtenABibEntry = false;
        for (BibEntry entry : dataBase.getEntries()) {
            for (LinkedFile linkedFile : entry.getFiles()) {
                // Only consider linked files of the type this exporter produces
                if (!linkedFile.getFileType().equals(fileType.getName())) {
                    continue;
                }
                Optional<Path> resolvedPath = linkedFile.findIn(databaseContext.getFileDirectories(filePreferences));
                if (resolvedPath.isEmpty() || !Files.exists(resolvedPath.get())) {
                    continue;
                }
                if (Files.isSameFile(resolvedPath.get(), filePath)) {
                    export(databaseContext, filePath, List.of(entry), Collections.emptyList(), abbreviationRepository);
                    writtenABibEntry = true;
                }
            }
        }
        return writtenABibEntry;
    }
}
| 5,633
| 40.426471
| 197
|
java
|
null |
jabref-main/src/main/java/org/jabref/logic/exporter/ExporterFactory.java
|
package org.jabref.logic.exporter;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Objects;
import java.util.Optional;
import org.jabref.logic.bibtex.FieldPreferences;
import org.jabref.logic.l10n.Localization;
import org.jabref.logic.layout.LayoutFormatterPreferences;
import org.jabref.logic.util.StandardFileType;
import org.jabref.logic.xmp.XmpPreferences;
import org.jabref.model.database.BibDatabaseMode;
import org.jabref.model.entry.BibEntryTypesManager;
import org.jabref.preferences.PreferencesService;
/**
 * Registry of all export formats known to JabRef: the built-in exporters plus
 * the user's custom {@link TemplateExporter}s. Create instances via {@link #create}.
 */
public class ExporterFactory {

    private final List<Exporter> exporters;

    private ExporterFactory(List<Exporter> exporters) {
        this.exporters = Objects.requireNonNull(exporters);
    }

    /**
     * Builds the factory: instantiates every built-in exporter with the current
     * preferences and appends the user's custom export formats.
     */
    public static ExporterFactory create(PreferencesService preferencesService,
                                         BibEntryTypesManager entryTypesManager) {
        List<TemplateExporter> customFormats = preferencesService.getExportPreferences().getCustomExporters();
        LayoutFormatterPreferences layoutPreferences = preferencesService.getLayoutFormatterPreferences();
        SaveConfiguration saveConfiguration = preferencesService.getExportConfiguration();
        XmpPreferences xmpPreferences = preferencesService.getXmpPreferences();
        FieldPreferences fieldPreferences = preferencesService.getFieldPreferences();
        BibDatabaseMode bibDatabaseMode = preferencesService.getLibraryPreferences().getDefaultBibDatabaseMode();

        List<Exporter> exporters = new ArrayList<>();

        // Initialize build-in exporters
        exporters.add(new TemplateExporter("HTML", "html", "html", null, StandardFileType.HTML, layoutPreferences, saveConfiguration));
        exporters.add(new TemplateExporter(Localization.lang("Simple HTML"), "simplehtml", "simplehtml", null, StandardFileType.HTML, layoutPreferences, saveConfiguration));
        exporters.add(new TemplateExporter("DocBook 5.1", "docbook5", "docbook5", null, StandardFileType.XML, layoutPreferences, saveConfiguration));
        exporters.add(new TemplateExporter("DocBook 4", "docbook4", "docbook4", null, StandardFileType.XML, layoutPreferences, saveConfiguration));
        exporters.add(new TemplateExporter("DIN 1505", "din1505", "din1505winword", "din1505", StandardFileType.RTF, layoutPreferences, saveConfiguration));
        exporters.add(new TemplateExporter("BibO RDF", "bibordf", "bibordf", null, StandardFileType.RDF, layoutPreferences, saveConfiguration));
        exporters.add(new TemplateExporter(Localization.lang("HTML table"), "tablerefs", "tablerefs", "tablerefs", StandardFileType.HTML, layoutPreferences, saveConfiguration));
        exporters.add(new TemplateExporter(Localization.lang("HTML list"), "listrefs", "listrefs", "listrefs", StandardFileType.HTML, layoutPreferences, saveConfiguration));
        exporters.add(new TemplateExporter(Localization.lang("HTML table (with Abstract & BibTeX)"), "tablerefsabsbib", "tablerefsabsbib", "tablerefsabsbib", StandardFileType.HTML, layoutPreferences, saveConfiguration));
        exporters.add(new TemplateExporter("Harvard RTF", "harvard", "harvard", "harvard", StandardFileType.RTF, layoutPreferences, saveConfiguration));
        exporters.add(new TemplateExporter("ISO 690 RTF", "iso690rtf", "iso690RTF", "iso690rtf", StandardFileType.RTF, layoutPreferences, saveConfiguration));
        exporters.add(new TemplateExporter("ISO 690", "iso690txt", "iso690", "iso690txt", StandardFileType.TXT, layoutPreferences, saveConfiguration));
        exporters.add(new TemplateExporter("Endnote", "endnote", "EndNote", "endnote", StandardFileType.TXT, layoutPreferences, saveConfiguration));
        exporters.add(new TemplateExporter("OpenOffice/LibreOffice CSV", "oocsv", "openoffice-csv", "openoffice", StandardFileType.CSV, layoutPreferences, saveConfiguration));
        exporters.add(new TemplateExporter("RIS", "ris", "ris", "ris", StandardFileType.RIS, layoutPreferences, saveConfiguration, BlankLineBehaviour.DELETE_BLANKS));
        exporters.add(new TemplateExporter("MIS Quarterly", "misq", "misq", "misq", StandardFileType.RTF, layoutPreferences, saveConfiguration));
        exporters.add(new TemplateExporter("CSL YAML", "yaml", "yaml", null, StandardFileType.YAML, layoutPreferences, saveConfiguration, BlankLineBehaviour.DELETE_BLANKS));
        exporters.add(new OpenOfficeDocumentCreator());
        exporters.add(new OpenDocumentSpreadsheetCreator());
        exporters.add(new MSBibExporter());
        exporters.add(new ModsExporter());
        exporters.add(new XmpExporter(xmpPreferences));
        exporters.add(new XmpPdfExporter(xmpPreferences));
        exporters.add(new EmbeddedBibFilePdfExporter(bibDatabaseMode, entryTypesManager, fieldPreferences));

        // Now add custom export formats
        exporters.addAll(customFormats);

        return new ExporterFactory(exporters);
    }

    /**
     * Get a list of all exporters.
     *
     * @return A list containing all exporters
     */
    public List<Exporter> getExporters() {
        return Collections.unmodifiableList(exporters);
    }

    /**
     * Look up the named exporter (case-insensitive).
     *
     * @param consoleName The export name given in the JabRef console help information.
     * @return The exporter, or an empty option if no exporter with that name is registered.
     */
    public Optional<Exporter> getExporterByName(String consoleName) {
        return exporters.stream().filter(exporter -> exporter.getId().equalsIgnoreCase(consoleName)).findFirst();
    }
}
| 5,603
| 61.966292
| 220
|
java
|
null |
jabref-main/src/main/java/org/jabref/logic/exporter/GroupSerializer.java
|
package org.jabref.logic.exporter;
import java.util.ArrayList;
import java.util.List;
import javafx.scene.paint.Color;
import org.jabref.logic.util.MetadataSerializationConfiguration;
import org.jabref.logic.util.io.FileUtil;
import org.jabref.model.groups.AbstractGroup;
import org.jabref.model.groups.AllEntriesGroup;
import org.jabref.model.groups.AutomaticGroup;
import org.jabref.model.groups.AutomaticKeywordGroup;
import org.jabref.model.groups.AutomaticPersonsGroup;
import org.jabref.model.groups.ExplicitGroup;
import org.jabref.model.groups.GroupTreeNode;
import org.jabref.model.groups.KeywordGroup;
import org.jabref.model.groups.RegexKeywordGroup;
import org.jabref.model.groups.SearchGroup;
import org.jabref.model.groups.TexGroup;
import org.jabref.model.search.rules.SearchRules;
import org.jabref.model.strings.StringUtil;
public class GroupSerializer {
private static String serializeAllEntriesGroup() {
return MetadataSerializationConfiguration.ALL_ENTRIES_GROUP_ID;
}
private String serializeExplicitGroup(ExplicitGroup group) {
StringBuilder sb = new StringBuilder();
sb.append(MetadataSerializationConfiguration.EXPLICIT_GROUP_ID);
sb.append(StringUtil.quote(group.getName(), MetadataSerializationConfiguration.GROUP_UNIT_SEPARATOR, MetadataSerializationConfiguration.GROUP_QUOTE_CHAR));
sb.append(MetadataSerializationConfiguration.GROUP_UNIT_SEPARATOR);
sb.append(group.getHierarchicalContext().ordinal());
sb.append(MetadataSerializationConfiguration.GROUP_UNIT_SEPARATOR);
appendGroupDetails(sb, group);
return sb.toString();
}
private String serializeKeywordGroup(KeywordGroup group) {
Boolean isRegex = group instanceof RegexKeywordGroup;
StringBuilder sb = new StringBuilder();
sb.append(MetadataSerializationConfiguration.KEYWORD_GROUP_ID);
sb.append(StringUtil.quote(group.getName(), MetadataSerializationConfiguration.GROUP_UNIT_SEPARATOR, MetadataSerializationConfiguration.GROUP_QUOTE_CHAR));
sb.append(MetadataSerializationConfiguration.GROUP_UNIT_SEPARATOR);
sb.append(group.getHierarchicalContext().ordinal());
sb.append(MetadataSerializationConfiguration.GROUP_UNIT_SEPARATOR);
sb.append(StringUtil.quote(group.getSearchField().getName(), MetadataSerializationConfiguration.GROUP_UNIT_SEPARATOR, MetadataSerializationConfiguration.GROUP_QUOTE_CHAR));
sb.append(MetadataSerializationConfiguration.GROUP_UNIT_SEPARATOR);
sb.append(StringUtil.quote(group.getSearchExpression(), MetadataSerializationConfiguration.GROUP_UNIT_SEPARATOR, MetadataSerializationConfiguration.GROUP_QUOTE_CHAR));
sb.append(MetadataSerializationConfiguration.GROUP_UNIT_SEPARATOR);
sb.append(StringUtil.booleanToBinaryString(group.isCaseSensitive()));
sb.append(MetadataSerializationConfiguration.GROUP_UNIT_SEPARATOR);
sb.append(StringUtil.booleanToBinaryString(isRegex));
sb.append(MetadataSerializationConfiguration.GROUP_UNIT_SEPARATOR);
appendGroupDetails(sb, group);
return sb.toString();
}
private String serializeSearchGroup(SearchGroup group) {
StringBuilder sb = new StringBuilder();
sb.append(MetadataSerializationConfiguration.SEARCH_GROUP_ID);
sb.append(StringUtil.quote(group.getName(), MetadataSerializationConfiguration.GROUP_UNIT_SEPARATOR, MetadataSerializationConfiguration.GROUP_QUOTE_CHAR));
sb.append(MetadataSerializationConfiguration.GROUP_UNIT_SEPARATOR);
sb.append(group.getHierarchicalContext().ordinal());
sb.append(MetadataSerializationConfiguration.GROUP_UNIT_SEPARATOR);
sb.append(StringUtil.quote(group.getSearchExpression(), MetadataSerializationConfiguration.GROUP_UNIT_SEPARATOR, MetadataSerializationConfiguration.GROUP_QUOTE_CHAR));
sb.append(MetadataSerializationConfiguration.GROUP_UNIT_SEPARATOR);
sb.append(StringUtil.booleanToBinaryString(group.getSearchFlags().contains(SearchRules.SearchFlags.CASE_SENSITIVE)));
sb.append(MetadataSerializationConfiguration.GROUP_UNIT_SEPARATOR);
sb.append(StringUtil.booleanToBinaryString(group.getSearchFlags().contains(SearchRules.SearchFlags.REGULAR_EXPRESSION)));
sb.append(MetadataSerializationConfiguration.GROUP_UNIT_SEPARATOR);
appendGroupDetails(sb, group);
return sb.toString();
}
private void appendGroupDetails(StringBuilder builder, AbstractGroup group) {
builder.append(StringUtil.booleanToBinaryString(group.isExpanded()));
builder.append(MetadataSerializationConfiguration.GROUP_UNIT_SEPARATOR);
builder.append(group.getColor().map(Color::toString).orElse(""));
builder.append(MetadataSerializationConfiguration.GROUP_UNIT_SEPARATOR);
builder.append(group.getIconName().orElse(""));
builder.append(MetadataSerializationConfiguration.GROUP_UNIT_SEPARATOR);
builder.append(group.getDescription().orElse(""));
builder.append(MetadataSerializationConfiguration.GROUP_UNIT_SEPARATOR);
}
/**
* Returns a textual representation of this node and its children. This
* representation contains both the tree structure and the textual
* representations of the group associated with each node.
* Every node is one entry in the list of strings.
*
* @return a representation of the tree based at this node as a list of strings
*/
public List<String> serializeTree(GroupTreeNode node) {
List<String> representation = new ArrayList<>();
// Append current node
representation.add(String.valueOf(node.getLevel()) + ' ' + serializeGroup(node.getGroup()));
// Append children
for (GroupTreeNode child : node.getChildren()) {
representation.addAll(serializeTree(child));
}
return representation;
}
private String serializeGroup(AbstractGroup group) {
if (group instanceof AllEntriesGroup) {
return serializeAllEntriesGroup();
} else if (group instanceof ExplicitGroup explicitGroup) {
return serializeExplicitGroup(explicitGroup);
} else if (group instanceof KeywordGroup keywordGroup) {
return serializeKeywordGroup(keywordGroup);
} else if (group instanceof SearchGroup searchGroup) {
return serializeSearchGroup(searchGroup);
} else if (group instanceof AutomaticKeywordGroup keywordGroup) {
return serializeAutomaticKeywordGroup(keywordGroup);
} else if (group instanceof AutomaticPersonsGroup personsGroup) {
return serializeAutomaticPersonsGroup(personsGroup);
} else if (group instanceof TexGroup texGroup) {
return serializeTexGroup(texGroup);
} else {
throw new UnsupportedOperationException("Don't know how to serialize group" + group.getClass().getName());
}
}
private String serializeTexGroup(TexGroup group) {
StringBuilder sb = new StringBuilder();
sb.append(MetadataSerializationConfiguration.TEX_GROUP_ID);
sb.append(StringUtil.quote(group.getName(), MetadataSerializationConfiguration.GROUP_UNIT_SEPARATOR, MetadataSerializationConfiguration.GROUP_QUOTE_CHAR));
sb.append(MetadataSerializationConfiguration.GROUP_UNIT_SEPARATOR);
sb.append(group.getHierarchicalContext().ordinal());
sb.append(MetadataSerializationConfiguration.GROUP_UNIT_SEPARATOR);
sb.append(StringUtil.quote(FileUtil.toPortableString(group.getFilePath()), MetadataSerializationConfiguration.GROUP_UNIT_SEPARATOR, MetadataSerializationConfiguration.GROUP_QUOTE_CHAR));
sb.append(MetadataSerializationConfiguration.GROUP_UNIT_SEPARATOR);
appendGroupDetails(sb, group);
return sb.toString();
}
private String serializeAutomaticPersonsGroup(AutomaticPersonsGroup group) {
StringBuilder sb = new StringBuilder();
sb.append(MetadataSerializationConfiguration.AUTOMATIC_PERSONS_GROUP_ID);
appendAutomaticGroupDetails(sb, group);
sb.append(StringUtil.quote(group.getField().getName(), MetadataSerializationConfiguration.GROUP_UNIT_SEPARATOR, MetadataSerializationConfiguration.GROUP_QUOTE_CHAR));
sb.append(MetadataSerializationConfiguration.GROUP_UNIT_SEPARATOR);
appendGroupDetails(sb, group);
return sb.toString();
}
private void appendAutomaticGroupDetails(StringBuilder builder, AutomaticGroup group) {
builder.append(StringUtil.quote(group.getName(), MetadataSerializationConfiguration.GROUP_UNIT_SEPARATOR, MetadataSerializationConfiguration.GROUP_QUOTE_CHAR));
builder.append(MetadataSerializationConfiguration.GROUP_UNIT_SEPARATOR);
builder.append(group.getHierarchicalContext().ordinal());
builder.append(MetadataSerializationConfiguration.GROUP_UNIT_SEPARATOR);
}
private String serializeAutomaticKeywordGroup(AutomaticKeywordGroup group) {
StringBuilder sb = new StringBuilder();
sb.append(MetadataSerializationConfiguration.AUTOMATIC_KEYWORD_GROUP_ID);
appendAutomaticGroupDetails(sb, group);
sb.append(StringUtil.quote(group.getField().getName(), MetadataSerializationConfiguration.GROUP_UNIT_SEPARATOR, MetadataSerializationConfiguration.GROUP_QUOTE_CHAR));
sb.append(MetadataSerializationConfiguration.GROUP_UNIT_SEPARATOR);
sb.append(StringUtil.quote(group.getKeywordDelimiter().toString(), MetadataSerializationConfiguration.GROUP_UNIT_SEPARATOR, MetadataSerializationConfiguration.GROUP_QUOTE_CHAR));
sb.append(MetadataSerializationConfiguration.GROUP_UNIT_SEPARATOR);
sb.append(StringUtil.quote(group.getKeywordHierarchicalDelimiter().toString(), MetadataSerializationConfiguration.GROUP_UNIT_SEPARATOR, MetadataSerializationConfiguration.GROUP_QUOTE_CHAR));
sb.append(MetadataSerializationConfiguration.GROUP_UNIT_SEPARATOR);
appendGroupDetails(sb, group);
return sb.toString();
}
}
| 10,095
| 54.472527
| 198
|
java
|
null |
jabref-main/src/main/java/org/jabref/logic/exporter/MSBibExporter.java
|
package org.jabref.logic.exporter;
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.nio.file.Path;
import java.util.List;
import java.util.Objects;
import javax.xml.transform.OutputKeys;
import javax.xml.transform.Transformer;
import javax.xml.transform.TransformerException;
import javax.xml.transform.TransformerFactory;
import javax.xml.transform.TransformerFactoryConfigurationError;
import javax.xml.transform.dom.DOMSource;
import javax.xml.transform.stream.StreamResult;
import org.jabref.logic.msbib.MSBibDatabase;
import org.jabref.logic.util.StandardFileType;
import org.jabref.model.database.BibDatabaseContext;
import org.jabref.model.entry.BibEntry;
/**
 * Exporter that writes the selected entries as MSBib XML (the bibliography
 * format used by MS Office 2007 and later).
 */
class MSBibExporter extends Exporter {

    public MSBibExporter() {
        super("MSBib", "MS Office 2007", StandardFileType.XML);
    }

    /**
     * Exports {@code entries} from {@code databaseContext} into {@code file}.
     * Does nothing when the entry list is empty.
     *
     * @throws SaveException when the XML transformation or the file write fails
     */
    @Override
    public void export(final BibDatabaseContext databaseContext, final Path file,
                       List<BibEntry> entries) throws SaveException {
        Objects.requireNonNull(databaseContext);
        Objects.requireNonNull(entries);

        if (entries.isEmpty()) {
            return;
        }

        MSBibDatabase msBibDatabase = new MSBibDatabase(databaseContext.getDatabase(), entries);

        // Force UTF-8 output: XML export in other encodings caused problems.
        try (AtomicFileWriter writer = new AtomicFileWriter(file, StandardCharsets.UTF_8)) {
            try {
                Transformer transformer = TransformerFactory.newInstance().newTransformer();
                transformer.setOutputProperty(OutputKeys.INDENT, "yes");
                transformer.transform(new DOMSource(msBibDatabase.getDomForExport()), new StreamResult(writer));
            } catch (TransformerException | IllegalArgumentException | TransformerFactoryConfigurationError e) {
                throw new SaveException(e);
            }
        } catch (IOException ex) {
            throw new SaveException(ex);
        }
    }
}
| 2,138
| 35.254237
| 112
|
java
|
null |
jabref-main/src/main/java/org/jabref/logic/exporter/MetaDataSerializer.java
|
package org.jabref.logic.exporter;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.StringJoiner;
import java.util.TreeMap;
import java.util.stream.Collectors;
import org.jabref.logic.citationkeypattern.AbstractCitationKeyPattern;
import org.jabref.logic.citationkeypattern.GlobalCitationKeyPattern;
import org.jabref.logic.cleanup.FieldFormatterCleanups;
import org.jabref.logic.util.OS;
import org.jabref.model.entry.BibEntryType;
import org.jabref.model.entry.field.BibField;
import org.jabref.model.entry.field.FieldFactory;
import org.jabref.model.entry.types.EntryType;
import org.jabref.model.groups.GroupTreeNode;
import org.jabref.model.metadata.ContentSelector;
import org.jabref.model.metadata.MetaData;
import org.jabref.model.strings.StringUtil;
/**
 * Serializes {@link MetaData} into the key/value string form that is written
 * into {@code @Comment} entries of a .bib file.
 * <p>
 * Reading is done at {@link org.jabref.logic.importer.util.MetaDataParser}
 */
public class MetaDataSerializer {

    // Utility class: no instances.
    private MetaDataSerializer() {
    }

    /**
     * Writes all data in the format <key, serialized data>.
     *
     * @param metaData             the metadata to serialize
     * @param globalCiteKeyPattern global pattern used to decide which citation
     *                             key patterns are non-default and must be stored
     * @return map from metadata key to its serialized value, sorted by key
     */
    public static Map<String, String> getSerializedStringMap(MetaData metaData,
                                                             GlobalCitationKeyPattern globalCiteKeyPattern) {
        // metadata-key, list of contents
        //  - contents to be separated by OS.NEWLINE
        //  - each meta data item is written as separate @Comment entry - see org.jabref.logic.exporter.BibtexDatabaseWriter.writeMetaDataItem
        Map<String, List<String>> stringyMetaData = new HashMap<>();

        // First write all meta data except groups
        metaData.getSaveOrderConfig().ifPresent(
                saveOrderConfig -> stringyMetaData.put(MetaData.SAVE_ORDER_CONFIG, saveOrderConfig.getAsStringList()));
        metaData.getSaveActions().ifPresent(
                saveActions -> stringyMetaData.put(MetaData.SAVE_ACTIONS, saveActions.getAsStringList(OS.NEWLINE)));
        if (metaData.isProtected()) {
            stringyMetaData.put(MetaData.PROTECTED_FLAG_META, Collections.singletonList("true"));
        }
        stringyMetaData.putAll(serializeCiteKeyPattern(metaData, globalCiteKeyPattern));
        metaData.getMode().ifPresent(
                mode -> stringyMetaData.put(MetaData.DATABASE_TYPE, Collections.singletonList(mode.getAsString())));
        metaData.getDefaultFileDirectory().ifPresent(
                path -> stringyMetaData.put(MetaData.FILE_DIRECTORY, Collections.singletonList(path.trim())));
        // Per-user settings get the user name appended to the metadata key.
        metaData.getUserFileDirectories().forEach((user, path) -> stringyMetaData
                .put(MetaData.FILE_DIRECTORY + '-' + user, Collections.singletonList(path.trim())));
        metaData.getLatexFileDirectories().forEach((user, path) -> stringyMetaData
                .put(MetaData.FILE_DIRECTORY_LATEX + '-' + user, Collections.singletonList(path.toString().trim())));
        metaData.getVersionDBStructure().ifPresent(
                versionDBStructure -> stringyMetaData.put(MetaData.VERSION_DB_STRUCT, Collections.singletonList(versionDBStructure.trim())));

        for (ContentSelector selector : metaData.getContentSelectorList()) {
            stringyMetaData.put(MetaData.SELECTOR_META_PREFIX + selector.getField().getName(), selector.getValues());
        }

        Map<String, String> serializedMetaData = serializeMetaData(stringyMetaData);

        // Write groups if present.
        // Skip this if only the root node exists (which is always the AllEntriesGroup).
        metaData.getGroups().filter(root -> root.getNumberOfChildren() > 0).ifPresent(
                root -> serializedMetaData.put(MetaData.GROUPSTREE, serializeGroups(root)));

        // finally add all unknown meta data items to the serialization map
        Map<String, List<String>> unknownMetaData = metaData.getUnknownMetaData();
        for (Map.Entry<String, List<String>> entry : unknownMetaData.entrySet()) {
            // The last "MetaData.SEPARATOR_STRING" adds compatibility to JabRef v5.9 and earlier
            StringJoiner value = new StringJoiner(MetaData.SEPARATOR_STRING + OS.NEWLINE, OS.NEWLINE, MetaData.SEPARATOR_STRING + OS.NEWLINE);
            for (String line : entry.getValue()) {
                // Escape separator characters occurring inside the payload.
                value.add(line.replace(MetaData.SEPARATOR_STRING, "\\" + MetaData.SEPARATOR_STRING));
            }
            serializedMetaData.put(entry.getKey(), value.toString());
        }

        return serializedMetaData;
    }

    /**
     * Joins each metadata item list into a single separator-terminated string.
     * Empty item lists and empty serializations are dropped.
     */
    private static Map<String, String> serializeMetaData(Map<String, List<String>> stringyMetaData) {
        // TreeMap gives a deterministic (sorted) key order in the output.
        Map<String, String> serializedMetaData = new TreeMap<>();
        for (Map.Entry<String, List<String>> metaItem : stringyMetaData.entrySet()) {
            List<String> itemList = metaItem.getValue();
            if (itemList.isEmpty()) {
                // Only add non-empty values
                continue;
            }

            boolean isSaveActions = metaItem.getKey().equals(MetaData.SAVE_ACTIONS);
            // The last "MetaData.SEPARATOR_STRING" adds compatibility to JabRef v5.9 and earlier
            StringJoiner joiner = new StringJoiner(MetaData.SEPARATOR_STRING, "", MetaData.SEPARATOR_STRING);
            boolean lastWasSaveActionsEnablement = false;
            for (String dataItem : itemList) {
                String string;
                if (lastWasSaveActionsEnablement) {
                    string = OS.NEWLINE;
                } else {
                    string = "";
                }
                string += StringUtil.quote(dataItem, MetaData.SEPARATOR_STRING, MetaData.ESCAPE_CHARACTER);
                // in case of save actions, add an additional newline after the enabled flag
                lastWasSaveActionsEnablement = isSaveActions
                        && (FieldFormatterCleanups.ENABLED.equals(dataItem)
                        || FieldFormatterCleanups.DISABLED.equals(dataItem));
                joiner.add(string);
            }
            String serializedItem = joiner.toString();
            if (!serializedItem.isEmpty()) {
                // Only add non-empty values
                serializedMetaData.put(metaItem.getKey(), serializedItem);
            }
        }
        return serializedMetaData;
    }

    /**
     * Collects per-entry-type citation key patterns that differ from the
     * global pattern, plus the database-wide default pattern if set.
     */
    private static Map<String, List<String>> serializeCiteKeyPattern(MetaData metaData, GlobalCitationKeyPattern globalCitationKeyPattern) {
        Map<String, List<String>> stringyPattern = new HashMap<>();
        AbstractCitationKeyPattern citationKeyPattern = metaData.getCiteKeyPattern(globalCitationKeyPattern);
        for (EntryType key : citationKeyPattern.getAllKeys()) {
            if (!citationKeyPattern.isDefaultValue(key)) {
                List<String> data = new ArrayList<>();
                data.add(citationKeyPattern.getValue(key).get(0));
                String metaDataKey = MetaData.PREFIX_KEYPATTERN + key.getName();
                stringyPattern.put(metaDataKey, data);
            }
        }
        if ((citationKeyPattern.getDefaultValue() != null) && !citationKeyPattern.getDefaultValue().isEmpty()) {
            List<String> data = new ArrayList<>();
            data.add(citationKeyPattern.getDefaultValue().get(0));
            stringyPattern.put(MetaData.KEYPATTERNDEFAULT, data);
        }
        return stringyPattern;
    }

    /**
     * Serializes the group tree: one quoted, separator-terminated line per
     * group node, preceded by a leading newline.
     */
    private static String serializeGroups(GroupTreeNode root) {
        StringBuilder stringBuilder = new StringBuilder();
        stringBuilder.append(OS.NEWLINE);
        for (String groupNode : new GroupSerializer().serializeTree(root)) {
            stringBuilder.append(StringUtil.quote(groupNode, MetaData.SEPARATOR_STRING, MetaData.ESCAPE_CHARACTER));
            stringBuilder.append(MetaData.SEPARATOR_STRING);
            stringBuilder.append(OS.NEWLINE);
        }
        return stringBuilder.toString();
    }

    /**
     * Serializes a custom entry type definition in the form
     * {@code <flag><name>: req[...] opt[...]}.
     */
    public static String serializeCustomEntryTypes(BibEntryType entryType) {
        StringBuilder builder = new StringBuilder();
        builder.append(MetaData.ENTRYTYPE_FLAG);
        builder.append(entryType.getType().getName());
        builder.append(": req[");
        builder.append(FieldFactory.serializeOrFieldsList(entryType.getRequiredFields()));
        builder.append("] opt[");
        builder.append(FieldFactory.serializeFieldsList(
                entryType.getOptionalFields()
                         .stream()
                         .map(BibField::field)
                         .collect(Collectors.toList())));
        builder.append("]");
        return builder.toString();
    }
}
| 8,573
| 48.848837
| 142
|
java
|
null |
jabref-main/src/main/java/org/jabref/logic/exporter/ModsExporter.java
|
package org.jabref.logic.exporter;
import java.io.IOException;
import java.io.OutputStream;
import java.io.StringReader;
import java.io.StringWriter;
import java.math.BigInteger;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.ArrayList;
import java.util.Comparator;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.TreeMap;
import javax.xml.stream.XMLOutputFactory;
import javax.xml.stream.XMLStreamException;
import javax.xml.stream.XMLStreamWriter;
import javax.xml.transform.OutputKeys;
import javax.xml.transform.Transformer;
import javax.xml.transform.TransformerException;
import javax.xml.transform.TransformerFactory;
import javax.xml.transform.stream.StreamResult;
import javax.xml.transform.stream.StreamSource;
import org.jabref.logic.util.StandardFileType;
import org.jabref.model.database.BibDatabaseContext;
import org.jabref.model.entry.BibEntry;
import org.jabref.model.entry.field.Field;
import org.jabref.model.entry.field.StandardField;
import org.jabref.model.entry.field.UnknownField;
import org.jabref.model.entry.types.EntryType;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * TemplateExporter for exporting in MODS XML format.
 */
class ModsExporter extends Exporter {

    private static final String MODS_NAMESPACE_URI = "http://www.loc.gov/mods/v3";
    private static final String MINUS = "-";
    private static final String DOUBLE_MINUS = "--";
    private static final String MODS_SCHEMA_LOCATION = "http://www.loc.gov/standards/mods/v3/mods-3-6.xsd";
    private static final Logger LOGGER = LoggerFactory.getLogger(ModsExporter.class);

    public ModsExporter() {
        super("mods", "MODS", StandardFileType.XML);
    }

    /**
     * Writes {@code entries} as a MODS collection to {@code file}.
     * One {@code mods:mods} element is emitted per entry; a citation key, if
     * present, becomes the element's ID and an identifier child.
     *
     * @throws SaveException when XML writing, transformation or file I/O fails
     */
    @Override
    public void export(final BibDatabaseContext databaseContext, final Path file, List<BibEntry> entries) throws SaveException {
        Objects.requireNonNull(databaseContext);
        Objects.requireNonNull(entries);
        if (entries.isEmpty()) { // Only export if entries exist
            return;
        }
        XMLStreamWriter writer = null;
        try {
            StringWriter sw = new StringWriter();
            // writer is not an auto closable!
            writer = createWriter(sw);
            for (BibEntry bibEntry : entries) {
                if (bibEntry.getCitationKey().isPresent()) {
                    String citekey = bibEntry.getCitationKey().get();
                    // addIdentifier opens the mods element itself for citekeys
                    // (so it can set the ID attribute) — see addIdentifier.
                    addIdentifier(writer, new UnknownField("citekey"), citekey);
                } else {
                    writer.writeStartElement("mods", "mods", MODS_NAMESPACE_URI);
                }
                // TreeMap sorted by field name gives a deterministic output order.
                Map<Field, String> fieldMap = new TreeMap<>(Comparator.comparing(Field::getName));
                fieldMap.putAll(bibEntry.getFieldMap());
                addGenre(writer, bibEntry.getType());

                List<String> originItems = new ArrayList<>();
                List<String> parts = new ArrayList<>();
                for (Map.Entry<Field, String> entry : fieldMap.entrySet()) {
                    Field field = entry.getKey();
                    String value = entry.getValue();
                    if (StandardField.AUTHOR == field) {
                        handleAuthors(writer, value);
                    } else if (new UnknownField("affiliation").equals(field)) {
                        addAffiliation(writer, value);
                    } else if (StandardField.ABSTRACT == field) {
                        addAbstract(writer, value);
                    } else if (StandardField.TITLE == field) {
                        addTitle(writer, value);
                    } else if (StandardField.LANGUAGE == field) {
                        addLanguage(writer, value);
                    } else if (StandardField.LOCATION == field) {
                        addLocation(writer, value);
                    } else if (StandardField.URL == field) {
                        addUrl(writer, value);
                    } else if (StandardField.NOTE == field) {
                        addNote(writer, value);
                    } else if (StandardField.KEYWORDS == field) {
                        addKeyWords(writer, value);
                    } else if (StandardField.URI == field) {
                        addIdentifier(writer, StandardField.URI, value);
                    } else if (StandardField.ISBN == field) {
                        addIdentifier(writer, StandardField.ISBN, value);
                    } else if (StandardField.ISSN == field) {
                        addIdentifier(writer, StandardField.ISSN, value);
                    } else if (StandardField.DOI == field) {
                        addIdentifier(writer, StandardField.DOI, value);
                    } else if (StandardField.PMID == field) {
                        addIdentifier(writer, StandardField.PMID, value);
                    } else if (StandardField.PAGES == field) {
                        addPart(parts, value);
                    } else if (StandardField.VOLUME == field) {
                        addPart(parts, value);
                    } else if (StandardField.ISSUE == field) {
                        addPart(parts, value);
                    }
                    // Origin-related fields (year, publisher, edition, ...) are
                    // only collected here and written below inside originInfo.
                    trackOriginInformation(originItems, field, value);
                }
                writeOriginInformation(writer, originItems, fieldMap);
                // Write related items
                writeRelatedInformation(writer, parts, fieldMap);
                writer.writeEndElement(); // end mods
            }
            writer.writeEndDocument();
            writerFormatted(file, sw);
        } catch (XMLStreamException | IOException | TransformerException ex) {
            throw new SaveException(ex);
        } finally {
            try {
                if (writer != null) {
                    writer.flush();
                    writer.close();
                }
            } catch (XMLStreamException e) {
                LOGGER.error("Error closing XML writer", e);
            }
        }
    }

    /**
     * Creates the stream writer, writes the XML declaration and opens the
     * {@code mods:modsCollection} root element with all namespace declarations.
     */
    private XMLStreamWriter createWriter(StringWriter sw) throws XMLStreamException {
        XMLOutputFactory outputFactory = XMLOutputFactory.newFactory();
        XMLStreamWriter writer = outputFactory.createXMLStreamWriter(new StreamResult(sw));
        writer.writeDTD("<?xml version=\"1.0\" encoding=\"UTF-8\" standalone=\"yes\"?>\n");
        writer.writeStartElement("mods", "modsCollection", MODS_NAMESPACE_URI);
        writer.writeNamespace("mods", MODS_NAMESPACE_URI);
        writer.writeNamespace("ns2", "http://www.w3.org/1999/xlink");
        writer.writeNamespace("xsi", "http://www.w3.org/2001/XMLSchema-instance");
        writer.writeAttribute("xsi", "http://www.w3.org/2001/XMLSchema-instance", "schemaLocation", MODS_SCHEMA_LOCATION);
        return writer;
    }

    /**
     * Re-parses the in-memory XML and writes it to {@code file} with 4-space
     * indentation (pretty printing).
     */
    private void writerFormatted(Path file, StringWriter sw) throws TransformerException, IOException {
        Transformer transformer = TransformerFactory.newInstance().newTransformer();
        transformer.setOutputProperty(OutputKeys.INDENT, "yes");
        transformer.setOutputProperty(OutputKeys.STANDALONE, "yes");
        transformer.setOutputProperty("{http://xml.apache.org/xslt}indent-amount", "4");
        try (OutputStream outputStream = Files.newOutputStream(file)) {
            transformer.transform(new StreamSource(new StringReader(sw.toString())), new StreamResult(outputStream));
        }
    }

    /**
     * Writes the {@code originInfo} element. When no origin-related fields were
     * collected, an empty element is written instead.
     */
    private void writeOriginInformation(XMLStreamWriter writer, List<String> originItems, Map<Field, String> fieldMap) throws XMLStreamException {
        if (originItems.isEmpty()) {
            writer.writeEmptyElement("mods", "originInfo", MODS_NAMESPACE_URI);
        } else {
            writer.writeStartElement("mods", "originInfo", MODS_NAMESPACE_URI);
            for (Map.Entry<Field, String> entry : fieldMap.entrySet()) {
                Field field = entry.getKey();
                String value = entry.getValue();
                addOriginInformation(writer, field, value);
            }
            writer.writeEndElement();
        }
    }

    /**
     * Writes the host {@code relatedItem} (journal plus part information)
     * followed by a fixed {@code typeOfResource} of "text".
     */
    private void writeRelatedInformation(XMLStreamWriter writer, List<String> parts, Map<Field, String> fieldMap) throws XMLStreamException {
        writer.writeStartElement("mods", "relatedItem", MODS_NAMESPACE_URI);
        writer.writeAttribute("type", "host");
        for (Map.Entry<Field, String> entry : fieldMap.entrySet()) {
            Field field = entry.getKey();
            String value = entry.getValue();
            if (StandardField.JOURNAL == field) {
                addJournal(writer, value);
            }
        }
        writePartInformation(writer, parts, fieldMap);
        writer.writeEndElement(); // end relatedItem
        writer.writeStartElement("mods", "typeOfResource", MODS_NAMESPACE_URI);
        writer.writeCharacters("text");
        writer.writeEndElement(); // end typeOfResource
    }

    /**
     * Writes the {@code part} element (pages, volume, issue); empty element
     * when no part fields were collected.
     */
    private void writePartInformation(XMLStreamWriter writer, List<String> parts, Map<Field, String> fieldMap) throws XMLStreamException {
        if (parts.isEmpty()) {
            writer.writeEmptyElement("mods", "part", MODS_NAMESPACE_URI);
        } else {
            writer.writeStartElement("mods", "part", MODS_NAMESPACE_URI);
            for (Map.Entry<Field, String> entry : fieldMap.entrySet()) {
                Field field = entry.getKey();
                String value = entry.getValue();
                if (StandardField.PAGES == field) {
                    addPages(writer, value);
                } else if (StandardField.VOLUME == field) {
                    addDetail(writer, StandardField.VOLUME, value);
                } else if (StandardField.ISSUE == field) {
                    addDetail(writer, StandardField.ISSUE, value);
                }
            }
            writer.writeEndElement(); // end part
        }
    }

    // Collects values of origin-related fields; the presence of any item makes
    // writeOriginInformation emit a non-empty originInfo element.
    private void trackOriginInformation(List<String> originItems, Field field, String value) {
        if (field.equals(StandardField.YEAR)) {
            originItems.add(value);
        } else if (field.equals(new UnknownField("created"))) {
            originItems.add(value);
        } else if (field.equals(StandardField.MODIFICATIONDATE)) {
            originItems.add(value);
        } else if (field.equals(StandardField.CREATIONDATE)) {
            originItems.add(value);
        } else if (StandardField.PUBLISHER == field) {
            originItems.add(value);
        } else if (field.equals(new UnknownField("issuance"))) {
            originItems.add(value);
        } else if (field.equals(StandardField.ADDRESS)) {
            originItems.add(value);
        } else if (field.equals(StandardField.EDITION)) {
            originItems.add(value);
        }
    }

    // Collects a part-related field value (pages/volume/issue).
    private void addPart(List<String> part, String value) {
        part.add(value);
    }

    // Writes the entry type name as the MODS genre.
    private void addGenre(XMLStreamWriter writer, EntryType entryType) throws XMLStreamException {
        writer.writeStartElement("mods", "genre", MODS_NAMESPACE_URI);
        writer.writeCharacters(entryType.getName());
        writer.writeEndElement();
    }

    private void addAbstract(XMLStreamWriter writer, String value) throws XMLStreamException {
        writer.writeStartElement("mods", "abstract", MODS_NAMESPACE_URI);
        writer.writeCharacters(value);
        writer.writeEndElement(); // end abstract
    }

    private void addTitle(XMLStreamWriter writer, String value) throws XMLStreamException {
        writer.writeStartElement("mods", "titleInfo", MODS_NAMESPACE_URI);
        writer.writeStartElement("mods", "title", MODS_NAMESPACE_URI);
        writer.writeCharacters(value);
        writer.writeEndElement(); // end title
        writer.writeEndElement(); // end titleInfo
    }

    private void addAffiliation(XMLStreamWriter writer, String value) throws XMLStreamException {
        writer.writeStartElement("mods", "name", MODS_NAMESPACE_URI);
        writer.writeStartElement("mods", "affiliation", MODS_NAMESPACE_URI);
        writer.writeCharacters(value);
        writer.writeEndElement(); // end affiliation
        writer.writeEndElement(); // end name
    }

    // Splits on ", " — each piece becomes its own physicalLocation element.
    private void addLocation(XMLStreamWriter writer, String value) throws XMLStreamException {
        writer.writeStartElement("mods", "location", MODS_NAMESPACE_URI);
        String[] locations = value.split(", ");
        for (String location : locations) {
            writer.writeStartElement("mods", "physicalLocation", MODS_NAMESPACE_URI);
            writer.writeCharacters(location);
            writer.writeEndElement();
        }
        writer.writeEndElement();
    }

    // Splits on ", " — each piece becomes its own note element.
    private void addNote(XMLStreamWriter writer, String value) throws XMLStreamException {
        String[] notes = value.split(", ");
        for (String note : notes) {
            writer.writeStartElement("mods", "note", MODS_NAMESPACE_URI);
            writer.writeCharacters(note);
            writer.writeEndElement();
        }
    }

    // All URLs (", "-separated) go inside a single location element.
    private void addUrl(XMLStreamWriter writer, String value) throws XMLStreamException {
        String[] urls = value.split(", ");
        writer.writeStartElement("mods", "location", MODS_NAMESPACE_URI);
        for (String url : urls) {
            writer.writeStartElement("mods", "url", MODS_NAMESPACE_URI);
            writer.writeCharacters(url);
            writer.writeEndElement();
        }
        writer.writeEndElement();
    }

    private void addJournal(XMLStreamWriter writer, String value) throws XMLStreamException { // this may also need to be called within second for loop?
        // Start TitleInfoDefinition
        writer.writeStartElement("mods", "titleInfo", MODS_NAMESPACE_URI);
        // Write title element
        writer.writeStartElement("mods", "title", MODS_NAMESPACE_URI);
        writer.writeCharacters(value);
        writer.writeEndElement(); // End title element
        // End TitleInfoDefinition
        writer.writeEndElement(); // End titleInfo element
    }

    private void addLanguage(XMLStreamWriter writer, String value) throws XMLStreamException {
        writer.writeStartElement("mods", "language", MODS_NAMESPACE_URI);
        writer.writeStartElement("mods", "languageTerm", MODS_NAMESPACE_URI);
        writer.writeCharacters(value);
        writer.writeEndElement(); // end languageTerm
        writer.writeEndElement(); // end language
    }

    /**
     * Writes page information: "a--b" or "a-b" become start/end elements, a
     * plain number becomes a total-page-count extent.
     * NOTE(review): {@code new BigInteger(value)} throws NumberFormatException
     * for non-numeric page strings (e.g. "xii"); that exception is not caught
     * here and would abort the export — verify whether callers sanitize pages.
     */
    private void addPages(XMLStreamWriter writer, String value) throws XMLStreamException {
        if (value.contains(DOUBLE_MINUS)) {
            addStartAndEndPage(writer, value, DOUBLE_MINUS);
        } else if (value.contains(MINUS)) {
            addStartAndEndPage(writer, value, MINUS);
        } else {
            BigInteger total = new BigInteger(value);
            writer.writeStartElement("mods", "extent", MODS_NAMESPACE_URI);
            writer.writeStartElement("mods", "total", MODS_NAMESPACE_URI);
            writer.writeCharacters(total.toString());
            writer.writeEndElement();
            writer.writeEndElement();
        }
    }

    // Each ", "-separated keyword becomes its own subject/topic pair.
    private void addKeyWords(XMLStreamWriter writer, String value) throws XMLStreamException {
        String[] keywords = value.split(", ");
        for (String keyword : keywords) {
            writer.writeStartElement("mods", "subject", MODS_NAMESPACE_URI);
            writer.writeStartElement("mods", "topic", MODS_NAMESPACE_URI);
            writer.writeCharacters(keyword);
            writer.writeEndElement();
            writer.writeEndElement();
        }
    }

    /**
     * Writes one personal-name element per author.
     * NOTE(review): splitting on the bare substring "and" (not " and ") also
     * splits names that merely contain "and" (e.g. "Anderson") — confirm
     * whether author values are pre-normalized before reaching this point.
     */
    private void handleAuthors(XMLStreamWriter writer, String value) throws XMLStreamException {
        String[] authors = value.split("and");
        for (String author : authors) {
            writer.writeStartElement("mods", "name", MODS_NAMESPACE_URI);
            writer.writeAttribute("type", "personal");
            if (author.contains(",")) {
                // if author contains "," then this indicates that the author has a forename and family name
                int commaIndex = author.indexOf(',');
                String familyName = author.substring(0, commaIndex);
                writer.writeStartElement("mods", "namePart", MODS_NAMESPACE_URI);
                writer.writeAttribute("type", "family");
                writer.writeCharacters(familyName);
                writer.writeEndElement();
                // now take care of the forenames
                String forename = author.substring(commaIndex + 1);
                String[] forenames = forename.split(" ");
                for (String given : forenames) {
                    if (!given.isEmpty()) {
                        writer.writeStartElement("mods", "namePart", MODS_NAMESPACE_URI);
                        writer.writeAttribute("type", "given");
                        writer.writeCharacters(given);
                        writer.writeEndElement();
                    }
                }
                writer.writeEndElement();
            } else {
                // no "," indicates that there should only be a family name
                writer.writeStartElement("mods", "namePart", MODS_NAMESPACE_URI);
                writer.writeAttribute("type", "family");
                writer.writeCharacters(author);
                writer.writeEndElement();
                writer.writeEndElement();
            }
        }
    }

    /**
     * Writes an identifier element. For the pseudo-field "citekey" this also
     * OPENS the surrounding mods element (with the key as its ID attribute);
     * the caller is responsible for eventually closing it.
     */
    private void addIdentifier(XMLStreamWriter writer, Field field, String value) throws XMLStreamException {
        if (new UnknownField("citekey").equals(field)) {
            writer.writeStartElement("mods", "mods", MODS_NAMESPACE_URI);
            writer.writeAttribute("ID", value);
        }
        writer.writeStartElement("mods", "identifier", MODS_NAMESPACE_URI);
        writer.writeAttribute("type", field.getName());
        writer.writeCharacters(value);
        writer.writeEndElement(); // end identifier
    }

    /**
     * Splits a page range at the given separator ("-" or "--") and writes
     * start/end elements inside an extent element.
     */
    private void addStartAndEndPage(XMLStreamWriter writer, String value, String minus) throws XMLStreamException {
        int minusIndex = value.indexOf(minus);
        String startPage = value.substring(0, minusIndex);
        String endPage = "";
        if (MINUS.equals(minus)) {
            endPage = value.substring(minusIndex + 1);
        } else if (DOUBLE_MINUS.equals(minus)) {
            endPage = value.substring(minusIndex + 2);
        }
        writer.writeStartElement("mods", "extent", MODS_NAMESPACE_URI);
        writer.writeStartElement("mods", "start", MODS_NAMESPACE_URI);
        writer.writeCharacters(startPage);
        writer.writeEndElement();
        writer.writeStartElement("mods", "end", MODS_NAMESPACE_URI);
        writer.writeCharacters(endPage);
        writer.writeEndElement();
        writer.writeEndElement();
    }

    // Writes a detail element typed by the field name (volume/issue).
    private void addDetail(XMLStreamWriter writer, Field field, String value) throws XMLStreamException {
        writer.writeStartElement("mods", "detail", MODS_NAMESPACE_URI);
        writer.writeAttribute("type", field.getName());
        writer.writeStartElement("mods", "number", MODS_NAMESPACE_URI);
        writer.writeCharacters(value);
        writer.writeEndElement(); // end number
        writer.writeEndElement(); // end detail
    }

    // Maps one origin-related field to its MODS element inside originInfo.
    private void addOriginInformation(XMLStreamWriter writer, Field field, String value) throws XMLStreamException {
        if (field.equals(StandardField.YEAR)) {
            addDate(writer, "dateIssued", value);
        } else if (field.equals(new UnknownField("created"))) {
            addDate(writer, "dateCreated", value);
        } else if (field.equals(StandardField.MODIFICATIONDATE)) {
            addDate(writer, "dateModified", value);
        } else if (field.equals(StandardField.CREATIONDATE)) {
            addDate(writer, "dateCaptured", value);
        } else if (StandardField.PUBLISHER == field) {
            writer.writeStartElement("mods", "publisher", MODS_NAMESPACE_URI);
            writer.writeAttribute("xsi", MODS_NAMESPACE_URI, "type", "mods:stringPlusLanguagePlusSupplied");
            writer.writeCharacters(value);
            writer.writeEndElement();
        } else if (field.equals(new UnknownField("issuance"))) {
            writer.writeStartElement("mods", "issuance", MODS_NAMESPACE_URI);
            writer.writeCharacters(value);
            writer.writeEndElement();
        } else if (field.equals(StandardField.ADDRESS)) {
            writer.writeStartElement("mods", "place", MODS_NAMESPACE_URI);
            String[] places = value.split(", ");
            for (String place : places) {
                writer.writeStartElement("mods", "placeTerm", MODS_NAMESPACE_URI);
                writer.writeAttribute("type", "text");
                writer.writeCharacters(place);
                writer.writeEndElement();
            }
            writer.writeEndElement();
        } else if (field.equals(StandardField.EDITION)) {
            writer.writeStartElement("mods", "edition", MODS_NAMESPACE_URI);
            writer.writeCharacters(value);
            writer.writeEndElement();
        }
    }

    // Writes a keyDate="yes" date element with the given tag name.
    private void addDate(XMLStreamWriter writer, String dateName, String value) throws XMLStreamException {
        writer.writeStartElement("mods", dateName, MODS_NAMESPACE_URI);
        writer.writeAttribute("keyDate", "yes");
        writer.writeCharacters(value);
        writer.writeEndElement(); // close date element
    }
}
| 21,413
| 43.987395
| 152
|
java
|
null |
jabref-main/src/main/java/org/jabref/logic/exporter/OOCalcDatabase.java
|
package org.jabref.logic.exporter;
import java.util.ArrayList;
import java.util.List;
import java.util.stream.Collectors;
import java.util.stream.Stream;
import javax.xml.parsers.DocumentBuilderFactory;
import org.jabref.logic.bibtex.comparator.FieldComparator;
import org.jabref.logic.bibtex.comparator.FieldComparatorStack;
import org.jabref.logic.layout.format.GetOpenOfficeType;
import org.jabref.logic.layout.format.RemoveBrackets;
import org.jabref.logic.layout.format.RemoveWhitespace;
import org.jabref.model.database.BibDatabase;
import org.jabref.model.entry.BibEntry;
import org.jabref.model.entry.field.Field;
import org.jabref.model.entry.field.FieldFactory;
import org.jabref.model.entry.field.InternalField;
import org.jabref.model.entry.field.StandardField;
import org.jabref.model.entry.field.UnknownField;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.w3c.dom.Document;
import org.w3c.dom.Element;
import org.w3c.dom.Text;
/**
 * Builds a DOM representation of a bibliography as an OpenOffice 1.0
 * spreadsheet ("office:document-content" with a "biblio" table), one row per
 * entry, sorted by author, year and citation key.
 */
class OOCalcDatabase {

    private static final Logger LOGGER = LoggerFactory.getLogger(OOCalcDatabase.class);
    private static final Field REPORT_TYPE_FIELD = new UnknownField("reporttype");

    private final List<BibEntry> entries = new ArrayList<>();
    // Columns to export: all standard fields (incl. citation key) plus "reporttype".
    private final List<Field> toExportFields = Stream.concat(FieldFactory.getStandardFieldsWithCitationKey().stream(), Stream.of(REPORT_TYPE_FIELD))
                                                     .collect(Collectors.toList());

    /**
     * @param bibtex  database used as fallback entry source
     * @param entries entries to export; when {@code null}, all entries of {@code bibtex} are used
     */
    public OOCalcDatabase(BibDatabase bibtex, List<BibEntry> entries) {
        this.entries.addAll(entries != null ? entries : bibtex.getEntries());
        // Stable, reader-friendly order: author, then year, then citation key.
        List<FieldComparator> comparators = new ArrayList<>();
        comparators.add(new FieldComparator(StandardField.AUTHOR));
        comparators.add(new FieldComparator(StandardField.YEAR));
        comparators.add(new FieldComparator(InternalField.KEY_FIELD));
        this.entries.sort(new FieldComparatorStack<>(comparators));
    }

    // Returns the field value or "" when the entry does not have the field.
    private static String getField(BibEntry e, Field field) {
        return e.getField(field).orElse("");
    }

    /**
     * Builds the full spreadsheet document: root element, styles, the "biblio"
     * table with a header row and one row per entry.
     *
     * @return the DOM document, or {@code null} when building failed (the
     *         failure is logged, not rethrown)
     */
    public Document getDOMrepresentation() {
        Document document = null;
        try {
            document = DocumentBuilderFactory.newInstance().newDocumentBuilder().newDocument();
            Element root = createRootElement(document);
            Element body = document.createElement("office:body");
            Element table = createTableElement(document);
            body.appendChild(table);
            root.appendChild(body);
            document.appendChild(root);
            addTableHeader(table, document);
            for (BibEntry entry : entries) {
                addEntryRow(entry, table, document);
            }
        } catch (Exception e) {
            LOGGER.warn("Exception caught...", e);
        }
        return document;
    }

    // Appends one table row for the given entry: entry type first, then all
    // export fields in column order (title is stripped of braces/whitespace).
    private void addEntryRow(BibEntry entry, Element table, Document document) {
        final Element row = document.createElement("table:table-row");
        addTableCell(document, row, new GetOpenOfficeType().format(entry.getType().getName()));
        toExportFields.forEach(field -> {
            if (field.equals(StandardField.TITLE)) {
                addTableCell(document, row, new RemoveWhitespace().format(new RemoveBrackets().format(getField(entry, StandardField.TITLE))));
            } else {
                addTableCell(document, row, getField(entry, field));
            }
        });
        table.appendChild(row);
    }

    private Element createTableElement(Document document) {
        Element table = document.createElement("table:table");
        table.setAttribute("table:name", "biblio");
        // FIX: was "table.style-name" (dot) — the OpenDocument attribute uses a
        // colon, so the "ta1" style reference was ignored by consumers.
        table.setAttribute("table:style-name", "ta1");
        return table;
    }

    // Creates the document root with namespace declarations and the automatic
    // style definitions for rows ("ro1") and the table ("ta1").
    private Element createRootElement(Document document) {
        Element root = document.createElement("office:document-content");
        root.setAttribute("xmlns:office", "http://openoffice.org/2000/office");
        root.setAttribute("xmlns:style", "http://openoffice.org/2000/style");
        root.setAttribute("xmlns:text", "http://openoffice.org/2000/text");
        root.setAttribute("xmlns:table", "http://openoffice.org/2000/table");
        // NOTE(review): "xmlns:office:class"/"xmlns:office:version" look odd —
        // the OpenOffice 1.0 attributes are "office:class"/"office:version";
        // kept as-is because consumers may depend on the current output.
        root.setAttribute("xmlns:office:class", "spreadsheet");
        root.setAttribute("xmlns:office:version", "1.0");
        root.setAttribute("xmlns:fo", "http://www.w3.org/1999/XSL/Format");
        Element el = document.createElement("office:script");
        root.appendChild(el);
        el = document.createElement("office:automatic-styles");
        Element el2 = document.createElement("style:style");
        el2.setAttribute("style:name", "ro1");
        el2.setAttribute("style:family", "table-row");
        // FIX: was "style.properties" (dot) — inconsistent with the correct
        // "style:properties" used for the table style below.
        Element el3 = document.createElement("style:properties");
        el3.setAttribute("style:row-height", "0.1681inch");
        el3.setAttribute("fo:break-before", "auto");
        el3.setAttribute("style:use-optimal-row-height", "true");
        el2.appendChild(el3);
        el.appendChild(el2);
        el2 = document.createElement("style:style");
        el2.setAttribute("style:name", "ta1");
        el2.setAttribute("style:family", "table");
        el2.setAttribute("style:master-page-name", "Default");
        el3 = document.createElement("style:properties");
        el3.setAttribute("table:display", "true");
        el2.appendChild(el3);
        el.appendChild(el2);
        root.appendChild(el);
        return root;
    }

    // Appends a single cell containing a text paragraph with the given content.
    private static void addTableCell(Document doc, Element parent, String content) {
        Element cell = doc.createElement("table:table-cell");
        Element text = doc.createElement("text:p");
        Text textNode = doc.createTextNode(content);
        text.appendChild(textNode);
        cell.appendChild(text);
        parent.appendChild(cell);
    }

    // Appends the header row: "Type" followed by the display name of each field.
    private void addTableHeader(Element table, Document document) {
        Element firstRow = document.createElement("table:table-row");
        // FIX: was "table.style-name" (dot) — see createTableElement.
        firstRow.setAttribute("table:style-name", "ro1");
        addTableCell(document, firstRow, "Type");
        for (Field field : toExportFields) {
            addTableCell(document, firstRow, field.getDisplayName());
        }
        table.appendChild(firstRow);
    }
}
| 6,224
| 39.686275
| 148
|
java
|
null |
jabref-main/src/main/java/org/jabref/logic/exporter/OpenDocumentRepresentation.java
|
package org.jabref.logic.exporter;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import javax.xml.parsers.DocumentBuilder;
import javax.xml.parsers.DocumentBuilderFactory;
import org.jabref.logic.bibtex.comparator.FieldComparator;
import org.jabref.logic.bibtex.comparator.FieldComparatorStack;
import org.jabref.logic.layout.format.GetOpenOfficeType;
import org.jabref.logic.layout.format.RemoveBrackets;
import org.jabref.logic.layout.format.RemoveWhitespace;
import org.jabref.model.database.BibDatabase;
import org.jabref.model.entry.BibEntry;
import org.jabref.model.entry.field.Field;
import org.jabref.model.entry.field.InternalField;
import org.jabref.model.entry.field.StandardField;
import org.jabref.model.entry.field.UnknownField;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.w3c.dom.Document;
import org.w3c.dom.Element;
import org.w3c.dom.Text;
class OpenDocumentRepresentation {

    private static final Logger LOGGER = LoggerFactory.getLogger(OpenDocumentRepresentation.class);

    // Column headers of the generated spreadsheet, in exactly the order in which
    // createEntryRow() emits the corresponding values.
    private static final String[] HEADER_LABELS = {
            "Identifier", "Type", "Address", "Assignee", "Annote", "Author", "Booktitle", "Chapter",
            "Day", "Dayfiled", "Edition", "Editor", "Howpublish", "Institution", "Journal", "Language",
            "Month", "Monthfiled", "Nationality", "Note", "Number", "Organization", "Pages", "Publisher",
            "Revision", "School", "Series", "Title", "RepType", "Volume", "Year", "Yearfiled", "URL",
            "Custom1", "Custom2", "Custom3", "Custom4", "Custom5", "ISBN"};

    private final List<BibEntry> entries;
    private final BibDatabase database;

    /**
     * @param database database used to resolve field values and aliases
     * @param entries  entries to export; if {@code null}, all entries of the database are exported
     */
    public OpenDocumentRepresentation(BibDatabase database, List<BibEntry> entries) {
        this.database = database;
        // Sort by author, then year, then citation key so the export order is stable.
        List<FieldComparator> comparators = new ArrayList<>();
        comparators.add(new FieldComparator(StandardField.AUTHOR));
        comparators.add(new FieldComparator(StandardField.YEAR));
        comparators.add(new FieldComparator(InternalField.KEY_FIELD));
        List<BibEntry> entryList = new ArrayList<>();
        if (entries == null) {
            entryList.addAll(database.getEntries());
        } else {
            entryList.addAll(entries);
        }
        entryList.sort(new FieldComparatorStack<>(comparators));
        this.entries = entryList;
    }

    /**
     * Builds the {@code content.xml} DOM of an OpenDocument spreadsheet: a single table
     * named "biblio" with one header row plus one row per entry.
     *
     * @return the document, or {@code null} if the XML parser could not be created
     */
    public Document getDOMrepresentation() {
        Document result = null;
        try {
            DocumentBuilder dbuild = DocumentBuilderFactory.newInstance().newDocumentBuilder();
            result = dbuild.newDocument();
            Element collection = result.createElement("office:document-content");
            collection.setAttribute("xmlns:office", "urn:oasis:names:tc:opendocument:xmlns:office:1.0");
            collection.setAttribute("xmlns:style", "urn:oasis:names:tc:opendocument:xmlns:style:1.0");
            collection.setAttribute("xmlns:text", "urn:oasis:names:tc:opendocument:xmlns:text:1.0");
            collection.setAttribute("xmlns:table", "urn:oasis:names:tc:opendocument:xmlns:table:1.0");
            collection.setAttribute("xmlns:meta", "urn:oasis:names:tc:opendocument:xmlns:meta:1.0");
            collection.setAttribute("office:version", "1.0");
            collection.setAttribute("xmlns:fo", "urn:oasis:names:tc:opendocument:xmlns:xsl-fo-compatible:1.0");
            collection.setAttribute("xmlns:xlink", "http://www.w3.org/1999/xlink");
            collection.appendChild(result.createElement("office:scripts"));
            collection.appendChild(createAutomaticStyles(result));
            Element body = result.createElement("office:body");
            Element spreadsheet = result.createElement("office:spreadsheet");
            Element table = result.createElement("table:table");
            table.setAttribute("table:name", "biblio");
            // Fixed: attribute name was "table.style-name" (dot typo), so style "ta1" never applied.
            table.setAttribute("table:style-name", "ta1");
            table.appendChild(createHeaderRow(result));
            for (BibEntry e : entries) {
                table.appendChild(createEntryRow(result, e));
            }
            spreadsheet.appendChild(table);
            body.appendChild(spreadsheet);
            collection.appendChild(body);
            result.appendChild(collection);
        } catch (Exception e) {
            LOGGER.warn("Exception caught...", e);
        }
        return result;
    }

    /** Creates the office:automatic-styles block defining row style "ro1" and table style "ta1". */
    private static Element createAutomaticStyles(Document document) {
        Element styles = document.createElement("office:automatic-styles");
        Element rowStyle = document.createElement("style:style");
        rowStyle.setAttribute("style:name", "ro1");
        rowStyle.setAttribute("style:family", "table-row");
        // Fixed: element name was "style.table-row-properties" (dot instead of colon),
        // an invalid name that ODF consumers silently ignored.
        Element rowProperties = document.createElement("style:table-row-properties");
        rowProperties.setAttribute("style:row-height", "0.1681inch");
        rowProperties.setAttribute("fo:break-before", "auto");
        rowProperties.setAttribute("style:use-optimal-row-height", "true");
        rowStyle.appendChild(rowProperties);
        styles.appendChild(rowStyle);
        Element tableStyle = document.createElement("style:style");
        tableStyle.setAttribute("style:name", "ta1");
        tableStyle.setAttribute("style:family", "table");
        tableStyle.setAttribute("style:master-page-name", "Default");
        Element tableProperties = document.createElement("style:properties");
        tableProperties.setAttribute("table:display", "true");
        tableStyle.appendChild(tableProperties);
        styles.appendChild(tableStyle);
        return styles;
    }

    /** Creates the header row with one cell per label in {@link #HEADER_LABELS}. */
    private Element createHeaderRow(Document document) {
        Element row = document.createElement("table:table-row");
        // Fixed: attribute name was "table.style-name" (dot typo).
        row.setAttribute("table:style-name", "ro1");
        for (String label : HEADER_LABELS) {
            addTableCell(document, row, label);
        }
        return row;
    }

    /**
     * Creates one table row for the given entry; cell order must match {@link #HEADER_LABELS}.
     * The five "Custom" columns are intentionally left empty.
     */
    private Element createEntryRow(Document document, BibEntry e) {
        Element row = document.createElement("table:table-row");
        addTableCell(document, row, getField(e, InternalField.KEY_FIELD));
        addTableCell(document, row, new GetOpenOfficeType().format(e.getType().getName()));
        addTableCell(document, row, getField(e, StandardField.ADDRESS));
        addTableCell(document, row, getField(e, StandardField.ASSIGNEE));
        addTableCell(document, row, getField(e, StandardField.ANNOTE));
        addTableCell(document, row, getField(e, StandardField.AUTHOR));
        addTableCell(document, row, getField(e, StandardField.BOOKTITLE));
        addTableCell(document, row, getField(e, StandardField.CHAPTER));
        addTableCell(document, row, getField(e, StandardField.DAY));
        addTableCell(document, row, getField(e, StandardField.DAYFILED));
        addTableCell(document, row, getField(e, StandardField.EDITION));
        addTableCell(document, row, getField(e, StandardField.EDITOR));
        addTableCell(document, row, getField(e, StandardField.HOWPUBLISHED));
        addTableCell(document, row, getField(e, StandardField.INSTITUTION));
        addTableCell(document, row, getField(e, StandardField.JOURNAL));
        addTableCell(document, row, getField(e, StandardField.LANGUAGE));
        addTableCell(document, row, getField(e, StandardField.MONTH));
        addTableCell(document, row, getField(e, StandardField.MONTHFILED));
        addTableCell(document, row, getField(e, StandardField.NATIONALITY));
        addTableCell(document, row, getField(e, StandardField.NOTE));
        addTableCell(document, row, getField(e, StandardField.NUMBER));
        addTableCell(document, row, getField(e, StandardField.ORGANIZATION));
        addTableCell(document, row, getField(e, StandardField.PAGES));
        addTableCell(document, row, getField(e, StandardField.PUBLISHER));
        addTableCell(document, row, getField(e, StandardField.REVISION));
        addTableCell(document, row, getField(e, StandardField.SCHOOL));
        addTableCell(document, row, getField(e, StandardField.SERIES));
        // The title is exported without braces and surplus whitespace.
        addTableCell(document, row, new RemoveWhitespace().format(new RemoveBrackets().format(getField(e, StandardField.TITLE))));
        addTableCell(document, row, getField(e, new UnknownField("reporttype")));
        addTableCell(document, row, getField(e, StandardField.VOLUME));
        addTableCell(document, row, getField(e, StandardField.YEAR));
        addTableCell(document, row, getField(e, StandardField.YEARFILED));
        addTableCell(document, row, getField(e, StandardField.URL));
        addTableCell(document, row, "");
        addTableCell(document, row, "");
        addTableCell(document, row, "");
        addTableCell(document, row, "");
        addTableCell(document, row, "");
        addTableCell(document, row, getField(e, StandardField.ISBN));
        return row;
    }

    /** Resolves the field value (following aliases and string expansion), or "" if absent. */
    private String getField(BibEntry e, Field field) {
        return e.getResolvedFieldOrAlias(field, database).orElse("");
    }

    /** Appends one cell containing the given text (wrapped in a text:p paragraph) to the row. */
    private void addTableCell(Document doc, Element parent, String content) {
        Element cell = doc.createElement("table:table-cell");
        Element text = doc.createElement("text:p");
        Text textNode = doc.createTextNode(content);
        text.appendChild(textNode);
        cell.appendChild(text);
        parent.appendChild(cell);
    }
}
| 10,955
| 50.679245
| 152
|
java
|
null |
jabref-main/src/main/java/org/jabref/logic/exporter/OpenDocumentSpreadsheetCreator.java
|
package org.jabref.logic.exporter;
import java.io.BufferedInputStream;
import java.io.BufferedOutputStream;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.io.OutputStreamWriter;
import java.io.Writer;
import java.net.URL;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.List;
import java.util.Objects;
import java.util.zip.CRC32;
import java.util.zip.ZipEntry;
import java.util.zip.ZipOutputStream;
import javax.xml.transform.OutputKeys;
import javax.xml.transform.Transformer;
import javax.xml.transform.TransformerFactory;
import javax.xml.transform.dom.DOMSource;
import javax.xml.transform.stream.StreamResult;
import org.jabref.logic.l10n.Localization;
import org.jabref.logic.util.StandardFileType;
import org.jabref.model.database.BibDatabase;
import org.jabref.model.database.BibDatabaseContext;
import org.jabref.model.entry.BibEntry;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
public class OpenDocumentSpreadsheetCreator extends Exporter {

    private static final Logger LOGGER = LoggerFactory.getLogger(OpenDocumentSpreadsheetCreator.class);

    private static final String MIME_TYPE = "application/vnd.oasis.opendocument.spreadsheet";

    /**
     * Creates a new instance of OpenDocumentSpreadsheetCreator
     */
    public OpenDocumentSpreadsheetCreator() {
        super("ods", Localization.lang("OpenDocument spreadsheet"), StandardFileType.ODS);
    }

    /**
     * Packs the XML content plus the static metadata files into the .ods zip archive.
     *
     * @param file   target .ods file
     * @param source stream providing the content.xml payload
     */
    private static void storeOpenDocumentSpreadsheetFile(Path file, InputStream source) throws IOException {
        try (ZipOutputStream out = new ZipOutputStream(new BufferedOutputStream(Files.newOutputStream(file)))) {
            // The "mimetype" entry must be STORED (uncompressed), which requires size and
            // CRC to be set before writing. Use an explicit charset so the declared size
            // always matches the bytes actually written (the old code used the platform
            // default charset and wrote the string char by char).
            byte[] mime = MIME_TYPE.getBytes(StandardCharsets.US_ASCII);
            ZipEntry mimeEntry = new ZipEntry("mimetype");
            mimeEntry.setMethod(ZipEntry.STORED);
            mimeEntry.setSize(mime.length);
            CRC32 crc = new CRC32();
            crc.update(mime);
            mimeEntry.setCrc(crc.getValue());
            out.putNextEntry(mimeEntry);
            out.write(mime);
            out.closeEntry();
            out.putNextEntry(new ZipEntry("content.xml"));
            // Bulk copy instead of the previous byte-by-byte loop.
            source.transferTo(out);
            out.closeEntry();
            // Add manifest (required for OOo 2.0) and "meta.xml": These are in the
            // resource/ods directory, and are copied verbatim into the zip file.
            addResourceFile("meta.xml", "/resource/ods/meta.xml", out);
            addResourceFile("META-INF/manifest.xml", "/resource/ods/manifest.xml", out);
        }
    }

    /**
     * Writes the XML representation to a temporary file and zips it into the target file.
     */
    private static void exportOpenDocumentSpreadsheet(Path file, BibDatabase database, List<BibEntry> entries)
            throws IOException {
        // First store the xml formatted content to a temporary file.
        File tmpFile = File.createTempFile("opendocument", null);
        exportOpenDocumentSpreadsheetXML(tmpFile, database, entries);
        // Then add the content to the zip file:
        try (BufferedInputStream in = new BufferedInputStream(new FileInputStream(tmpFile))) {
            storeOpenDocumentSpreadsheetFile(file, in);
        } finally {
            // Delete the temporary file even if zipping failed:
            if (!tmpFile.delete()) {
                LOGGER.info("Cannot delete temporary export file");
            }
        }
    }

    @Override
    public void export(final BibDatabaseContext databaseContext, final Path file,
                       List<BibEntry> entries) throws IOException {
        Objects.requireNonNull(databaseContext);
        Objects.requireNonNull(entries);
        if (!entries.isEmpty()) { // Only export if entries exists
            exportOpenDocumentSpreadsheet(file, databaseContext.getDatabase(), entries);
        }
    }

    /**
     * Serializes the DOM representation of the given entries as indented XML into tmpFile.
     */
    private static void exportOpenDocumentSpreadsheetXML(File tmpFile, BibDatabase database, List<BibEntry> entries) {
        OpenDocumentRepresentation od = new OpenDocumentRepresentation(database, entries);
        try (Writer ps = new OutputStreamWriter(new FileOutputStream(tmpFile), StandardCharsets.UTF_8)) {
            DOMSource source = new DOMSource(od.getDOMrepresentation());
            StreamResult result = new StreamResult(ps);
            Transformer trans = TransformerFactory.newInstance().newTransformer();
            trans.setOutputProperty(OutputKeys.INDENT, "yes");
            trans.transform(source, result);
        } catch (Exception e) {
            throw new Error(e);
        }
    }

    /** Adds one zip entry whose content is copied verbatim from the given classpath resource. */
    private static void addResourceFile(String name, String resource, ZipOutputStream out) throws IOException {
        ZipEntry zipEntry = new ZipEntry(name);
        out.putNextEntry(zipEntry);
        addFromResource(resource, out);
        out.closeEntry();
    }

    /** Copies the classpath resource into the output stream; missing resources are logged, not fatal. */
    private static void addFromResource(String resource, OutputStream out) {
        URL url = OpenDocumentSpreadsheetCreator.class.getResource(resource);
        if (url == null) {
            // Previously this caused a NullPointerException on url.openStream().
            LOGGER.warn("Cannot find resource {}", resource);
            return;
        }
        try (InputStream in = url.openStream()) {
            in.transferTo(out);
        } catch (IOException e) {
            LOGGER.warn("Cannot get resource", e);
        }
    }
}
| 5,901
| 38.610738
| 119
|
java
|
null |
jabref-main/src/main/java/org/jabref/logic/exporter/OpenOfficeDocumentCreator.java
|
package org.jabref.logic.exporter;
import java.io.BufferedInputStream;
import java.io.BufferedOutputStream;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.io.OutputStreamWriter;
import java.io.Writer;
import java.net.URL;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.List;
import java.util.Objects;
import java.util.zip.ZipEntry;
import java.util.zip.ZipOutputStream;
import javax.xml.transform.OutputKeys;
import javax.xml.transform.Transformer;
import javax.xml.transform.TransformerFactory;
import javax.xml.transform.dom.DOMSource;
import javax.xml.transform.stream.StreamResult;
import org.jabref.logic.util.StandardFileType;
import org.jabref.model.database.BibDatabase;
import org.jabref.model.database.BibDatabaseContext;
import org.jabref.model.entry.BibEntry;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
public class OpenOfficeDocumentCreator extends Exporter {

    private static final Logger LOGGER = LoggerFactory.getLogger(OpenOfficeDocumentCreator.class);

    /**
     * Creates a new instance of OpenOfficeDocumentCreator
     */
    public OpenOfficeDocumentCreator() {
        super("oocalc", "Old OpenOffice/LibreOffice Calc format", StandardFileType.SXC);
    }

    /**
     * Packs the XML content plus the static metadata files into the .sxc zip archive.
     *
     * @param file   target .sxc file
     * @param source stream providing the content.xml payload
     */
    private static void storeOpenOfficeFile(Path file, InputStream source) throws Exception {
        try (ZipOutputStream out = new ZipOutputStream(new BufferedOutputStream(Files.newOutputStream(file)))) {
            out.putNextEntry(new ZipEntry("content.xml"));
            // Bulk copy instead of the previous byte-by-byte loop.
            source.transferTo(out);
            out.closeEntry();
            // Add manifest (required for OOo 2.0), "meta.xml", "mimetype" files. These are in the
            // resource/openoffice directory, and are copied verbatim into the zip file.
            addResourceFile("meta.xml", "/resource/openoffice/meta.xml", out);
            addResourceFile("mimetype", "/resource/openoffice/mimetype", out);
            addResourceFile("META-INF/manifest.xml", "/resource/openoffice/manifest.xml", out);
        }
    }

    /**
     * Writes the XML representation to a temporary file and zips it into the target file.
     */
    private static void exportOpenOfficeCalc(Path file, BibDatabase database, List<BibEntry> entries) throws Exception {
        // First store the xml formatted content to a temporary file.
        File tmpFile = File.createTempFile("oocalc", null);
        exportOpenOfficeCalcXML(tmpFile, database, entries);
        // Then add the content to the zip file:
        try (BufferedInputStream in = new BufferedInputStream(new FileInputStream(tmpFile))) {
            storeOpenOfficeFile(file, in);
        } finally {
            // Delete the temporary file even if zipping failed:
            if (!tmpFile.delete()) {
                LOGGER.info("Cannot delete temporary export file");
            }
        }
    }

    @Override
    public void export(final BibDatabaseContext databaseContext, final Path file,
                       List<BibEntry> entries) throws Exception {
        Objects.requireNonNull(databaseContext);
        Objects.requireNonNull(entries);
        if (!entries.isEmpty()) { // Do not export if no entries
            exportOpenOfficeCalc(file, databaseContext.getDatabase(), entries);
        }
    }

    /**
     * Serializes the DOM representation of the given entries as indented XML into tmpFile.
     */
    private static void exportOpenOfficeCalcXML(File tmpFile, BibDatabase database, List<BibEntry> entries) {
        OOCalcDatabase od = new OOCalcDatabase(database, entries);
        try (Writer ps = new OutputStreamWriter(new FileOutputStream(tmpFile), StandardCharsets.UTF_8)) {
            DOMSource source = new DOMSource(od.getDOMrepresentation());
            StreamResult result = new StreamResult(ps);
            Transformer trans = TransformerFactory.newInstance().newTransformer();
            trans.setOutputProperty(OutputKeys.INDENT, "yes");
            trans.transform(source, result);
        } catch (Exception e) {
            throw new Error(e);
        }
    }

    /** Adds one zip entry whose content is copied verbatim from the given classpath resource. */
    private static void addResourceFile(String name, String resource, ZipOutputStream out) throws IOException {
        ZipEntry zipEntry = new ZipEntry(name);
        out.putNextEntry(zipEntry);
        addFromResource(resource, out);
        out.closeEntry();
    }

    /** Copies the classpath resource into the output stream; missing resources are logged, not fatal. */
    private static void addFromResource(String resource, OutputStream out) {
        URL url = OpenOfficeDocumentCreator.class.getResource(resource);
        if (url == null) {
            // Previously this caused a NullPointerException on url.openStream().
            LOGGER.warn("Cannot find resource {}", resource);
            return;
        }
        try (InputStream in = url.openStream()) {
            in.transferTo(out);
        } catch (IOException e) {
            LOGGER.warn("Cannot get resource", e);
        }
    }
}
| 5,193
| 38.648855
| 120
|
java
|
null |
jabref-main/src/main/java/org/jabref/logic/exporter/SaveConfiguration.java
|
package org.jabref.logic.exporter;
import org.jabref.model.metadata.SaveOrder;
/**
 * Mutable bundle of options controlling how a library is written to disk:
 * entry order, backup creation, which parts to save, and whether to reformat.
 * All {@code with*} methods mutate this instance and return it for chaining.
 */
public class SaveConfiguration {

    // Encoding written at the top of the .bib file.
    public static final String ENCODING_PREFIX = "Encoding: ";

    private SaveOrder saveOrder;
    private boolean makeBackup;
    private BibDatabaseWriter.SaveType saveType;
    private boolean useMetadataSaveOrder;
    private boolean reformatFile;

    public SaveConfiguration(SaveOrder saveOrder,
                             Boolean makeBackup,
                             BibDatabaseWriter.SaveType saveType,
                             Boolean useMetadataSaveOrder,
                             Boolean reformatFile) {
        this.saveOrder = saveOrder;
        this.makeBackup = makeBackup;
        this.saveType = saveType;
        this.useMetadataSaveOrder = useMetadataSaveOrder;
        this.reformatFile = reformatFile;
    }

    /** Defaults: default save order, no backup, save everything, honor metadata order, no reformat. */
    public SaveConfiguration() {
        this(SaveOrder.getDefaultSaveOrder(), false, BibDatabaseWriter.SaveType.ALL, true, false);
    }

    public boolean useMetadataSaveOrder() {
        return useMetadataSaveOrder;
    }

    public SaveConfiguration withMetadataSaveOrder(boolean newTakeMetadataSaveOrderInAccount) {
        useMetadataSaveOrder = newTakeMetadataSaveOrderInAccount;
        return this;
    }

    public SaveOrder getSaveOrder() {
        return saveOrder;
    }

    public SaveConfiguration withSaveOrder(SaveOrder newSaveOrder) {
        saveOrder = newSaveOrder;
        return this;
    }

    public boolean shouldMakeBackup() {
        return makeBackup;
    }

    /**
     * Required by {@link org.jabref.logic.autosaveandbackup.BackupManager}. Should not be used in other settings
     *
     * @param newMakeBackup whether a backup (.bak file) should be made
     */
    public SaveConfiguration withMakeBackup(Boolean newMakeBackup) {
        makeBackup = newMakeBackup;
        return this;
    }

    public BibDatabaseWriter.SaveType getSaveType() {
        return saveType;
    }

    public SaveConfiguration withSaveType(BibDatabaseWriter.SaveType newSaveType) {
        saveType = newSaveType;
        return this;
    }

    public boolean shouldReformatFile() {
        return reformatFile;
    }

    public SaveConfiguration withReformatOnSave(boolean newReformat) {
        reformatFile = newReformat;
        return this;
    }
}
| 2,483
| 27.883721
| 113
|
java
|
null |
jabref-main/src/main/java/org/jabref/logic/exporter/SaveException.java
|
package org.jabref.logic.exporter;
import org.jabref.logic.l10n.Localization;
import org.jabref.model.entry.BibEntry;
/**
 * Thrown when writing the library to disk fails. When the failure is caused by a
 * specific entry, that entry is recorded and can be retrieved via {@link #getEntry()}.
 */
public class SaveException extends Exception {

    public static final SaveException FILE_LOCKED = new SaveException(
            "Could not save, file locked by another JabRef instance.",
            Localization.lang("Could not save, file locked by another JabRef instance."));

    public static final SaveException BACKUP_CREATION = new SaveException("Unable to create backup",
            Localization.lang("Unable to create backup"));

    // Entry responsible for the failure, or null if the failure is not entry-specific.
    private BibEntry entry;
    // Optional status code supplied by some callers (0 when not set).
    private int status;
    // Translated message for display; getLocalizedMessage() falls back to getMessage() when null.
    private String localizedMessage;

    public SaveException(String message) {
        super(message);
        this.entry = null;
    }

    public SaveException(String message, Throwable exception) {
        super(message, exception);
        this.entry = null;
    }

    public SaveException(String message, String localizedMessage) {
        super(message);
        this.localizedMessage = localizedMessage;
        this.entry = null;
    }

    public SaveException(String message, int status) {
        super(message);
        this.entry = null;
        this.status = status;
    }

    public SaveException(String message, BibEntry entry) {
        super(message);
        this.entry = entry;
    }

    public SaveException(String message, String localizedMessage, BibEntry entry, Throwable base) {
        super(message, base);
        this.localizedMessage = localizedMessage;
        this.entry = entry;
    }

    public SaveException(Throwable base) {
        super(base.getMessage(), base);
    }

    public SaveException(Throwable base, BibEntry entry) {
        this(base.getMessage(), base.getLocalizedMessage(), entry, base);
    }

    public int getStatus() {
        return status;
    }

    public BibEntry getEntry() {
        return entry;
    }

    /** @return true when a specific entry caused this exception */
    public boolean specificEntry() {
        return entry != null;
    }

    @Override
    public String getLocalizedMessage() {
        return (localizedMessage == null) ? getMessage() : localizedMessage;
    }
}
| 2,275
| 26.095238
| 100
|
java
|
null |
jabref-main/src/main/java/org/jabref/logic/exporter/TemplateExporter.java
|
package org.jabref.logic.exporter;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.Reader;
import java.net.URISyntaxException;
import java.net.URL;
import java.nio.charset.Charset;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import org.jabref.logic.journals.JournalAbbreviationLoader;
import org.jabref.logic.journals.JournalAbbreviationRepository;
import org.jabref.logic.layout.Layout;
import org.jabref.logic.layout.LayoutFormatterPreferences;
import org.jabref.logic.layout.LayoutHelper;
import org.jabref.logic.layout.format.Number;
import org.jabref.logic.util.FileType;
import org.jabref.logic.util.OS;
import org.jabref.logic.util.StandardFileType;
import org.jabref.model.database.BibDatabaseContext;
import org.jabref.model.entry.BibEntry;
import org.jabref.model.entry.types.EntryType;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* Base class for export formats based on templates.
*/
public class TemplateExporter extends Exporter {
private static final String BLANK_LINE_PATTERN = "\\r\\n|\\n";
private static final String LAYOUT_PREFIX = "/resource/layout/";
private static final String LAYOUT_EXTENSION = ".layout";
private static final String FORMATTERS_EXTENSION = ".formatters";
private static final String BEGIN_INFIX = ".begin";
private static final String END_INFIX = ".end";
private static final Logger LOGGER = LoggerFactory.getLogger(TemplateExporter.class);
private final String lfFileName;
private final String directory;
private final LayoutFormatterPreferences layoutPreferences;
private final SaveConfiguration saveConfiguration;
private boolean customExport;
private BlankLineBehaviour blankLineBehaviour;
    /**
     * Initialize another export format based on templates stored in dir with layoutFile lfFilename.
     *
     * @param displayName Name to display to the user.
     * @param consoleName Name to call this format in the console.
     * @param lfFileName  Name of the main layout file.
     * @param directory   Directory in which to find the layout file.
     * @param extension   Should contain the . (for instance .txt).
     */
    public TemplateExporter(String displayName,
                            String consoleName,
                            String lfFileName,
                            String directory,
                            FileType extension) {
        // Delegate with no layout/save preferences and no blank-line behaviour configured.
        this(displayName, consoleName, lfFileName, directory, extension, null, null, null);
    }
    /**
     * Initialize another export format based on templates stored in dir with layoutFile lfFilename.
     *
     * @param name              to display to the user and to call this format in the console.
     * @param lfFileName        Name of the main layout file.
     * @param extension         May or may not contain the . (for instance .txt).
     * @param layoutPreferences Preferences for the layout
     * @param saveConfiguration Preferences for saving
     */
    public TemplateExporter(String name,
                            String lfFileName,
                            String extension,
                            LayoutFormatterPreferences layoutPreferences,
                            SaveConfiguration saveConfiguration) {
        // The same name serves as both display and console name; no layout directory is used.
        this(name,
                name,
                lfFileName,
                null,
                StandardFileType.fromExtensions(extension),
                layoutPreferences,
                saveConfiguration);
    }
/**
* Initialize another export format based on templates stored in dir with layoutFile lfFilename.
*
* @param displayName Name to display to the user.
* @param consoleName Name to call this format in the console.
* @param lfFileName Name of the main layout file.
* @param directory Directory in which to find the layout file.
* @param extension Should contain the . (for instance .txt).
* @param layoutPreferences Preferences for layout
* @param saveConfiguration Preferences for saving
*/
public TemplateExporter(String displayName,
String consoleName,
String lfFileName,
String directory,
FileType extension,
LayoutFormatterPreferences layoutPreferences,
SaveConfiguration saveConfiguration) {
super(consoleName, displayName, extension);
if (Objects.requireNonNull(lfFileName).endsWith(LAYOUT_EXTENSION)) {
this.lfFileName = lfFileName.substring(0, lfFileName.length() - LAYOUT_EXTENSION.length());
} else {
this.lfFileName = lfFileName;
}
this.directory = directory;
this.layoutPreferences = layoutPreferences;
this.saveConfiguration = saveConfiguration;
}
/**
* Initialize another export format based on templates stored in dir with layoutFile lfFilename.
*
* @param displayName Name to display to the user.
* @param consoleName Name to call this format in the console.
* @param lfFileName Name of the main layout file.
* @param directory Directory in which to find the layout file.
* @param extension Should contain the . (for instance .txt).
* @param layoutPreferences Preferences for layout
* @param saveConfiguration Preferences for saving
* @param blankLineBehaviour how to behave regarding blank lines.
*/
public TemplateExporter(String displayName,
String consoleName,
String lfFileName,
String directory,
FileType extension,
LayoutFormatterPreferences layoutPreferences,
SaveConfiguration saveConfiguration,
BlankLineBehaviour blankLineBehaviour) {
super(consoleName, displayName, extension);
if (Objects.requireNonNull(lfFileName).endsWith(LAYOUT_EXTENSION)) {
this.lfFileName = lfFileName.substring(0, lfFileName.length() - LAYOUT_EXTENSION.length());
} else {
this.lfFileName = lfFileName;
}
this.directory = directory;
this.layoutPreferences = layoutPreferences;
this.saveConfiguration = saveConfiguration;
this.blankLineBehaviour = blankLineBehaviour;
}
    /**
     * Indicate whether this is a custom export.
     * A custom export looks for its layout files using a normal file path,
     * while a built-in export looks in the classpath.
     *
     * @param custom true to indicate a custom export format.
     */
    public void setCustomExport(boolean custom) {
        // Consulted by getReader() when resolving layout file locations.
        this.customExport = custom;
    }
/**
* This method should return a reader from which the given layout file can be read.
* <p>
* Subclasses of TemplateExporter are free to override and provide their own implementation.
*
* @param filename the filename
* @return a newly created reader
* @throws IOException if the reader could not be created
*/
private Reader getReader(String filename) throws IOException {
// If this is a custom export, just use the given filename:
String dir;
if (customExport) {
dir = "";
} else {
dir = LAYOUT_PREFIX + (directory == null ? "" : directory + '/');
}
// Attempt to get a Reader for the file path given, either by
// loading it as a resource (from within JAR), or as a normal file. If
// unsuccessful (e.g. file not found), an IOException is thrown.
String name = dir + filename;
// Try loading as a resource first. This works for files inside the JAR:
// If that did not work, try loading as a normal file URL:
try {
URL res = TemplateExporter.class.getResource(name);
Path reso;
if (res == null) {
reso = Path.of(name);
} else {
reso = Path.of(res.toURI());
}
return Files.newBufferedReader(reso, StandardCharsets.UTF_8);
} catch (FileNotFoundException | URISyntaxException ex) {
throw new IOException("Cannot find layout file: '" + name + "'.");
}
}
    /**
     * Exports the given entries using no extra file directories and the built-in
     * journal abbreviation repository.
     */
    @Override
    public void export(BibDatabaseContext databaseContext, Path file, List<BibEntry> entries) throws Exception {
        export(databaseContext, file, entries, Collections.emptyList(), JournalAbbreviationLoader.loadBuiltInRepository());
    }
    /**
     * Exports the given entries to {@code file} using this exporter's layout files.
     * <p>
     * The output is assembled from an optional ".begin" layout (header), one layout per
     * entry (a type-specific layout if one exists, otherwise the default layout), and an
     * optional ".end" layout (footer). Missing begin/end layout files are silently
     * ignored; missing formatters are collected and logged at the end.
     * Nothing is written when {@code entries} is empty.
     */
    @Override
    public void export(final BibDatabaseContext databaseContext,
                       final Path file,
                       List<BibEntry> entries,
                       List<Path> fileDirForDatabase,
                       JournalAbbreviationRepository abbreviationRepository) throws Exception {
        Objects.requireNonNull(databaseContext);
        Objects.requireNonNull(entries);
        Charset encodingToUse = StandardCharsets.UTF_8;
        if (entries.isEmpty()) { // Do not export if no entries to export -- avoids exports with only template text
            return;
        }
        try (AtomicFileWriter ps = new AtomicFileWriter(file, encodingToUse)) {
            Layout beginLayout = null;
            // Check if this export filter has bundled name formatters:
            // Add these to the preferences, so all layouts have access to the custom name formatters:
            readFormatterFile();
            List<String> missingFormatters = new ArrayList<>(1);
            // Print header
            try (Reader reader = getReader(lfFileName + BEGIN_INFIX + LAYOUT_EXTENSION)) {
                LayoutHelper layoutHelper = new LayoutHelper(reader, fileDirForDatabase, layoutPreferences, abbreviationRepository);
                beginLayout = layoutHelper.getLayoutFromText();
            } catch (IOException ex) {
                // If an exception was cast, export filter doesn't have a begin
                // file.
            }
            // Write the header
            if (beginLayout != null) {
                ps.write(beginLayout.doLayout(databaseContext, encodingToUse));
                missingFormatters.addAll(beginLayout.getMissingFormatters());
            }
            /*
             * Write database entries; entries will be sorted as they appear on the
             * screen, or sorted by author, depending on Preferences. We also supply
             * the Set entries - if we are to export only certain entries, it will
             * be non-null, and be used to choose entries. Otherwise, it will be
             * null, and be ignored.
             */
            List<BibEntry> sorted = BibDatabaseWriter.getSortedEntries(databaseContext, entries, saveConfiguration);
            // Load default layout
            Layout defLayout;
            LayoutHelper layoutHelper;
            try (Reader reader = getReader(lfFileName + LAYOUT_EXTENSION)) {
                layoutHelper = new LayoutHelper(reader, fileDirForDatabase, layoutPreferences, abbreviationRepository);
                defLayout = layoutHelper.getLayoutFromText();
            }
            if (defLayout != null) {
                missingFormatters.addAll(defLayout.getMissingFormatters());
                if (!missingFormatters.isEmpty()) {
                    LOGGER.warn("Missing formatters found: {}", missingFormatters);
                }
            }
            // Cache of type-specific layouts; a null value means "no layout for this type".
            Map<EntryType, Layout> layouts = new HashMap<>();
            Layout layout;
            // Reset the global export counter used by the Number layout formatter.
            Number.serialExportNumber = 0;
            for (BibEntry entry : sorted) {
                Number.serialExportNumber++; // Increment entry counter.
                // Get the layout
                EntryType type = entry.getType();
                if (layouts.containsKey(type)) {
                    layout = layouts.get(type);
                } else {
                    try (Reader reader = getReader(lfFileName + '.' + type.getName() + LAYOUT_EXTENSION)) {
                        // We try to get a type-specific layout for this entry.
                        layoutHelper = new LayoutHelper(reader, fileDirForDatabase, layoutPreferences, abbreviationRepository);
                        layout = layoutHelper.getLayoutFromText();
                        layouts.put(type, layout);
                        if (layout != null) {
                            missingFormatters.addAll(layout.getMissingFormatters());
                        }
                    } catch (IOException ex) {
                        // The exception indicates that no type-specific layout
                        // exists, so we
                        // go with the default one.
                        layout = defLayout;
                    }
                }
                // Write the entry
                if (layout != null) {
                    if (blankLineBehaviour == BlankLineBehaviour.DELETE_BLANKS) {
                        // Filter out blank lines produced by the layout before writing.
                        String[] lines = layout.doLayout(entry, databaseContext.getDatabase()).split(BLANK_LINE_PATTERN);
                        for (String line : lines) {
                            if (!line.isBlank() && !line.isEmpty()) {
                                ps.write(line + OS.NEWLINE);
                            }
                        }
                    } else {
                        ps.write(layout.doLayout(entry, databaseContext.getDatabase()));
                    }
                }
            }
            // Print footer
            Layout endLayout = null;
            try (Reader reader = getReader(lfFileName + END_INFIX + LAYOUT_EXTENSION)) {
                layoutHelper = new LayoutHelper(reader, fileDirForDatabase, layoutPreferences, abbreviationRepository);
                endLayout = layoutHelper.getLayoutFromText();
            } catch (IOException ex) {
                // If an exception was thrown, export filter doesn't have an end
                // file.
            }
            // Write footer
            if (endLayout != null) {
                ps.write(endLayout.doLayout(databaseContext, encodingToUse));
                missingFormatters.addAll(endLayout.getMissingFormatters());
            }
            // Clear custom name formatters:
            layoutPreferences.clearCustomExportNameFormatters();
            if (!missingFormatters.isEmpty() && LOGGER.isWarnEnabled()) {
                LOGGER.warn("Formatters {} not found", String.join(", ", missingFormatters));
            }
        }
    }
/**
* See if there is a name formatter file bundled with this export format.
* If so, read all the name formatters so they can be used by the filter layouts.
*/
private void readFormatterFile() {
Path formatterFile = Path.of(lfFileName + FORMATTERS_EXTENSION);
if (Files.exists(formatterFile)) {
try (Reader in = Files.newBufferedReader(formatterFile, StandardCharsets.UTF_8)) {
// Ok, we found and opened the file. Read all contents:
StringBuilder sb = new StringBuilder();
int c;
while ((c = in.read()) != -1) {
sb.append((char) c);
}
String[] lines = sb.toString().split("\n");
// Go through each line:
for (String line1 : lines) {
String line = line1.trim();
// Do not deal with empty lines:
if (line.isEmpty()) {
continue;
}
int index = line.indexOf(':'); // TODO: any need to accept escaped colons here?
if ((index > 0) && ((index + 1) < line.length())) {
String formatterName = line.substring(0, index);
String contents = line.substring(index + 1);
layoutPreferences.putCustomExportNameFormatter(formatterName, contents);
}
}
} catch (IOException ex) {
// TODO: show error message here?
LOGGER.warn("Problem opening formatter file.", ex);
}
}
}
    /**
     * Returns the layout file name (without the layout file extension).
     */
    public String getLayoutFileName() {
        return lfFileName;
    }
    /**
     * Returns the layout file name with the layout file extension appended.
     */
    public String getLayoutFileNameWithExtension() {
        return lfFileName + LAYOUT_EXTENSION;
    }
}
| 16,736
| 42.929134
| 132
|
java
|
null |
jabref-main/src/main/java/org/jabref/logic/exporter/XmpExporter.java
|
package org.jabref.logic.exporter;
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.Collections;
import java.util.List;
import java.util.Objects;
import org.jabref.logic.util.StandardFileType;
import org.jabref.logic.xmp.XmpPreferences;
import org.jabref.logic.xmp.XmpUtilWriter;
import org.jabref.model.database.BibDatabaseContext;
import org.jabref.model.entry.BibEntry;
import org.jabref.model.entry.field.InternalField;
/**
 * A custom exporter to write bib entries to a .xmp file for further processing
 * in other scenarios and applications. The xmp metadata are written in dublin
 * core format.
 */
public class XmpExporter extends Exporter {

    public static final String XMP_SPLIT_DIRECTORY_INDICATOR = "split";

    private final XmpPreferences xmpPreferences;

    public XmpExporter(XmpPreferences xmpPreferences) {
        super("xmp", "Plain XMP", StandardFileType.XMP);
        this.xmpPreferences = xmpPreferences;
    }

    /**
     * @param databaseContext the database to export from
     * @param file            the file to write to. If it contains "split", then the output is split into different files
     * @param entries         a list containing all entries that should be exported
     */
    @Override
    public void export(BibDatabaseContext databaseContext, Path file, List<BibEntry> entries) throws Exception {
        Objects.requireNonNull(databaseContext);
        Objects.requireNonNull(file);
        Objects.requireNonNull(entries);

        if (entries.isEmpty()) {
            return;
        }

        // This is a distinction between writing all entries from the supplied list to a single .xmp file,
        // or write every entry to a separate file.
        if (file.getFileName().toString().trim().equals(XMP_SPLIT_DIRECTORY_INDICATOR)) {
            for (BibEntry entry : entries) {
                // Avoid situations, where two citation keys are null
                String suffix = entry.getId() + "_" + entry.getField(InternalField.KEY_FIELD).orElse("null") + ".xmp";
                // resolveSibling places the entry file next to "file"; when "file" has no parent,
                // it returns the suffix as a relative path — no manual separator concatenation needed.
                Path entryFile = file.resolveSibling(suffix);
                this.writeBibToXmp(entryFile, Collections.singletonList(entry));
            }
        } else {
            this.writeBibToXmp(file, entries);
        }
    }

    /**
     * Writes the XMP representation (without the XMP declaration) of the given entries to the given file.
     */
    private void writeBibToXmp(Path file, List<BibEntry> entries) throws IOException {
        String xmpContent = new XmpUtilWriter(this.xmpPreferences).generateXmpStringWithoutXmpDeclaration(entries);
        Files.writeString(file, xmpContent);
    }
}
| 2,756
| 37.291667
| 121
|
java
|
null |
jabref-main/src/main/java/org/jabref/logic/exporter/XmpPdfExporter.java
|
package org.jabref.logic.exporter;
import java.nio.file.Path;
import java.util.List;
import java.util.Objects;
import org.jabref.logic.l10n.Localization;
import org.jabref.logic.util.StandardFileType;
import org.jabref.logic.xmp.XmpPreferences;
import org.jabref.logic.xmp.XmpUtilWriter;
import org.jabref.model.database.BibDatabaseContext;
import org.jabref.model.entry.BibEntry;
public class XmpPdfExporter extends Exporter {

    private final XmpPreferences xmpPreferences;

    public XmpPdfExporter(XmpPreferences xmpPreferences) {
        super("pdf", Localization.lang("XMP-annotated PDF"), StandardFileType.PDF);
        this.xmpPreferences = xmpPreferences;
    }

    /**
     * Writes the XMP metadata of the given entries into the given PDF file.
     * Files whose name does not end in ".pdf" are silently skipped.
     */
    @Override
    public void export(BibDatabaseContext databaseContext, Path pdfFile, List<BibEntry> entries) throws Exception {
        Objects.requireNonNull(databaseContext);
        Objects.requireNonNull(pdfFile);
        Objects.requireNonNull(entries);

        if (!pdfFile.toString().endsWith(".pdf")) {
            // Not a PDF file; nothing to do.
            return;
        }
        new XmpUtilWriter(xmpPreferences).writeXmp(pdfFile, entries, databaseContext.getDatabase());
    }
}
| 1,114
| 31.794118
| 115
|
java
|
null |
jabref-main/src/main/java/org/jabref/logic/externalfiles/ExternalFilesContentImporter.java
|
package org.jabref.logic.externalfiles;
import java.io.IOException;
import java.nio.file.Path;
import org.jabref.logic.importer.ImportFormatPreferences;
import org.jabref.logic.importer.OpenDatabase;
import org.jabref.logic.importer.ParserResult;
import org.jabref.logic.importer.fileformat.PdfMergeMetadataImporter;
import org.jabref.model.util.FileUpdateMonitor;
public class ExternalFilesContentImporter {

    private final ImportFormatPreferences importFormatPreferences;

    public ExternalFilesContentImporter(ImportFormatPreferences importFormatPreferences) {
        this.importFormatPreferences = importFormatPreferences;
    }

    /**
     * Extracts entry data from the given PDF file.
     * I/O failures are not thrown but returned as an error {@link ParserResult}.
     */
    public ParserResult importPDFContent(Path file) {
        try {
            PdfMergeMetadataImporter importer = new PdfMergeMetadataImporter(importFormatPreferences);
            return importer.importDatabase(file);
        } catch (IOException e) {
            return ParserResult.fromError(e);
        }
    }

    /**
     * Loads the given BibTeX file as a database.
     */
    public ParserResult importFromBibFile(Path bibFile, FileUpdateMonitor fileUpdateMonitor) throws IOException {
        return OpenDatabase.loadDatabase(bibFile, importFormatPreferences, fileUpdateMonitor);
    }
}
| 1,119
| 34
| 113
|
java
|
null |
jabref-main/src/main/java/org/jabref/logic/externalfiles/LinkedFileHandler.java
|
package org.jabref.logic.externalfiles;
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.StandardCopyOption;
import java.util.List;
import java.util.Objects;
import java.util.Optional;
import java.util.stream.Stream;
import org.jabref.logic.util.io.FileUtil;
import org.jabref.model.database.BibDatabaseContext;
import org.jabref.model.entry.BibEntry;
import org.jabref.model.entry.LinkedFile;
import org.jabref.preferences.FilePreferences;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
public class LinkedFileHandler {

    private static final Logger LOGGER = LoggerFactory.getLogger(LinkedFileHandler.class);

    private final BibDatabaseContext databaseContext;
    private final FilePreferences filePreferences;

    private final BibEntry entry;
    private final LinkedFile fileEntry;

    public LinkedFileHandler(LinkedFile fileEntry,
                             BibEntry entry,
                             BibDatabaseContext databaseContext,
                             FilePreferences filePreferences) {
        this.fileEntry = fileEntry;
        this.entry = entry;
        this.databaseContext = Objects.requireNonNull(databaseContext);
        this.filePreferences = Objects.requireNonNull(filePreferences);
    }

    /**
     * Moves the linked file into the first existing default file directory,
     * optionally into a sub-directory derived from the file directory pattern.
     * The stored link of the file entry is updated to the (relativized) new location.
     *
     * @return true if the file was moved; false if no target directory exists, the
     *         file could not be found, or a file with the same name already exists
     *         at the target (existing files are never overwritten here)
     */
    public boolean moveToDefaultDirectory() throws IOException {
        Optional<Path> targetDirectory = databaseContext.getFirstExistingFileDir(filePreferences);
        if (targetDirectory.isEmpty()) {
            return false;
        }

        Optional<Path> oldFile = fileEntry.findIn(databaseContext, filePreferences);
        if (oldFile.isEmpty()) {
            // Could not find file
            return false;
        }

        String targetDirectoryName = "";
        if (!filePreferences.getFileDirectoryPattern().isEmpty()) {
            targetDirectoryName = FileUtil.createDirNameFromPattern(
                    databaseContext.getDatabase(),
                    entry,
                    filePreferences.getFileDirectoryPattern());
        }

        Path targetPath = targetDirectory.get().resolve(targetDirectoryName).resolve(oldFile.get().getFileName());
        if (Files.exists(targetPath)) {
            // We do not overwrite already existing files
            LOGGER.debug("The file {} would have been moved to {}. However, there exists already a file with that name so we do nothing.", oldFile.get(), targetPath);
            return false;
        }
        // Make sure sub-directories exist
        Files.createDirectories(targetPath.getParent());

        // Move
        Files.move(oldFile.get(), targetPath);

        // Update path
        fileEntry.setLink(relativize(targetPath));
        return true;
    }

    /**
     * Renames the linked file to the name suggested by the file name pattern.
     * An existing file at the target name is not overwritten.
     */
    public boolean renameToSuggestedName() throws IOException {
        return renameToName(getSuggestedFileName(), false);
    }

    /**
     * Renames the linked file to the given name (within the same directory) and
     * updates the stored link accordingly.
     *
     * @param targetFileName        the new file name
     * @param overwriteExistingFile true if an already existing file at the target name may be replaced
     * @return true if the file was renamed; false if the file could not be found or
     *         the target exists and overwriting was not allowed
     */
    public boolean renameToName(String targetFileName, boolean overwriteExistingFile) throws IOException {
        Optional<Path> oldFile = fileEntry.findIn(databaseContext, filePreferences);
        if (oldFile.isEmpty()) {
            return false;
        }

        final Path oldPath = oldFile.get();
        final Path newPath = oldPath.resolveSibling(targetFileName);

        String expandedOldFilePath = oldPath.toString();
        boolean pathsDifferOnlyByCase = newPath.toString().equalsIgnoreCase(expandedOldFilePath)
                && !newPath.toString().equals(expandedOldFilePath);

        // Since Files.exists is sometimes not case-sensitive, the check pathsDifferOnlyByCase ensures that we
        // nonetheless rename files to a new name which just differs by case.
        // Evaluate the combined condition once instead of repeating it in two branches.
        boolean targetExists = Files.exists(newPath) && !pathsDifferOnlyByCase;

        if (targetExists && !overwriteExistingFile) {
            LOGGER.debug("The file {} would have been moved to {}. However, there exists already a file with that name so we do nothing.", oldPath, newPath);
            return false;
        }

        Files.createDirectories(newPath.getParent());
        if (targetExists) {
            LOGGER.debug("Overwriting existing file {}", newPath);
            Files.move(oldPath, newPath, StandardCopyOption.REPLACE_EXISTING);
        } else {
            Files.move(oldPath, newPath);
        }

        // Update path
        fileEntry.setLink(relativize(newPath));
        return true;
    }

    /**
     * Converts the given path into a path relative to the configured file directories.
     */
    private String relativize(Path path) {
        List<Path> fileDirectories = databaseContext.getFileDirectories(filePreferences);
        return FileUtil.relativize(path, fileDirectories).toString();
    }

    /**
     * Returns the suggested file name, keeping the current file extension
     * (falling back to the linked file's type when the link has no extension).
     */
    public String getSuggestedFileName() {
        String oldFileName = fileEntry.getLink();

        String extension = FileUtil.getFileExtension(oldFileName).orElse(fileEntry.getFileType());
        return getSuggestedFileName(extension);
    }

    /**
     * Builds a valid file name from the configured file name pattern and the given extension.
     */
    public String getSuggestedFileName(String extension) {
        String targetFileName = FileUtil.createFileNameFromPattern(databaseContext.getDatabase(), entry, filePreferences.getFileNamePattern()).trim()
                + '.'
                + extension;

        // Only create valid file names
        return FileUtil.getValidFileName(targetFileName);
    }

    /**
     * Check to see if a file already exists in the target directory. Search is not case sensitive.
     *
     * @return First identified path that matches an existing file. This name can be used in subsequent calls to
     * override the existing file.
     */
    public Optional<Path> findExistingFile(LinkedFile flEntry, BibEntry entry, String targetFileName) {
        // The .get() is legal without check because the method will always return a value.
        // Resolve the linked file once instead of calling findIn twice.
        Path oldFilePath = flEntry.findIn(databaseContext, filePreferences).get();
        Path targetFilePath = oldFilePath.getParent().resolve(targetFileName);

        // Check if file already exists in directory with different case.
        // This is necessary because other entries may have such a file.
        Optional<Path> matchedByDiffCase = Optional.empty();
        try (Stream<Path> stream = Files.list(oldFilePath.getParent())) {
            matchedByDiffCase = stream.filter(name -> name.toString().equalsIgnoreCase(targetFilePath.toString()))
                                      .findFirst();
        } catch (IOException e) {
            LOGGER.error("Could not get the list of files in target directory", e);
        }
        return matchedByDiffCase;
    }
}
| 6,653
| 40.074074
| 166
|
java
|
null |
jabref-main/src/main/java/org/jabref/logic/formatter/Formatters.java
|
package org.jabref.logic.formatter;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.Objects;
import java.util.Optional;
import java.util.regex.Pattern;
import org.jabref.logic.cleanup.Formatter;
import org.jabref.logic.formatter.bibtexfields.CleanupUrlFormatter;
import org.jabref.logic.formatter.bibtexfields.ClearFormatter;
import org.jabref.logic.formatter.bibtexfields.EscapeAmpersandsFormatter;
import org.jabref.logic.formatter.bibtexfields.EscapeDollarSignFormatter;
import org.jabref.logic.formatter.bibtexfields.EscapeUnderscoresFormatter;
import org.jabref.logic.formatter.bibtexfields.HtmlToLatexFormatter;
import org.jabref.logic.formatter.bibtexfields.HtmlToUnicodeFormatter;
import org.jabref.logic.formatter.bibtexfields.LatexCleanupFormatter;
import org.jabref.logic.formatter.bibtexfields.NormalizeDateFormatter;
import org.jabref.logic.formatter.bibtexfields.NormalizeMonthFormatter;
import org.jabref.logic.formatter.bibtexfields.NormalizeNamesFormatter;
import org.jabref.logic.formatter.bibtexfields.NormalizePagesFormatter;
import org.jabref.logic.formatter.bibtexfields.OrdinalsToSuperscriptFormatter;
import org.jabref.logic.formatter.bibtexfields.RegexFormatter;
import org.jabref.logic.formatter.bibtexfields.RemoveBracesFormatter;
import org.jabref.logic.formatter.bibtexfields.ShortenDOIFormatter;
import org.jabref.logic.formatter.bibtexfields.UnicodeToLatexFormatter;
import org.jabref.logic.formatter.bibtexfields.UnitsToLatexFormatter;
import org.jabref.logic.formatter.casechanger.CapitalizeFormatter;
import org.jabref.logic.formatter.casechanger.LowerCaseFormatter;
import org.jabref.logic.formatter.casechanger.SentenceCaseFormatter;
import org.jabref.logic.formatter.casechanger.TitleCaseFormatter;
import org.jabref.logic.formatter.casechanger.UnprotectTermsFormatter;
import org.jabref.logic.formatter.casechanger.UpperCaseFormatter;
import org.jabref.logic.formatter.minifier.MinifyNameListFormatter;
import org.jabref.logic.formatter.minifier.TruncateFormatter;
import org.jabref.logic.layout.format.LatexToUnicodeFormatter;
import org.jabref.logic.layout.format.ReplaceUnicodeLigaturesFormatter;
public class Formatters {

    /** Key prefix of the truncate modifier, e.g. {@code truncate20}. */
    private static final String TRUNCATE_KEY = "truncate";

    /** Matches the truncate modifier: the key immediately followed by the truncation length. */
    private static final Pattern TRUNCATE_PATTERN = Pattern.compile("\\A" + TRUNCATE_KEY + "\\d+\\z");

    private Formatters() {
    }

    /**
     * Returns the formatters converting between markup representations (HTML, LaTeX, Unicode).
     */
    public static List<Formatter> getConverters() {
        return Arrays.asList(
                new HtmlToLatexFormatter(),
                new HtmlToUnicodeFormatter(),
                new LatexToUnicodeFormatter(),
                new UnicodeToLatexFormatter()
        );
    }

    /**
     * Returns the formatters changing the letter case of a value.
     */
    public static List<Formatter> getCaseChangers() {
        return Arrays.asList(
                new CapitalizeFormatter(),
                new LowerCaseFormatter(),
                new SentenceCaseFormatter(),
                new TitleCaseFormatter(),
                new UpperCaseFormatter()
        );
    }

    /**
     * Returns all remaining formatters (cleanup, normalization, escaping, ...).
     */
    public static List<Formatter> getOthers() {
        return Arrays.asList(
                new ClearFormatter(),
                new CleanupUrlFormatter(),
                new LatexCleanupFormatter(),
                new MinifyNameListFormatter(),
                new NormalizeDateFormatter(),
                new NormalizeMonthFormatter(),
                new NormalizeNamesFormatter(),
                new NormalizePagesFormatter(),
                new OrdinalsToSuperscriptFormatter(),
                new RemoveBracesFormatter(),
                new UnitsToLatexFormatter(),
                new EscapeUnderscoresFormatter(),
                new EscapeAmpersandsFormatter(),
                new EscapeDollarSignFormatter(),
                new ShortenDOIFormatter(),
                new ReplaceUnicodeLigaturesFormatter(),
                new UnprotectTermsFormatter()
        );
    }

    /**
     * Returns all known formatters (converters, case changers, and others).
     */
    public static List<Formatter> getAll() {
        List<Formatter> all = new ArrayList<>();
        all.addAll(getConverters());
        all.addAll(getCaseChangers());
        all.addAll(getOthers());
        return all;
    }

    /**
     * Resolves a key-pattern modifier string (e.g. {@code lower}, {@code truncate20},
     * {@code regex:...}) to the corresponding formatter.
     *
     * @param modifier the modifier string; must not be null
     * @return the matching formatter, or an empty Optional if the modifier is unknown
     */
    public static Optional<Formatter> getFormatterForModifier(String modifier) {
        Objects.requireNonNull(modifier);
        switch (modifier) {
            case "lower":
                return Optional.of(new LowerCaseFormatter());
            case "upper":
                return Optional.of(new UpperCaseFormatter());
            case "capitalize":
                return Optional.of(new CapitalizeFormatter());
            case "titlecase":
                return Optional.of(new TitleCaseFormatter());
            case "sentencecase":
                return Optional.of(new SentenceCaseFormatter());
        }

        if (modifier.startsWith(RegexFormatter.KEY)) {
            String regex = modifier.substring(RegexFormatter.KEY.length());
            return Optional.of(new RegexFormatter(regex));
        } else if (TRUNCATE_PATTERN.matcher(modifier).matches()) {
            // Use the key length instead of a magic number (8) so key and pattern stay in sync.
            int truncateAfter = Integer.parseInt(modifier.substring(TRUNCATE_KEY.length()));
            return Optional.of(new TruncateFormatter(truncateAfter));
        } else {
            return getAll().stream().filter(f -> f.getKey().equals(modifier)).findAny();
        }
    }
}
| 5,206
| 41.680328
| 90
|
java
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.