repo stringlengths 1 191 ⌀ | file stringlengths 23 351 | code stringlengths 0 5.32M | file_length int64 0 5.32M | avg_line_length float64 0 2.9k | max_line_length int64 0 288k | extension_type stringclasses 1 value |
|---|---|---|---|---|---|---|
pmd | pmd-master/pmd-core/src/main/java/net/sourceforge/pmd/lang/LanguageVersion.java | /**
* BSD-style license; for more info see http://pmd.sourceforge.net/license.html
*/
package net.sourceforge.pmd.lang;
import net.sourceforge.pmd.Rule;
/**
* Represents a version of a {@link Language}. Language instances provide
* a list of supported versions ({@link Language#getVersions()}). Individual
* versions can be retrieved from their version number ({@link Language#getVersion(String)}).
*
* <p>Versions are used to limit some rules to operate on only a version range.
* For instance, a rule that suggests eliding local variable types in Java
* (replacing them with {@code var}) makes no sense if the codebase is not
* using Java 10 or later. This is determined by {@link Rule#getMinimumLanguageVersion()}
* and {@link Rule#getMaximumLanguageVersion()}. These should be set in the
* ruleset XML (they're attributes of the {@code <rule>} element), and not
* overridden.
*/
public final class LanguageVersion implements Comparable<LanguageVersion> {

    private final Language language;
    private final String version;
    // Position of this version in the language's version list; defines ordering.
    private final int index;

    LanguageVersion(Language language, String version, int index) {
        this.language = language;
        this.version = version;
        this.index = index;
    }

    /**
     * Returns the language that owns this version.
     */
    public Language getLanguage() {
        return language;
    }

    /**
     * Returns the version string. This is usually a version number, e.g.
     * {@code "1.7"} or {@code "11"}. This is used by {@link Language#getVersion(String)}.
     */
    public String getVersion() {
        return version;
    }

    /**
     * Returns the name of this language version. This is the version string
     * prefixed with the {@linkplain Language#getName() language name}.
     *
     * @return The name of this LanguageVersion.
     */
    public String getName() {
        return suffixVersion(language.getName());
    }

    /**
     * Get the short name of this LanguageVersion. This is Language short name
     * appended with the LanguageVersion version if not an empty String.
     *
     * @return The short name of this LanguageVersion.
     */
    public String getShortName() {
        return suffixVersion(language.getShortName());
    }

    /**
     * Get the terse name of this LanguageVersion. This is Language terse name
     * appended with the LanguageVersion version if not an empty String.
     *
     * @return The terse name of this LanguageVersion.
     */
    public String getTerseName() {
        return suffixVersion(language.getTerseName());
    }

    /**
     * Appends the version string to the given base name, separated by a
     * space, unless the version is empty. Shared by the three name
     * accessors above, which previously triplicated this formatting logic.
     */
    private String suffixVersion(String baseName) {
        return version.isEmpty() ? baseName : baseName + ' ' + version;
    }

    /**
     * Compare this version to another version of the same language identified
     * by the given version string.
     *
     * @param versionString The version with which to compare
     *
     * @throws IllegalArgumentException If the argument is not a valid version
     *                                  string for the parent language
     */
    public int compareToVersion(String versionString) {
        LanguageVersion otherVersion = language.getVersion(versionString);
        if (otherVersion == null) {
            throw new IllegalArgumentException(
                "No such version '" + versionString + "' for language " + language.getName());
        }
        return this.compareTo(otherVersion);
    }

    @Override
    public int compareTo(LanguageVersion o) {
        // Order by language first, so that versions of different languages
        // never interleave.
        int cmp = language.compareTo(o.getLanguage());
        if (cmp != 0) {
            return cmp;
        }
        // Within the same language, the registration index defines the order.
        return Integer.compare(this.index, o.index);
    }

    @Override
    public String toString() {
        return language.toString() + "+version:" + version;
    }
}
| 3,811 | 33.035714 | 104 | java |
pmd | pmd-master/pmd-core/src/main/java/net/sourceforge/pmd/lang/LanguageProcessor.java | /*
* BSD-style license; for more info see http://pmd.sourceforge.net/license.html
*/
package net.sourceforge.pmd.lang;
import java.util.Collections;
import java.util.List;
import org.checkerframework.checker.nullness.qual.NonNull;
import net.sourceforge.pmd.RuleSets;
import net.sourceforge.pmd.annotation.InternalApi;
import net.sourceforge.pmd.cache.AnalysisCache;
import net.sourceforge.pmd.lang.document.TextFile;
import net.sourceforge.pmd.reporting.GlobalAnalysisListener;
import net.sourceforge.pmd.util.log.MessageReporter;
/**
* Stateful object managing the analysis for a given language.
*
* @author Clément Fournier
*/
public interface LanguageProcessor extends AutoCloseable {

    /**
     * A collection of extension points implemented by the language.
     */
    @NonNull LanguageVersionHandler services();

    /**
     * Launch the analysis based on the given {@link AnalysisTask analysis task}.
     * The analysis has only run to completion once the return value has been
     * closed, as this method may launch background threads to perform the
     * analysis and return without blocking. In that case the returned Closeable
     * will join the analysis threads when being closed.
     *
     * @param analysisTask Configuration of the analysis
     *
     * @return A closeable - the analysis is only ended when the close method returns.
     */
    @NonNull AutoCloseable launchAnalysis(
        @NonNull AnalysisTask analysisTask
    );

    /**
     * The language of this processor.
     */
    @NonNull Language getLanguage();

    /**
     * The language version that was configured when creating this processor.
     */
    @NonNull LanguageVersion getLanguageVersion();

    /**
     * Configuration of an analysis, as given to {@link #launchAnalysis(AnalysisTask)}.
     * This includes eg the set of files to process (which may be of various languages),
     * the cache manager, and the rulesets.
     */
    class AnalysisTask {

        private final RuleSets rulesets;
        private final List<TextFile> files;
        private final GlobalAnalysisListener listener;
        private final int threadCount;
        private final AnalysisCache analysisCache;
        private final MessageReporter messageReporter;
        private final LanguageProcessorRegistry lpRegistry;

        /**
         * Create a new task. This constructor is internal and will be
         * called by PMD.
         */
        @InternalApi
        public AnalysisTask(RuleSets rulesets,
                            List<TextFile> files,
                            GlobalAnalysisListener listener,
                            int threadCount,
                            AnalysisCache analysisCache,
                            MessageReporter messageReporter,
                            LanguageProcessorRegistry lpRegistry) {
            this.rulesets = rulesets;
            this.files = files;
            this.listener = listener;
            this.threadCount = threadCount;
            this.analysisCache = analysisCache;
            this.messageReporter = messageReporter;
            this.lpRegistry = lpRegistry;
        }

        /** The rulesets to apply during the analysis. */
        public RuleSets getRulesets() {
            return rulesets;
        }

        /**
         * The files to process. The returned list is an unmodifiable view
         * over the list given at construction time.
         */
        public List<TextFile> getFiles() {
            return Collections.unmodifiableList(files);
        }

        /** The listener that collects analysis events and violations. */
        public GlobalAnalysisListener getListener() {
            return listener;
        }

        /** Number of threads the analysis may use. */
        public int getThreadCount() {
            return threadCount;
        }

        /** The incremental-analysis cache to use. */
        public AnalysisCache getAnalysisCache() {
            return analysisCache;
        }

        /** Reporter for user-facing messages. */
        public MessageReporter getMessageReporter() {
            return messageReporter;
        }

        /** The registry of initialized language processors for this run. */
        public LanguageProcessorRegistry getLpRegistry() {
            return lpRegistry;
        }

        /**
         * Produce a new analysis task with just different files.
         * All other configuration is carried over unchanged.
         */
        public AnalysisTask withFiles(List<TextFile> newFiles) {
            return new AnalysisTask(
                rulesets,
                newFiles,
                listener,
                threadCount,
                analysisCache,
                messageReporter,
                lpRegistry
            );
        }
    }
}
| 4,245 | 29.546763 | 88 | java |
pmd | pmd-master/pmd-core/src/main/java/net/sourceforge/pmd/lang/JvmLanguagePropertyBundle.java | /*
* BSD-style license; for more info see http://pmd.sourceforge.net/license.html
*/
package net.sourceforge.pmd.lang;
import java.io.IOException;
import org.apache.commons.lang3.StringUtils;
import org.checkerframework.checker.nullness.qual.NonNull;
import net.sourceforge.pmd.PMDConfiguration;
import net.sourceforge.pmd.internal.util.ClasspathClassLoader;
import net.sourceforge.pmd.properties.PropertyDescriptor;
import net.sourceforge.pmd.properties.PropertyFactory;
/**
* Base properties class for JVM languages that use a classpath to resolve
* references. This contributes the "auxClasspath" property.
*
* @author Clément Fournier
*/
public class JvmLanguagePropertyBundle extends LanguagePropertyBundle {

    // TODO make that a PropertyDescriptor<ClassLoader>
    public static final PropertyDescriptor<String> AUX_CLASSPATH
        = PropertyFactory.stringProperty("auxClasspath")
                         .desc("A classpath to use to resolve references to external types in the analysed sources. "
                                   + "Individual paths are separated by ; on Windows and : on other platforms. "
                                   + "All classes of the analysed project should be found on this classpath, including "
                                   + "the compiled classes corresponding to the analyzed sources themselves, and the JDK classes.")
                         .defaultValue("")
                         .build();

    // Lazily built by getAnalysisClassLoader(), or set explicitly via setClassLoader().
    private ClassLoader classLoader;

    public JvmLanguagePropertyBundle(Language language) {
        super(language);
        definePropertyDescriptor(AUX_CLASSPATH);
    }

    @Override
    public <T> void setProperty(PropertyDescriptor<T> propertyDescriptor, T value) {
        super.setProperty(propertyDescriptor, value);
        if (propertyDescriptor == AUX_CLASSPATH) {
            // The cached loader was derived from the previous classpath value;
            // invalidate it so it is rebuilt on next access.
            classLoader = null;
        }
    }

    /**
     * Set the classloader to use for analysis. This overrides the
     * setting of a classpath as a string via {@link #setProperty(PropertyDescriptor, Object)}.
     * If the parameter is null, the classloader returned by {@link #getAnalysisClassLoader()}
     * is constructed from the value of the {@link #AUX_CLASSPATH auxClasspath} property.
     */
    public void setClassLoader(ClassLoader classLoader) {
        this.classLoader = classLoader;
    }

    /**
     * Returns the classloader to use to resolve classes for this language.
     * The result is cached until the classpath property changes, or a
     * classloader is set explicitly.
     */
    public @NonNull ClassLoader getAnalysisClassLoader() {
        ClassLoader loader = classLoader;
        if (loader == null) {
            // Build the loader from the auxClasspath property, on top of the
            // classloader that loaded the PMD core classes.
            loader = PMDConfiguration.class.getClassLoader();
            String cpString = getProperty(AUX_CLASSPATH);
            if (StringUtils.isNotBlank(cpString)) {
                try {
                    loader = new ClasspathClassLoader(cpString, loader);
                } catch (IOException e) {
                    throw new IllegalArgumentException(e);
                }
            }
            classLoader = loader;
        }
        return loader;
    }
}
| 3,074 | 37.4375 | 131 | java |
pmd | pmd-master/pmd-core/src/main/java/net/sourceforge/pmd/lang/LanguageFilenameFilter.java | /**
* BSD-style license; for more info see http://pmd.sourceforge.net/license.html
*/
package net.sourceforge.pmd.lang;
import java.io.File;
import java.io.FilenameFilter;
import java.util.Collections;
import java.util.List;
import java.util.Locale;
import java.util.Set;
/**
* This is an implementation of the {@link FilenameFilter} interface which
* compares a file against a collection of Languages to see if the any are
* applicable.
*
* @author Pieter_Van_Raemdonck - Application Engineers NV/SA - www.ae.be
*/
public class LanguageFilenameFilter implements FilenameFilter {

    private final Set<Language> languages;

    /**
     * Create a LanguageFilenameFilter for a single Language.
     *
     * @param language The Language.
     */
    public LanguageFilenameFilter(Language language) {
        this(Collections.singleton(language));
    }

    /**
     * Create a LanguageFilenameFilter for a List of Languages.
     *
     * @param languages The List of Languages.
     */
    public LanguageFilenameFilter(Set<Language> languages) {
        this.languages = languages;
    }

    /**
     * Check if a file should be checked by PMD, by comparing its extension
     * case-insensitively against the extensions of all configured languages.
     * {@inheritDoc}
     */
    @Override
    public boolean accept(File dir, String name) {
        // Any source file should have a '.' in its name...
        int lastDotIndex = name.lastIndexOf('.');
        if (lastDotIndex < 0) {
            return false;
        }
        // No explicit case normalization needed: equalsIgnoreCase below is
        // already case-insensitive. (Previously the extension was uppercased
        // and then *also* compared with equalsIgnoreCase, which was redundant.)
        String extension = name.substring(1 + lastDotIndex);
        for (Language language : languages) {
            for (String ext : language.getExtensions()) {
                if (extension.equalsIgnoreCase(ext)) {
                    return true;
                }
            }
        }
        return false;
    }

    @Override
    public String toString() {
        // Join the extensions of *all* languages with ", ". The previous
        // implementation reset its separator logic per language, so the
        // extensions of two consecutive languages were concatenated without
        // a separator (e.g. "a, bc" instead of "a, b, c").
        StringBuilder buffer = new StringBuilder("(Extension is one of: ");
        boolean first = true;
        for (Language language : languages) {
            for (String extension : language.getExtensions()) {
                if (!first) {
                    buffer.append(", ");
                }
                buffer.append(extension);
                first = false;
            }
        }
        buffer.append(')');
        return buffer.toString();
    }
}
| 2,334 | 27.13253 | 85 | java |
pmd | pmd-master/pmd-core/src/main/java/net/sourceforge/pmd/lang/PlainTextLanguage.java | /*
* BSD-style license; for more info see http://pmd.sourceforge.net/license.html
*/
package net.sourceforge.pmd.lang;
import net.sourceforge.pmd.annotation.Experimental;
import net.sourceforge.pmd.lang.ast.AstInfo;
import net.sourceforge.pmd.lang.ast.Parser;
import net.sourceforge.pmd.lang.ast.Parser.ParserTask;
import net.sourceforge.pmd.lang.ast.RootNode;
import net.sourceforge.pmd.lang.ast.impl.AbstractNode;
import net.sourceforge.pmd.lang.document.TextRegion;
import net.sourceforge.pmd.lang.impl.SimpleLanguageModuleBase;
/**
* A dummy language implementation whose parser produces a single node.
* This is provided for cases where a non-null language is required, but
* the parser is not useful. This is useful eg to mock rules when no other
* language is on the classpath. This language is not exposed by {@link LanguageRegistry}
* and can only be used explicitly with {@link #getInstance()}.
*
* @author Clément Fournier
* @since 6.48.0
*/
@Experimental
@Experimental
public final class PlainTextLanguage extends SimpleLanguageModuleBase {

    private static final Language INSTANCE = new PlainTextLanguage();

    static final String TERSE_NAME = "text";

    private PlainTextLanguage() {
        // Registers a single "default" version. The extension is presumably
        // chosen to be obscure so no real file is matched to this language by
        // extension — TODO confirm.
        super(LanguageMetadata.withId(TERSE_NAME).name("Plain text")
                              .extensions("plain-text-file-goo-extension")
                              .addDefaultVersion("default"),
              new TextLvh());
    }

    /**
     * Returns the singleton instance of this language.
     */
    public static Language getInstance() {
        return INSTANCE;
    }

    /**
     * Minimal version handler: its parser just wraps the whole file in a
     * single {@link PlainTextFile} node.
     */
    private static final class TextLvh implements LanguageVersionHandler {
        @Override
        public Parser getParser() {
            return PlainTextFile::new;
        }
    }

    /**
     * The only node produced by the parser of {@link PlainTextLanguage}.
     */
    public static class PlainTextFile extends AbstractNode<PlainTextFile, PlainTextFile> implements RootNode {

        private final AstInfo<PlainTextFile> astInfo;

        PlainTextFile(ParserTask task) {
            this.astInfo = new AstInfo<>(task, this);
        }

        /** The region spanning the entire underlying document. */
        @Override
        public TextRegion getTextRegion() {
            return getTextDocument().getEntireRegion();
        }

        @Override
        public String getXPathNodeName() {
            return "TextFile";
        }

        /** Always {@code null}: this node carries no image. */
        @Override
        public String getImage() {
            return null;
        }

        @Override
        public String toString() {
            return "Plain text file (" + getEndLine() + " lines)";
        }

        @Override
        public AstInfo<? extends RootNode> getAstInfo() {
            return astInfo;
        }
    }
}
| 2,718 | 28.236559 | 110 | java |
pmd | pmd-master/pmd-core/src/main/java/net/sourceforge/pmd/lang/TokenManager.java | /**
* BSD-style license; for more info see http://pmd.sourceforge.net/license.html
*/
package net.sourceforge.pmd.lang;
import net.sourceforge.pmd.lang.ast.GenericToken;
/**
* Common interface for interacting with parser Token Managers.
*/
public interface TokenManager<T extends GenericToken<T>> {

    /**
     * Returns the next token and advances past it.
     * NOTE(review): how end-of-input is signalled (EOF token vs null) is
     * implementation-specific — confirm against the concrete token managers.
     */
    T getNextToken();
}
| 332 | 18.588235 | 79 | java |
pmd | pmd-master/pmd-core/src/main/java/net/sourceforge/pmd/lang/AbstractLanguageVersionHandler.java | /*
* BSD-style license; for more info see http://pmd.sourceforge.net/license.html
*/
package net.sourceforge.pmd.lang;
/**
* This is a generic implementation of the LanguageVersionHandler interface.
*
* @see LanguageVersionHandler
*/
public abstract class AbstractLanguageVersionHandler implements LanguageVersionHandler {
    // Intentionally empty: serves as a common base class / extension point
    // for concrete language version handlers.
}
| 335 | 20 | 88 | java |
pmd | pmd-master/pmd-core/src/main/java/net/sourceforge/pmd/lang/AbstractPmdLanguageVersionHandler.java | /**
* BSD-style license; for more info see http://pmd.sourceforge.net/license.html
*/
package net.sourceforge.pmd.lang;
/**
* Base language version handler for languages that support PMD, i.e. can build an AST
* and support AST processing stages.
*
* @author Clément Fournier
* @since 6.10.0
*/
public abstract class AbstractPmdLanguageVersionHandler extends AbstractLanguageVersionHandler {
    // Intentionally empty: marker base class for languages with full PMD
    // support (AST building), as described in the class javadoc.
}
| 406 | 21.611111 | 96 | java |
pmd | pmd-master/pmd-core/src/main/java/net/sourceforge/pmd/lang/LanguageVersionDiscoverer.java | /**
* BSD-style license; for more info see http://pmd.sourceforge.net/license.html
*/
package net.sourceforge.pmd.lang;
import java.io.File;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.stream.Collectors;
import org.apache.commons.lang3.StringUtils;
import org.checkerframework.checker.nullness.qual.Nullable;
import net.sourceforge.pmd.annotation.DeprecatedUntil700;
import net.sourceforge.pmd.util.AssertionUtil;
/**
* This class can discover the LanguageVersion of a source file. Further, every
* Language has a default LanguageVersion, which can be temporarily overridden
* here.
*/
public class LanguageVersionDiscoverer {

    private final LanguageRegistry languageRegistry;
    // Per-language overrides of the default version.
    private final Map<Language, LanguageVersion> languageToLanguageVersion = new HashMap<>();
    private LanguageVersion forcedVersion;

    /**
     * Build a new instance.
     *
     * @param forcedVersion If non-null, all files should be assigned this version.
     *                      The methods of this class still work as usual and do not
     *                      care about the forced language version.
     */
    public LanguageVersionDiscoverer(LanguageRegistry registry, LanguageVersion forcedVersion) {
        this.languageRegistry = registry;
        this.forcedVersion = forcedVersion;
    }

    /**
     * Build a new instance with no forced version.
     */
    public LanguageVersionDiscoverer(LanguageRegistry registry) {
        this(registry, null);
    }

    /**
     * Set the given LanguageVersion as the current default for its Language.
     *
     * @param languageVersion The new default for the Language.
     *
     * @return The previous default version for the language.
     */
    public LanguageVersion setDefaultLanguageVersion(LanguageVersion languageVersion) {
        AssertionUtil.requireParamNotNull("languageVersion", languageVersion);
        LanguageVersion previous = languageToLanguageVersion.put(languageVersion.getLanguage(), languageVersion);
        // If no explicit default was configured before, report the language's
        // builtin default as the previous value.
        return previous == null ? languageVersion.getLanguage().getDefaultVersion() : previous;
    }

    /**
     * Get the current default LanguageVersion for the given Language.
     *
     * @param language The Language.
     *
     * @return The current default version for the language.
     */
    public LanguageVersion getDefaultLanguageVersion(Language language) {
        Objects.requireNonNull(language);
        LanguageVersion configured = languageToLanguageVersion.get(language);
        return configured == null ? language.getDefaultVersion() : configured;
    }

    /**
     * Get the default LanguageVersion for the first Language of a given source
     * file.
     *
     * @param sourceFile The file.
     *
     * @return The currently configured LanguageVersion for the source file, or
     *         <code>null</code> if there are no supported Languages for the file.
     */
    public LanguageVersion getDefaultLanguageVersionForFile(File sourceFile) {
        return getDefaultLanguageVersionForFile(sourceFile.getName());
    }

    /**
     * Get the LanguageVersion for the first Language of a source file with the
     * given name.
     *
     * @param fileName The file name.
     *
     * @return The currently configured LanguageVersion for the source file or
     *         <code>null</code> if there are no supported Languages for the file.
     */
    public @Nullable LanguageVersion getDefaultLanguageVersionForFile(String fileName) {
        List<Language> candidates = getLanguagesForFile(fileName);
        if (candidates.isEmpty()) {
            return null;
        }
        return getDefaultLanguageVersion(candidates.get(0));
    }

    public LanguageVersion getForcedVersion() {
        return forcedVersion;
    }

    public void setForcedVersion(LanguageVersion forceLanguageVersion) {
        this.forcedVersion = forceLanguageVersion;
    }

    /**
     * Get the Languages of a given source file.
     *
     * @param sourceFile The file.
     *
     * @return The Languages for the source file, may be empty.
     *
     * @deprecated PMD 7 avoids using {@link File}.
     */
    @Deprecated
    @DeprecatedUntil700
    public List<Language> getLanguagesForFile(File sourceFile) {
        return getLanguagesForFile(sourceFile.getName());
    }

    /**
     * Get the Languages of a given source file.
     *
     * @param fileName The file name.
     *
     * @return The Languages for the source file, may be empty.
     */
    public List<Language> getLanguagesForFile(String fileName) {
        String ext = getExtension(fileName);
        return languageRegistry.getLanguages().stream()
                               .filter(lang -> lang.hasExtension(ext))
                               .collect(Collectors.toList());
    }

    /** Extracts the extension (text after the last dot) from a file name. */
    private String getExtension(String fileName) {
        return StringUtils.substringAfterLast(fileName, ".");
    }
}
| 5,378 | 32.409938 | 109 | java |
pmd | pmd-master/pmd-core/src/main/java/net/sourceforge/pmd/lang/LanguagePropertyBundle.java | /*
* BSD-style license; for more info see http://pmd.sourceforge.net/license.html
*/
package net.sourceforge.pmd.lang;
import org.checkerframework.checker.nullness.qual.NonNull;
import net.sourceforge.pmd.PMDConfiguration;
import net.sourceforge.pmd.properties.AbstractPropertySource;
import net.sourceforge.pmd.properties.PropertyDescriptor;
import net.sourceforge.pmd.properties.PropertyFactory;
import net.sourceforge.pmd.util.CollectionUtil;
/**
* A bundle of properties used by languages (see {@link Language#newPropertyBundle()}).
* This class declares language properties that are common to all languages.
* Subclasses may define more properties and provide convenient accessors to them.
*
* @author Clément Fournier
*/
public class LanguagePropertyBundle extends AbstractPropertySource {

    // todo for now i think an empty value might interpret every comment
    // as a suppression. I think it should disable suppression comments.
    public static final PropertyDescriptor<String> SUPPRESS_MARKER
        = PropertyFactory.stringProperty("suppressMarker")
                         .desc("Marker to identify suppression comments. "
                                   + "Eg a value of NOPMD will make `// NOPMD` a suppression comment in Java or JavaScript.")
                         .defaultValue(PMDConfiguration.DEFAULT_SUPPRESS_MARKER)
                         .build();

    /** Name of the language-version property. */
    public static final String LANGUAGE_VERSION = "version";

    // The version property is built per-instance because its allowed values
    // depend on the language this bundle is created for.
    private final PropertyDescriptor<LanguageVersion> languageVersion;
    private final Language language;

    /**
     * Create a new bundle for the given language.
     */
    public LanguagePropertyBundle(@NonNull Language language) {
        this.language = language;
        definePropertyDescriptor(SUPPRESS_MARKER);
        languageVersion =
            PropertyFactory.enumProperty(
                LANGUAGE_VERSION,
                // Maps each version string (e.g. "11") to its LanguageVersion.
                CollectionUtil.associateBy(language.getVersions(), LanguageVersion::getVersion)
            )
                           .desc("Language version to use for this language. See the --use-version CLI switch as well.")
                           .defaultValue(language.getDefaultVersion())
                           .build();
        definePropertyDescriptor(languageVersion);
    }

    /**
     * Set the language version from its version string (e.g. {@code "11"}).
     * The string is converted via the version property's own parser.
     */
    public void setLanguageVersion(String string) {
        setProperty(languageVersion, languageVersion.valueFrom(string));
    }

    @Override
    protected String getPropertySourceType() {
        return "Language";
    }

    @Override
    public String getName() {
        return language.getName();
    }

    /** The language this bundle was created for. */
    public Language getLanguage() {
        return language;
    }

    /** The currently configured language version. */
    public LanguageVersion getLanguageVersion() {
        return getProperty(languageVersion);
    }

    /** The currently configured suppression comment marker. */
    public String getSuppressMarker() {
        return getProperty(SUPPRESS_MARKER);
    }
}
| 2,910 | 33.654762 | 125 | java |
pmd | pmd-master/pmd-core/src/main/java/net/sourceforge/pmd/lang/LanguageRegistry.java | /*
* BSD-style license; for more info see http://pmd.sourceforge.net/license.html
*/
package net.sourceforge.pmd.lang;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Comparator;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.ServiceConfigurationError;
import java.util.ServiceLoader;
import java.util.Set;
import java.util.TreeSet;
import java.util.function.Function;
import java.util.stream.Collectors;
import org.checkerframework.checker.nullness.qual.NonNull;
import org.checkerframework.checker.nullness.qual.Nullable;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import net.sourceforge.pmd.annotation.DeprecatedUntil700;
import net.sourceforge.pmd.util.CollectionUtil;
/**
* A set of languages with convenient methods. In the PMD CLI, languages
* are loaded from the classloader of this class. These are in the registry
* {@link #PMD}. You can otherwise create different registries with different
* languages, eg filter some out.
*/
public final class LanguageRegistry implements Iterable<Language> {

    private static final Logger LOG = LoggerFactory.getLogger(LanguageRegistry.class);

    /**
     * Contains the languages that support PMD and are found on the classpath
     * of the classloader of this class. This can be used as a "default" registry.
     */
    public static final LanguageRegistry PMD = loadLanguages(LanguageRegistry.class.getClassLoader());

    // Unmodifiable, sorted by terse name (case-insensitive).
    private final Set<Language> languages;

    // Lookup indices keyed by terse name and by full name respectively.
    private final Map<String, Language> languagesById;
    private final Map<String, Language> languagesByFullName;

    /**
     * Create a new registry that contains the given set of languages.
     * @throws NullPointerException If the parameter is null
     */
    public LanguageRegistry(Set<Language> languages) {
        // Sort so that iteration order is deterministic across platforms.
        this.languages = languages.stream()
                                  .sorted(Comparator.comparing(Language::getTerseName, String::compareToIgnoreCase))
                                  .collect(CollectionUtil.toUnmodifiableSet());
        // Note: the lookup maps are built from the (unsorted) parameter,
        // which contains the same elements.
        this.languagesById = CollectionUtil.associateBy(languages, Language::getTerseName);
        this.languagesByFullName = CollectionUtil.associateBy(languages, Language::getName);
    }

    /**
     * Creates a language registry containing a single language. Note
     * that this may be inconvertible to a {@link LanguageProcessorRegistry}
     * if the language depends on other languages.
     */
    public static LanguageRegistry singleton(Language l) {
        return new LanguageRegistry(Collections.singleton(l));
    }

    /**
     * Creates a language registry containing the given language and
     * its dependencies, fetched from this language registry or the
     * parameter.
     *
     * @throws IllegalStateException If dependencies cannot be fulfilled.
     */
    public LanguageRegistry getDependenciesOf(Language lang) {
        Set<Language> result = new HashSet<>();
        result.add(lang);
        addDepsOrThrow(lang, result);
        return new LanguageRegistry(result);
    }

    // Recursively adds the transitive dependencies of l to the given set,
    // resolving dependency IDs against this registry.
    private void addDepsOrThrow(Language l, Set<Language> languages) {
        for (String depId : l.getDependencies()) {
            Language dep = getLanguageById(depId);
            if (dep == null) {
                throw new IllegalStateException(
                    "Cannot find language " + depId + " in " + this);
            }
            // Only recurse on first insertion, so this terminates even if
            // the dependency graph has cycles.
            if (languages.add(dep)) {
                addDepsOrThrow(dep, languages);
            }
        }
    }

    @Override
    public @NonNull Iterator<Language> iterator() {
        return languages.iterator();
    }

    /**
     * Create a new registry by loading the languages registered via {@link ServiceLoader}
     * on the classpath of the given classloader.
     *
     * @param classLoader A classloader
     */
    public static @NonNull LanguageRegistry loadLanguages(ClassLoader classLoader) {
        // sort languages by terse name. Avoiding differences in the order of languages
        // across JVM versions / OS.
        Set<Language> languages = new TreeSet<>(Comparator.comparing(Language::getTerseName, String::compareToIgnoreCase));
        ServiceLoader<Language> languageLoader = ServiceLoader.load(Language.class, classLoader);
        Iterator<Language> iterator = languageLoader.iterator();
        while (true) {
            // this loop is weird, but both hasNext and next may throw ServiceConfigurationError,
            // it's more robust that way
            try {
                if (iterator.hasNext()) {
                    Language language = iterator.next();
                    languages.add(language);
                } else {
                    break;
                }
            } catch (UnsupportedClassVersionError | ServiceConfigurationError e) {
                // Some languages require java8 and are therefore only available
                // if java8 or later is used as runtime.
                LOG.warn("Cannot load PMD language, ignored", e);
            }
        }
        return new LanguageRegistry(languages);
    }

    /**
     * Returns a set of all the known languages. The ordering of the languages
     * is by terse name.
     */
    public Set<Language> getLanguages() {
        return languages;
    }

    /**
     * Returns a language from its {@linkplain Language#getName() full name}
     * (eg {@code "Java"}). This is case sensitive.
     *
     * @param languageName Language name
     *
     * @return A language, or null if the name is unknown
     *
     * @deprecated Use {@link #getLanguageByFullName(String) LanguageRegistry.PMD.getLanguageByFullName}
     */
    @Deprecated
    @DeprecatedUntil700
    public static Language getLanguage(String languageName) {
        return PMD.getLanguageByFullName(languageName);
    }

    /**
     * Returns a language from its {@linkplain Language#getId() ID}
     * (eg {@code "java"}). This is case-sensitive.
     *
     * @param langId Language ID
     *
     * @return A language, or null if the name is unknown, or the parameter is null
     */
    public @Nullable Language getLanguageById(@Nullable String langId) {
        // A null key is tolerated by the map and simply yields null.
        return languagesById.get(langId);
    }

    /**
     * Returns a language version from its {@linkplain Language#getId() language ID}
     * (eg {@code "java"}). This is case-sensitive.
     *
     * @param langId Language ID
     * @param version Version ID; if null, the language's default version is returned
     *
     * @return A language, or null if the name is unknown
     */
    public @Nullable LanguageVersion getLanguageVersionById(@Nullable String langId, @Nullable String version) {
        Language lang = languagesById.get(langId);
        if (lang == null) {
            return null;
        }
        return version == null ? lang.getDefaultVersion()
                               : lang.getVersion(version);
    }

    /**
     * Returns a language from its {@linkplain Language#getName() full name}
     * (eg {@code "Java"}). This is case sensitive.
     *
     * @param languageName Language name
     *
     * @return A language, or null if the name is unknown
     */
    public @Nullable Language getLanguageByFullName(String languageName) {
        return languagesByFullName.get(languageName);
    }

    /**
     * Returns a language from its {@linkplain Language#getTerseName() terse name}
     * (eg {@code "java"}). This is case sensitive.
     *
     * @param terseName Language terse name
     *
     * @return A language, or null if the name is unknown
     *
     * @deprecated Use {@link #getLanguageById(String) LanguageRegistry.PMD.getLanguageById}.
     */
    @Deprecated
    @DeprecatedUntil700
    public static @Nullable Language findLanguageByTerseName(@Nullable String terseName) {
        return PMD.getLanguageById(terseName);
    }

    /**
     * Returns all languages that support the given extension.
     *
     * @param extensionWithoutDot A file extension (without '.' prefix)
     *
     * @deprecated Not replaced, extension will be extended to match full name in PMD 7.
     */
    @Deprecated
    @DeprecatedUntil700
    public static List<Language> findByExtension(String extensionWithoutDot) {
        List<Language> languages = new ArrayList<>();
        for (Language language : PMD.getLanguages()) {
            if (language.hasExtension(extensionWithoutDot)) {
                languages.add(language);
            }
        }
        return languages;
    }

    /**
     * Formats the set of languages with the given formatter, sort and
     * join everything with commas. Convenience method.
     */
    public @NonNull String commaSeparatedList(Function<? super Language, String> languageToString) {
        return getLanguages().stream().map(languageToString).sorted().collect(Collectors.joining(", "));
    }

    @Override
    public String toString() {
        return "LanguageRegistry(" + commaSeparatedList(Language::getId) + ")";
    }
}
| 8,980 | 35.21371 | 123 | java |
pmd | pmd-master/pmd-core/src/main/java/net/sourceforge/pmd/lang/LanguageProcessorRegistry.java | /*
* BSD-style license; for more info see http://pmd.sourceforge.net/license.html
*/
package net.sourceforge.pmd.lang;
import static net.sourceforge.pmd.util.StringUtil.CaseConvention.SCREAMING_SNAKE_CASE;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Locale;
import java.util.Map;
import java.util.Properties;
import java.util.Set;
import org.checkerframework.checker.nullness.qual.NonNull;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import net.sourceforge.pmd.internal.util.IOUtil;
import net.sourceforge.pmd.properties.PropertyDescriptor;
import net.sourceforge.pmd.properties.PropertySource;
import net.sourceforge.pmd.util.CollectionUtil;
import net.sourceforge.pmd.util.StringUtil.CaseConvention;
import net.sourceforge.pmd.util.log.MessageReporter;
/**
* Stores all currently initialized {@link LanguageProcessor}s during analysis.
*
* @author Clément Fournier
*/
public final class LanguageProcessorRegistry implements AutoCloseable {
private static final Logger LOG = LoggerFactory.getLogger(LanguageProcessorRegistry.class);
private final Map<Language, LanguageProcessor> processors;
private final LanguageRegistry languages;
    /**
     * Build a registry from a set of already-created processors, checking
     * that the dependencies of every language ({@link Language#getDependencies()})
     * are satisfied within this registry.
     *
     * @throws IllegalStateException if some language depends on a language
     *                               that is not present in the given set
     */
    private LanguageProcessorRegistry(Set<LanguageProcessor> processors) {
        // Index the processors by language; the map is frozen afterwards.
        this.processors = Collections.unmodifiableMap(
            CollectionUtil.associateBy(processors, LanguageProcessor::getLanguage)
        );
        this.languages = new LanguageRegistry(this.processors.keySet());
        // Fail fast if any language's dependency is missing from this registry.
        for (Language language : languages.getLanguages()) {
            for (String id : language.getDependencies()) {
                if (languages.getLanguageById(id) == null) {
                    throw new IllegalStateException(
                        "Language " + language.getId() + " has unsatisfied dependencies: " + id + " is not loaded"
                    );
                }
            }
        }
    }
    /**
     * Return the languages that are registered in this instance.
     *
     * @return the registry of languages with initialized processors
     */
    public LanguageRegistry getLanguages() {
        return languages;
    }
/**
* Return the processor for a given language.
*
* @param l a language
*
* @throws IllegalArgumentException if the language is not part of this registry
*/
public @NonNull LanguageProcessor getProcessor(Language l) {
LanguageProcessor obj = processors.get(l);
if (obj == null) {
throw new IllegalArgumentException("Language " + l.getId() + " is not initialized in " + this);
}
return obj;
}
/**
* Close all processors in this registry.
*
* @throws LanguageTerminationException If closing any of the processors threw something
*/
@Override
public void close() throws LanguageTerminationException {
Exception e = IOUtil.closeAll(processors.values());
if (e != null) {
throw new LanguageTerminationException(e);
}
}
    /**
     * Create a registry with a single language processor.
     *
     * @param lp the only processor of the new registry
     *
     * @throws IllegalStateException If the language depends on other languages,
     *                               as they are then not included in this registry (see
     *                               {@link Language#getDependencies()}).
     */
    public static LanguageProcessorRegistry singleton(@NonNull LanguageProcessor lp) {
        return new LanguageProcessorRegistry(Collections.singleton(lp));
    }
    /**
     * Create a new instance by creating a processor for each language in
     * the given language registry. Each processor is created using the property
     * bundle that is in the map, if present. Language properties are defaulted
     * to environment variables if they are not already overridden.
     *
     * @throws IllegalStateException If any language in the registry depends on
     *                               languages that are not found in it, or that
     *                               could not be instantiated (see {@link Language#getDependencies()}).
     * @throws IllegalArgumentException If some entry in the map maps a language
     *                                  to an incompatible property bundle
     */
    public static LanguageProcessorRegistry create(LanguageRegistry registry,
                                                   Map<Language, LanguagePropertyBundle> languageProperties,
                                                   MessageReporter messageReporter) {
        Set<LanguageProcessor> processors = new HashSet<>();
        for (Language language : registry) {
            LanguagePropertyBundle properties = languageProperties.getOrDefault(language, language.newPropertyBundle());
            if (!properties.getLanguage().equals(language)) {
                throw new IllegalArgumentException("Mismatched language");
            }
            try {
                // Environment variables only act as defaults, so they are
                // applied before the processor is created (see method below).
                readLanguagePropertiesFromEnv(properties, messageReporter);
                processors.add(language.createProcessor(properties));
            } catch (IllegalArgumentException e) {
                // TODO(review): a language that fails to initialize is only
                // reported; decide whether this should abort the analysis.
                messageReporter.error(e);
            }
        }
        return new LanguageProcessorRegistry(processors);
    }
// TODO this should be reused when implementing the CLI
public static Map<Language, LanguagePropertyBundle> derivePropertiesFromStrings(
Map<Language, Properties> stringProperties,
MessageReporter reporter
) {
Map<Language, LanguagePropertyBundle> typedProperties = new HashMap<>();
stringProperties.forEach((l, props) -> {
LanguagePropertyBundle properties = l.newPropertyBundle();
setLanguageProperties(stringProperties, reporter, l, properties);
});
return typedProperties;
}
private static void setLanguageProperties(Map<Language, Properties> languageProperties, MessageReporter messageReporter, Language language, LanguagePropertyBundle properties) {
Properties props = languageProperties.get(language);
if (props != null) {
props.forEach((k, v) -> {
PropertyDescriptor<?> descriptor = properties.getPropertyDescriptor(k.toString());
if (descriptor == null) {
messageReporter.error("No property {0} for language {1}", k, language.getId());
return;
}
trySetPropertyCapture(properties, descriptor, v.toString(), messageReporter);
});
}
}
private static <T> void trySetPropertyCapture(PropertySource source,
PropertyDescriptor<T> propertyDescriptor,
String propertyValue,
MessageReporter reporter) {
try {
T value = propertyDescriptor.valueFrom(propertyValue);
source.setProperty(propertyDescriptor, value);
} catch (IllegalArgumentException e) {
reporter.error("Cannot set property {0} to {1}: {2}",
propertyDescriptor.name(),
propertyValue,
e.getMessage());
}
}
private static void readLanguagePropertiesFromEnv(LanguagePropertyBundle props, MessageReporter reporter) {
for (PropertyDescriptor<?> propertyDescriptor : props.getPropertyDescriptors()) {
String envVarName = getEnvironmentVariableName(props.getLanguage(), propertyDescriptor);
String propertyValue = System.getenv(envVarName);
if (propertyValue != null) {
if (props.isPropertyOverridden(propertyDescriptor)) {
// Env vars are a default, they don't override other ways to set properties.
// If the property has already been set, don't set it.
LOG.debug(
"Property {} for lang {} is already set, ignoring environment variable {}={}",
propertyDescriptor.name(),
props.getLanguage().getId(),
envVarName,
propertyValue
);
} else {
LOG.debug(
"Property {} for lang {} is not yet set, using environment variable {}={}",
propertyDescriptor.name(),
props.getLanguage().getId(),
envVarName,
propertyValue
);
trySetPropertyCapture(props, propertyDescriptor, propertyValue, reporter);
}
}
}
}
/**
* Returns the environment variable name that a user can set in order to override the default value.
*/
private static String getEnvironmentVariableName(Language lang, PropertyDescriptor<?> propertyDescriptor) {
return "PMD_" + lang.getId().toUpperCase(Locale.ROOT) + "_"
+ CaseConvention.CAMEL_CASE.convertTo(SCREAMING_SNAKE_CASE, propertyDescriptor.name());
}
@Override
public String toString() {
return "LanguageProcessorRegistry("
+ new LanguageRegistry(processors.keySet()).commaSeparatedList(Language::getId)
+ ")";
}
    /**
     * An exception that occurs during the closing of a {@link LanguageProcessor}.
     */
    public static class LanguageTerminationException extends RuntimeException {

        /**
         * Create a new exception wrapping the failure that occurred
         * while closing a processor.
         *
         * @param cause the exception thrown during {@link LanguageProcessor#close()}
         */
        public LanguageTerminationException(Throwable cause) {
            super(cause);
        }
    }
}
| 9,671 | 38.966942 | 180 | java |
pmd | pmd-master/pmd-core/src/main/java/net/sourceforge/pmd/lang/ast/TextAvailableNode.java | /*
* BSD-style license; for more info see http://pmd.sourceforge.net/license.html
*/
package net.sourceforge.pmd.lang.ast;
import net.sourceforge.pmd.lang.document.Chars;
import net.sourceforge.pmd.lang.document.TextDocument;
import net.sourceforge.pmd.lang.document.TextRegion;
import net.sourceforge.pmd.lang.rule.xpath.NoAttribute;
/**
* Refinement of {@link Node} for nodes that can provide the underlying
* source text.
*
* @since 7.0.0
*/
public interface TextAvailableNode extends Node {

    /**
     * Returns the exact region of text delimiting the node in the underlying
     * text document. Note that {@link #getReportLocation()} does not need
     * to match this region. {@link #getReportLocation()} can be scoped down
     * to a specific token, eg the class identifier. This region uses
     * the translated coordinate system, ie the coordinate system of
     * {@link #getTextDocument()}.
     *
     * @return the text region of this node in the translated document
     */
    @Override
    TextRegion getTextRegion();

    /**
     * Returns the original source code underlying this node, before
     * any escapes have been translated. In particular, for a {@link RootNode},
     * returns the whole text of the file.
     *
     * @return the raw, untranslated source text of this node
     *
     * @see TextDocument#sliceOriginalText(TextRegion)
     */
    @NoAttribute
    default Chars getOriginalText() {
        return getTextDocument().sliceOriginalText(getTextRegion());
    }

    /**
     * Returns the source code underlying this node, after any escapes
     * have been translated. In particular, for a {@link RootNode}, returns
     * the whole text of the file.
     *
     * @return the translated source text of this node
     *
     * @see TextDocument#sliceTranslatedText(TextRegion)
     */
    @NoAttribute
    default Chars getText() {
        return getTextDocument().sliceTranslatedText(getTextRegion());
    }
}
| 1,765 | 29.448276 | 79 | java |
pmd | pmd-master/pmd-core/src/main/java/net/sourceforge/pmd/lang/ast/AstInfo.java | /*
* BSD-style license; for more info see http://pmd.sourceforge.net/license.html
*/
package net.sourceforge.pmd.lang.ast;
import java.util.Collections;
import java.util.Map;
import org.checkerframework.checker.nullness.qual.NonNull;
import net.sourceforge.pmd.annotation.Experimental;
import net.sourceforge.pmd.lang.LanguageProcessor;
import net.sourceforge.pmd.lang.LanguageProcessorRegistry;
import net.sourceforge.pmd.lang.ast.Parser.ParserTask;
import net.sourceforge.pmd.lang.document.TextDocument;
import net.sourceforge.pmd.util.AssertionUtil;
/**
* The output of {@link Parser#parse(ParserTask)}.
*
* @param <T> Type of root nodes
*/
public final class AstInfo<T extends RootNode> {

    private final TextDocument textDocument;
    private final T rootNode;
    private final LanguageProcessorRegistry lpReg;
    private final Map<Integer, String> suppressionComments;

    /**
     * Build an AstInfo from the parser task that produced the tree,
     * with an empty suppression map.
     */
    public AstInfo(ParserTask task, T rootNode) {
        this(task.getTextDocument(), rootNode, task.getLpRegistry(), Collections.emptyMap());
    }

    private AstInfo(TextDocument textDocument,
                    T rootNode,
                    LanguageProcessorRegistry lpReg,
                    Map<Integer, String> suppressionComments) {
        this.textDocument = AssertionUtil.requireParamNotNull("text document", textDocument);
        this.rootNode = AssertionUtil.requireParamNotNull("root node", rootNode);
        // Validate like the other parameters: previously a null registry
        // was only detected later, in getLanguageProcessor.
        this.lpReg = AssertionUtil.requireParamNotNull("language processor registry", lpReg);
        this.suppressionComments = AssertionUtil.requireParamNotNull("suppress map", suppressionComments);
    }

    /** Returns the root node of the parsed tree. */
    public T getRootNode() {
        return rootNode;
    }

    /**
     * Returns the text document that was parsed.
     * This has info like language version, etc.
     */
    public @NonNull TextDocument getTextDocument() {
        return textDocument;
    }

    /**
     * Returns the language processor that parsed the tree.
     */
    public LanguageProcessor getLanguageProcessor() {
        return lpReg.getProcessor(textDocument.getLanguageVersion().getLanguage());
    }

    /**
     * Returns the map of line numbers to suppression / review comments.
     * Only single line comments are considered, that start with the configured
     * "suppressMarker", which by default is "PMD". The text after the
     * suppressMarker is used as a "review comment" and included in this map.
     *
     * <p>
     * This map is later used to determine, if a violation is being suppressed.
     * It is suppressed, if the line of the violation is contained in this suppress map.
     *
     * @return map of the suppress lines with the corresponding review comments.
     */
    @Experimental
    public Map<Integer, String> getSuppressionComments() {
        return suppressionComments;
    }

    /**
     * Returns a copy of this AstInfo that uses the given suppression map.
     *
     * @param map map of line numbers to review comments
     */
    @Experimental
    public AstInfo<T> withSuppressMap(Map<Integer, String> map) {
        return new AstInfo<>(
            textDocument,
            rootNode,
            lpReg,
            map
        );
    }
}
| 2,976 | 30.336842 | 106 | java |
pmd | pmd-master/pmd-core/src/main/java/net/sourceforge/pmd/lang/ast/Node.java | /*
* BSD-style license; for more info see http://pmd.sourceforge.net/license.html
*/
package net.sourceforge.pmd.lang.ast;
import java.util.Collections;
import java.util.Comparator;
import java.util.Iterator;
import java.util.List;
import java.util.Objects;
import org.checkerframework.checker.nullness.qual.NonNull;
import org.checkerframework.checker.nullness.qual.Nullable;
import net.sourceforge.pmd.annotation.DeprecatedUntil700;
import net.sourceforge.pmd.lang.LanguageVersion;
import net.sourceforge.pmd.lang.ast.NodeStream.DescendantNodeStream;
import net.sourceforge.pmd.lang.ast.internal.StreamImpl;
import net.sourceforge.pmd.lang.document.FileLocation;
import net.sourceforge.pmd.lang.document.TextDocument;
import net.sourceforge.pmd.lang.document.TextRegion;
import net.sourceforge.pmd.lang.rule.xpath.Attribute;
import net.sourceforge.pmd.lang.rule.xpath.NoAttribute;
import net.sourceforge.pmd.lang.rule.xpath.XPathVersion;
import net.sourceforge.pmd.lang.rule.xpath.impl.AttributeAxisIterator;
import net.sourceforge.pmd.lang.rule.xpath.impl.XPathHandler;
import net.sourceforge.pmd.lang.rule.xpath.internal.DeprecatedAttrLogger;
import net.sourceforge.pmd.lang.rule.xpath.internal.SaxonXPathRuleQuery;
import net.sourceforge.pmd.reporting.Reportable;
import net.sourceforge.pmd.util.DataMap;
import net.sourceforge.pmd.util.DataMap.DataKey;
/**
* Root interface for all AST nodes. This interface provides only the API
* shared by all AST implementations in PMD language modules. This includes for now:
* <ul>
* <li>Tree traversal methods: {@link #getParent()}, {@link #getIndexInParent()},
* {@link #getChild(int)}, and {@link #getNumChildren()}. These four basic
* operations are used to implement more specific traversal operations,
* like {@link #firstChild(Class)}, and {@link NodeStream}s.
* <li>The API used to describe nodes in a form understandable by XPath expressions:
* {@link #getXPathNodeName()}, {@link #getXPathAttributesIterator()}
* <li>Location metadata: eg {@link #getBeginLine()}, {@link #getBeginColumn()}
* <li>An extensible metadata store: {@link #getUserMap()}
* </ul>
*
* <p>Every language implementation must publish a sub-interface of Node
* which serves as a supertype for all nodes of that language (e.g.
* pmd-java provides JavaNode, pmd-apex provides ApexNode, etc.). It is
* assumed in many places that the {@link #getChild(int)} and {@link #getParent()}
* method return an instance of this sub-interface. For example,
* no JSP node should have a Java node as its child. Embedding nodes from
* different languages will not be done via these methods, and conforming
* implementations should ensure that every node returned by these methods
* are indeed of the same type. Possibly, a type parameter will be added to
* the Node interface in 7.0.0 to enforce it at compile-time.
*/
public interface Node extends Reportable {
/**
* Compares nodes according to their location in the file.
* Note that this comparator is not <i>consistent with equals</i>
* (see {@link Comparator}) as some nodes have the same location.
*/
Comparator<Node> COORDS_COMPARATOR =
Comparator.comparing(Node::getReportLocation, FileLocation.COMPARATOR);
/**
* Returns a string token, usually filled-in by the parser, which describes some textual characteristic of this
* node. This is usually an identifier, but you should check that using the Designer. On most nodes though, this
* method returns {@code null}.
*
* @deprecated Should be replaced with methods that have more specific
* names in node classes.
*/
@Deprecated
@DeprecatedUntil700
default String getImage() {
return null;
}
/**
* Returns true if this node's image is equal to the given string.
*
* @param image The image to check
*
* @deprecated See {@link #getImage()}
*/
@Deprecated
@DeprecatedUntil700
default boolean hasImageEqualTo(String image) {
return Objects.equals(getImage(), image);
}
/**
* Compare the coordinates of this node with the other one as if
* with {@link #COORDS_COMPARATOR}. The result is useless
* if both nodes are not from the same tree.
*
* @param other Other node
*
* @return A positive integer if this node comes AFTER the other,
* 0 if they have the same position, a negative integer if this
* node comes BEFORE the other
*/
default int compareLocation(Node other) {
return COORDS_COMPARATOR.compare(this, other);
}
/**
* {@inheritDoc}
* This is not necessarily the exact boundaries of the node in the
* text. Nodes that can provide exact position information do so
* using a {@link TextRegion}, by implementing {@link TextAvailableNode}.
*
* <p>Use this instead of {@link #getBeginColumn()}/{@link #getBeginLine()}, etc.
*/
@Override
default FileLocation getReportLocation() {
return getAstInfo().getTextDocument().toLocation(getTextRegion());
}
/**
* Returns a region of text delimiting the node in the underlying
* text document. This does not necessarily match the
* {@link #getReportLocation() report location}.
*/
TextRegion getTextRegion();
// Those are kept here because they're handled specially as XPath
// attributes, for now
@Override
default int getBeginLine() {
return Reportable.super.getBeginLine();
}
@Override
default int getBeginColumn() {
return Reportable.super.getBeginColumn();
}
@Override
default int getEndLine() {
return Reportable.super.getEndLine();
}
@Override
default int getEndColumn() {
return Reportable.super.getEndColumn();
}
/**
* Returns true if this node is considered a boundary by traversal
* methods. Traversal methods such as {@link #descendants()}
* don't look past such boundaries by default, which is usually the
* expected thing to do. For example, in Java, lambdas and nested
* classes are considered find boundaries.
*
* <p>Note: This attribute is deprecated for XPath queries. It is not useful
* for XPath queries and will be removed with PMD 7.0.0.
*
* @return True if this node is a find boundary
*
* @see DescendantNodeStream#crossFindBoundaries(boolean)
*/
@NoAttribute
default boolean isFindBoundary() {
return false;
}
/**
* Returns the n-th parent or null if there are less than {@code n} ancestors.
*
* <pre>{@code
* getNthParent(1) == jjtGetParent
* }</pre>
*
* @param n how many ancestors to iterate over.
* @return the n-th parent or null.
* @throws IllegalArgumentException if {@code n} is negative or zero.
*
* @deprecated Use node stream methods: {@code node.ancestors().get(n-1)}
*/
@Deprecated
@DeprecatedUntil700
default Node getNthParent(int n) {
return ancestors().get(n - 1);
}
/**
* Traverses up the tree to find the first parent instance of type parentType or one of its subclasses.
*
* @param parentType Class literal of the type you want to find
* @param <T> The type you want to find
* @return Node of type parentType. Returns null if none found.
*
* @deprecated Use node stream methods: {@code node.ancestors(parentType).first()}
*/
@Deprecated
@DeprecatedUntil700
default <T extends Node> T getFirstParentOfType(Class<? extends T> parentType) {
return this.<T>ancestors(parentType).first();
}
/**
* Traverses up the tree to find all of the parent instances of type parentType or one of its subclasses. The nodes
* are ordered deepest-first.
*
* @param parentType Class literal of the type you want to find
* @param <T> The type you want to find
* @return List of parentType instances found.
*
* @deprecated Use node stream methods: {@code node.ancestors(parentType).toList()}.
* Most usages don't really need a list though, eg you can iterate the node stream instead
*/
@Deprecated
@DeprecatedUntil700
default <T extends Node> List<T> getParentsOfType(Class<? extends T> parentType) {
return this.<T>ancestors(parentType).toList();
}
/**
* Traverses the children to find all the instances of type childType or one of its subclasses.
*
* @param childType class which you want to find.
* @return List of all children of type childType. Returns an empty list if none found.
* @see #findDescendantsOfType(Class) if traversal of the entire tree is needed.
*
* @deprecated Use node stream methods: {@code node.children(childType).toList()}.
* Most usages don't really need a list though, eg you can iterate the node stream instead
*/
@Deprecated
@DeprecatedUntil700
default <T extends Node> List<T> findChildrenOfType(Class<? extends T> childType) {
return this.<T>children(childType).toList();
}
/**
* Traverses down the tree to find all the descendant instances of type descendantType without crossing find
* boundaries.
*
* @param targetType class which you want to find.
* @return List of all children of type targetType. Returns an empty list if none found.
*
* @deprecated Use node stream methods: {@code node.descendants(targetType).toList()}.
* Most usages don't really need a list though, eg you can iterate the node stream instead
*/
@Deprecated
@DeprecatedUntil700
default <T extends Node> List<T> findDescendantsOfType(Class<? extends T> targetType) {
return this.<T>descendants(targetType).toList();
}
/**
* Traverses down the tree to find all the descendant instances of type
* descendantType.
*
* @param targetType
* class which you want to find.
* @param crossFindBoundaries
* if <code>false</code>, recursion stops for nodes for which
* {@link #isFindBoundary()} is <code>true</code>
* @return List of all matching descendants
*
* @deprecated Use node stream methods: {@code node.descendants(targetType).crossFindBoundaries(b).toList()}.
* Most usages don't really need a list though, eg you can iterate the node stream instead
*/
@Deprecated
@DeprecatedUntil700
default <T extends Node> List<T> findDescendantsOfType(Class<? extends T> targetType, boolean crossFindBoundaries) {
return this.<T>descendants(targetType).crossFindBoundaries(crossFindBoundaries).toList();
}
/**
* Traverses the children to find the first instance of type childType.
*
* @param childType class which you want to find.
* @return Node of type childType. Returns <code>null</code> if none found.
* @see #getFirstDescendantOfType(Class) if traversal of the entire tree is needed.
*
* @deprecated Use {@link #firstChild(Class)}
*/
@Deprecated
@DeprecatedUntil700
default <T extends Node> T getFirstChildOfType(Class<? extends T> childType) {
return firstChild(childType);
}
/**
* Traverses down the tree to find the first descendant instance of type descendantType without crossing find
* boundaries.
*
* @param descendantType class which you want to find.
* @return Node of type descendantType. Returns <code>null</code> if none found.
*
* @deprecated Use node stream methods: {@code node.descendants(targetType).first()}.
*/
@Deprecated
@DeprecatedUntil700
default <T extends Node> T getFirstDescendantOfType(Class<? extends T> descendantType) {
return descendants(descendantType).first();
}
/**
* Finds if this node contains a descendant of the given type without crossing find boundaries.
*
* @param type the node type to search
* @return <code>true</code> if there is at least one descendant of the given type
*
* @deprecated Use node stream methods: {@code node.descendants(targetType).nonEmpty()}.
*/
@Deprecated
@DeprecatedUntil700
default <T extends Node> boolean hasDescendantOfType(Class<? extends T> type) {
return descendants(type).nonEmpty();
}
/**
* Returns all the nodes matching the xpath expression.
*
* @param xpathString the expression to check
* @return List of all matching nodes. Returns an empty list if none found.
* @deprecated This is very inefficient and should not be used in new code. PMD 7.0.0 will remove
* support for this method.
*/
@Deprecated
default List<Node> findChildNodesWithXPath(String xpathString) {
return new SaxonXPathRuleQuery(
xpathString,
XPathVersion.DEFAULT,
Collections.emptyMap(),
XPathHandler.noFunctionDefinitions(),
// since this method will be removed, we don't log anything anymore
DeprecatedAttrLogger.noop()
).evaluate(this);
}
/**
* Returns a data map used to store additional information on this node.
*
* @return The user data map of this node
*
* @since 6.22.0
*/
DataMap<DataKey<?, ?>> getUserMap();
/**
* Returns the text document from which this tree was parsed. This
* means, that the whole file text is in memory while the AST is.
*
* @return The text document
*/
default @NonNull TextDocument getTextDocument() {
return getAstInfo().getTextDocument();
}
/**
* Returns the parent of this node, or null if this is the {@linkplain RootNode root}
* of the tree.
*
* @return The parent of this node
*
* @since 6.21.0
*/
Node getParent();
/**
* Returns the child of this node at the given index.
*
* @throws IndexOutOfBoundsException if the index is negative or greater than {@link #getNumChildren()}.
* @since 6.21.0
*/
Node getChild(int index);
/**
* Returns the number of children of this node.
*
* @since 6.21.0
*/
int getNumChildren();
/**
* Returns the index of this node in its parent's children. If this
* node is a {@linkplain RootNode root node}, returns -1.
*
* @return The index of this node in its parent's children
*
* @since 6.21.0
*/
int getIndexInParent();
/**
* Calls back the visitor's visit method corresponding to the runtime
* type of this Node. This should usually be preferred to calling
* a {@code visit} method directly (usually the only calls to those
* are in the implementations of this {@code acceptVisitor} method).
*
* @param <R> Return type of the visitor
* @param <P> Parameter type of the visitor
* @param visitor Visitor to dispatch
* @param data Parameter to the visit
*
* @return What the visitor returned. If this node doesn't recognize
* the type of the visitor, returns {@link AstVisitor#cannotVisit(Node, Object) visitor.cannotVisit(this, data)}.
*
* @implSpec A typical implementation will check the type of the visitor to
* be that of the language specific visitor, then call the most specific
* visit method of this Node. This is typically implemented by having
* a different override per concrete node class (no shortcuts).
*
* The default implementation calls back {@link AstVisitor#cannotVisit(Node, Object)}.
*
* @since 7.0.0
*/
default <P, R> R acceptVisitor(AstVisitor<? super P, ? extends R> visitor, P data) {
return visitor.cannotVisit(this, data);
}
/**
* Returns the {@link AstInfo} for this root node.
*
* @implNote This default implementation can not work unless overridden in the root node.
*/
default AstInfo<? extends RootNode> getAstInfo() {
return getRoot().getAstInfo();
}
/**
* Gets the name of the node that is used to match it with XPath queries.
*
* @return The XPath node name
*/
String getXPathNodeName();
/**
* Returns an iterator enumerating all the attributes that are available
* from XPath for this node.
*
* @return An attribute iterator for this node
*/
default Iterator<Attribute> getXPathAttributesIterator() {
return new AttributeAxisIterator(this);
}
/**
* Returns the first child of this node, or null if it doesn't exist.
*
* @since 7.0.0
*/
default @Nullable Node getFirstChild() {
return getNumChildren() > 0 ? getChild(0) : null;
}
    /**
     * Returns the last child of this node, or null if it doesn't exist.
     *
     * @since 7.0.0
     */
    default @Nullable Node getLastChild() {
        return getNumChildren() > 0 ? getChild(getNumChildren() - 1) : null;
    }
/**
* Returns the previous sibling of this node, or null if it does not exist.
*
* @since 7.0.0
*/
default @Nullable Node getPreviousSibling() {
Node parent = getParent();
int idx = getIndexInParent();
if (parent != null && idx > 0) {
return parent.getChild(idx - 1);
}
return null;
}
/**
* Returns the next sibling of this node, or null if it does not exist.
*
* @since 7.0.0
*/
default @Nullable Node getNextSibling() {
Node parent = getParent();
int idx = getIndexInParent();
if (parent != null && idx + 1 < parent.getNumChildren()) {
return parent.getChild(idx + 1);
}
return null;
}
/**
* Returns a node stream containing only this node.
* {@link NodeStream#of(Node)} is a null-safe version
* of this method.
*
* @return A node stream containing only this node
*
* @see NodeStream#of(Node)
* @since 7.0.0
*/
default NodeStream<? extends Node> asStream() {
return StreamImpl.singleton(this);
}
/**
* Returns a node stream containing all the children of
* this node. This method does not provide much type safety,
* you'll probably want to use {@link #children(Class)}.
*
* @see NodeStream#children(Class)
* @since 7.0.0
*/
default NodeStream<? extends Node> children() {
return StreamImpl.children(this);
}
/**
* Returns a node stream containing all the descendants
* of this node. See {@link DescendantNodeStream} for details.
*
* @return A node stream of the descendants of this node
*
* @see NodeStream#descendants()
* @since 7.0.0
*/
default DescendantNodeStream<? extends Node> descendants() {
return StreamImpl.descendants(this);
}
/**
* Returns a node stream containing this node, then all its
* descendants. See {@link DescendantNodeStream} for details.
*
* @return A node stream of the whole subtree topped by this node
*
* @see NodeStream#descendantsOrSelf()
* @since 7.0.0
*/
default DescendantNodeStream<? extends Node> descendantsOrSelf() {
return StreamImpl.descendantsOrSelf(this);
}
/**
* Returns a node stream containing all the strict ancestors of this node,
* in innermost to outermost order. The returned stream doesn't contain this
* node, and is empty if this node has no parent.
*
* @return A node stream of the ancestors of this node
*
* @see NodeStream#ancestors()
* @since 7.0.0
*/
default NodeStream<? extends Node> ancestors() {
return StreamImpl.ancestors(this);
}
/**
* Returns a node stream containing this node and its ancestors.
* The nodes of the returned stream are yielded in a depth-first fashion.
*
* @return A stream of ancestors
*
* @see NodeStream#ancestorsOrSelf()
* @since 7.0.0
*/
default NodeStream<? extends Node> ancestorsOrSelf() {
return StreamImpl.ancestorsOrSelf(this);
}
/**
* Returns a {@linkplain NodeStream node stream} of the {@linkplain #children() children}
* of this node that are of the given type.
*
* @param rClass Type of node the returned stream should contain
* @param <R> Type of node the returned stream should contain
*
* @return A new node stream
*
* @see NodeStream#children(Class)
* @since 7.0.0
*/
default <R extends Node> NodeStream<R> children(Class<? extends R> rClass) {
return StreamImpl.children(this, rClass);
}
/**
* Returns the first child of this node that has the given type.
* Returns null if no such child exists.
*
* <p>If you want to process this element as a node stream, use
* {@code asStream().firstChild(rClass)} instead, which returns
* a node stream.
*
* @param rClass Type of the child to find
* @param <R> Type of the child to find
*
* @return A child, or null
*
* @since 7.0.0
*/
default <R extends Node> @Nullable R firstChild(Class<? extends R> rClass) {
return children(rClass).first();
}
/**
* Returns a {@linkplain NodeStream node stream} of the {@linkplain #descendants() descendants}
* of this node that are of the given type. See {@link DescendantNodeStream}
* for details.
*
* @param rClass Type of node the returned stream should contain
* @param <R> Type of node the returned stream should contain
*
* @return A new node stream
*
* @see NodeStream#descendants(Class)
* @since 7.0.0
*/
default <R extends Node> DescendantNodeStream<R> descendants(Class<? extends R> rClass) {
return StreamImpl.descendants(this, rClass);
}
/**
* Returns the {@linkplain #ancestors() ancestor stream} of this node
* filtered by the given node type.
*
* @param rClass Type of node the returned stream should contain
* @param <R> Type of node the returned stream should contain
*
* @return A new node stream
*
* @see NodeStream#ancestors(Class)
* @since 7.0.0
*/
default <R extends Node> NodeStream<R> ancestors(Class<? extends R> rClass) {
return StreamImpl.ancestors(this, rClass);
}
/**
* Returns the root of the tree this node is declared in.
*
* @since 7.0.0
*/
default @NonNull RootNode getRoot() {
Node r = this;
while (r.getParent() != null) {
r = r.getParent();
}
if (!(r instanceof RootNode)) {
throw new AssertionError("Root of the tree should implement RootNode");
}
return (RootNode) r;
}
/**
* Returns the language version of this node.
*/
default LanguageVersion getLanguageVersion() {
return getTextDocument().getLanguageVersion();
}
}
| 23,171 | 32.534009 | 120 | java |
pmd | pmd-master/pmd-core/src/main/java/net/sourceforge/pmd/lang/ast/GenericToken.java | /*
* BSD-style license; for more info see http://pmd.sourceforge.net/license.html
*/
package net.sourceforge.pmd.lang.ast;
import java.util.stream.Stream;
import org.apache.commons.lang3.StringUtils;
import net.sourceforge.pmd.annotation.Experimental;
import net.sourceforge.pmd.lang.document.Chars;
import net.sourceforge.pmd.lang.document.TextRegion;
import net.sourceforge.pmd.reporting.Reportable;
import net.sourceforge.pmd.util.IteratorUtil;
/**
* Represents a token, part of a token chain in a source file. Tokens
* are the individual "words" of a programming language, such as literals,
* identifiers, keywords, or comments. Tokens are produced by a lexer and
* are used by a parser implementation to build an AST {@link Node}. Tokens
* should generally not be manipulated in rules directly as they have little
* to no semantic information.
*/
public interface GenericToken<T extends GenericToken<T>> extends Comparable<T>, Reportable {

    /**
     * Obtain the next generic token according to the input stream which generated the instance of this token.
     *
     * @return the next generic token if it exists; null if it does not exist
     */
    T getNext();

    /**
     * Obtain a comment-type token which, according to the input stream which generated the instance of this token,
     * precedes this instance token and succeeds the previous generic token (if there is any).
     *
     * @return the comment-type token if it exists; null if it does not exist
     */
    T getPreviousComment();

    /**
     * Returns the token's text as a string.
     */
    default String getImage() {
        return getImageCs().toString();
    }

    /**
     * Returns the text of the token as a char sequence.
     * This should be preferred when you can use eg {@link StringUtils}
     * to do some processing, without having to create a string.
     */
    CharSequence getImageCs();

    /**
     * Returns true if the image of this token equals
     * the given charsequence. This does not create a
     * string.
     *
     * @param charSeq A character sequence
     */
    default boolean imageEquals(CharSequence charSeq) {
        CharSequence imageCs = getImageCs();
        if (imageCs instanceof Chars) {
            // Chars has a specialized comparison, avoiding a string allocation
            return ((Chars) imageCs).contentEquals(charSeq);
        }
        return StringUtils.equals(imageCs, charSeq);
    }

    /** Returns a text region with the coordinates of this token. */
    TextRegion getRegion();

    /**
     * Returns true if this token is an end-of-file token. This is the
     * last token of token sequences that have been fully lexed.
     */
    boolean isEof();

    /**
     * Returns true if this token is implicit, ie was inserted artificially
     * and has a zero-length image.
     */
    default boolean isImplicit() {
        return false;
    }

    /**
     * This must return true if this token comes before the other token.
     * If they start at the same index, then the smaller token comes before
     * the other.
     */
    @Override
    default int compareTo(T o) {
        // delegate to the region comparison (start offset, then length)
        return getRegion().compareTo(o.getRegion());
    }

    /**
     * Returns an iterable that enumerates all (non-special) tokens
     * between the two tokens (bounds included).
     *
     * @param from First token to yield (inclusive)
     * @param to   Last token to yield (inclusive)
     *
     * @return An iterable
     *
     * @throws IllegalArgumentException If the first token does not come before the other token
     */
    static <T extends GenericToken<T>> Iterable<T> range(T from, T to) {
        if (from.compareTo(to) > 0) {
            throw new IllegalArgumentException(from + " must come before " + to);
        }
        // stop the generator by returning null once 'to' has been yielded
        return () -> IteratorUtil.generate(from, t -> t == to ? null : t.getNext());
    }

    /**
     * Returns a stream corresponding to {@link #range(GenericToken, GenericToken)}.
     */
    static <T extends GenericToken<T>> Stream<T> streamRange(T from, T to) {
        return IteratorUtil.toStream(range(from, to).iterator());
    }

    /**
     * Returns an iterable that enumerates all special tokens belonging
     * to the given token.
     *
     * @param from Token from which to start, note that the returned iterable
     *             does not contain that token
     *
     * @return An iterable, possibly empty, not containing the parameter
     *
     * @throws NullPointerException If the parameter is null
     */
    static <T extends GenericToken<T>> Iterable<T> previousSpecials(T from) {
        return () -> IteratorUtil.generate(from.getPreviousComment(), GenericToken::getPreviousComment);
    }

    /**
     * Gets a unique integer representing the kind of token this is.
     * The semantics of this kind depend on the language.
     *
     * <p><strong>Note:</strong> This is an experimental API.
     *
     * <p>The returned constants can be looked up in the language's "*ParserConstants",
     * e.g. CppParserConstants or JavaParserConstants. These constants are considered
     * internal API and may change at any time when the language's grammar is changed.
     */
    @Experimental
    int getKind();
}
| 5,163 | 31.89172 | 115 | java |
pmd | pmd-master/pmd-core/src/main/java/net/sourceforge/pmd/lang/ast/SemanticErrorReporter.java | /*
* BSD-style license; for more info see http://pmd.sourceforge.net/license.html
*/
package net.sourceforge.pmd.lang.ast;
import java.text.MessageFormat;
import org.checkerframework.checker.nullness.qual.Nullable;
import org.slf4j.event.Level;
import net.sourceforge.pmd.util.StringUtil;
import net.sourceforge.pmd.util.log.MessageReporter;
/**
* Reports errors that occur after parsing. This may be used to implement
* semantic checks in a language specific way.
*/
public interface SemanticErrorReporter {
// TODO use resource bundle keys instead of string messages.
/**
* Report a warning at the given location. Warnings do not abort
* the analysis. They are usually recoverable errors. They are used
* to warn the user that something wrong is going on, which may cause
* subsequent errors or inconsistent behavior.
*
* @param location Location where the warning should be reported
* @param message Message (rendered using a {@link MessageFormat})
* @param formatArgs Format arguments
*/
void warning(Node location, String message, Object... formatArgs);
/**
* Report an error at the given location. Errors abort subsequent analysis
* and cause a processing error to be put in the report. The produced error
* can be thrown by the caller if it cannot be recovered from.
*
* @param location Location where the error should be reported
* @param message Message (rendered using a {@link MessageFormat})
* @param formatArgs Format arguments
*/
SemanticException error(Node location, String message, Object... formatArgs);
/**
* If {@link #error(Node, String, Object...)} has been called, return
* a semantic exception instance with the correct message. If it has been
* called more than once, return the first exception, possibly with suppressed
* exceptions for subsequent calls to {@link #error(Node, String, Object...)}.
*/
@Nullable SemanticException getFirstError();
static SemanticErrorReporter noop() {
return new SemanticErrorReporter() {
private SemanticException exception;
@Override
public void warning(Node location, String message, Object... formatArgs) {
// noop
}
@Override
public SemanticException error(Node location, String message, Object... formatArgs) {
SemanticException ex = new SemanticException(MessageFormat.format(message, formatArgs));
if (this.exception == null) {
this.exception = ex;
} else {
this.exception.addSuppressed(ex);
}
return ex;
}
@Override
public @Nullable SemanticException getFirstError() {
return exception;
}
};
}
/**
* Forwards to a {@link MessageReporter}, except trace and debug
* messages which are reported on a logger.
*/
static SemanticErrorReporter reportToLogger(MessageReporter reporter) {
return new SemanticErrorReporter() {
private SemanticException exception = null;
private String locPrefix(Node loc) {
return "at " + loc.getReportLocation().startPosToStringWithFile()
+ ": ";
}
private String makeMessage(Node location, String message, Object[] args) {
return locPrefix(location) + MessageFormat.format(message, args);
}
private String logMessage(Level level, Node location, String message, Object[] args) {
String fullMessage = makeMessage(location, message, args);
reporter.log(level, StringUtil.quoteMessageFormat(fullMessage)); // already formatted
return fullMessage;
}
@Override
public void warning(Node location, String message, Object... args) {
logMessage(Level.DEBUG, location, message, args);
}
@Override
public SemanticException error(Node location, String message, Object... args) {
String fullMessage = logMessage(Level.ERROR, location, message, args);
SemanticException ex = new SemanticException(fullMessage);
if (this.exception == null) {
this.exception = ex;
} else {
this.exception.addSuppressed(ex);
}
return ex;
}
@Override
public @Nullable SemanticException getFirstError() {
return exception;
}
};
}
}
| 4,766 | 34.574627 | 104 | java |
pmd | pmd-master/pmd-core/src/main/java/net/sourceforge/pmd/lang/ast/AstVisitorBase.java | /*
* BSD-style license; for more info see http://pmd.sourceforge.net/license.html
*/
package net.sourceforge.pmd.lang.ast;
/**
* Base implementation of {@link AstVisitor}, that performs a top-down
* (preorder) visit and may accumulate a result.
*
* <p>Note that if you care about the result ({@code <R>}), then you need
* to override {@link #visitChildren(Node, Object) visitChildren} to implement
* the logic that combines values from children, if any.
*/
public abstract class AstVisitorBase<P, R> implements AstVisitor<P, R> {

    /**
     * Visit the children. By default the data parameter is passed unchanged
     * to all descendants, and null is returned. Override this method to customize
     * this behavior.
     *
     * @param node Node whose children should be visited
     * @param data Parameter of the visit
     *
     * @return Some value for the children
     */
    protected R visitChildren(Node node, P data) {
        // indexed loop on purpose: faster than a children node stream
        final int numChildren = node.getNumChildren();
        for (int i = 0; i < numChildren; i++) {
            node.getChild(i).acceptVisitor(this, data);
        }
        return null;
    }

    @Override
    public R visitNode(Node node, P param) {
        return visitChildren(node, param);
    }
}
| 1,369 | 31.619048 | 84 | java |
pmd | pmd-master/pmd-core/src/main/java/net/sourceforge/pmd/lang/ast/FileAnalysisException.java | /*
* BSD-style license; for more info see http://pmd.sourceforge.net/license.html
*/
package net.sourceforge.pmd.lang.ast;
import java.util.Objects;
import org.apache.commons.lang3.StringUtils;
import org.checkerframework.checker.nullness.qual.NonNull;
import org.checkerframework.checker.nullness.qual.Nullable;
import net.sourceforge.pmd.lang.document.FileId;
import net.sourceforge.pmd.lang.document.FileLocation;
/**
* An exception that occurs while processing a file. Subtypes include
* <ul>
* <li>{@link TokenMgrError}: lexical syntax errors
* <li>{@link ParseException}: syntax errors
* <li>{@link SemanticException}: exceptions occurring after the parsing
* phase, because the source code is semantically invalid
* </ul>
*/
public class FileAnalysisException extends RuntimeException {

    // defaults to UNKNOWN until setFileId is called
    private FileId fileId = FileId.UNKNOWN;

    public FileAnalysisException() {
        super();
    }

    public FileAnalysisException(String message) {
        super(message);
    }

    public FileAnalysisException(Throwable cause) {
        super(cause);
    }

    public FileAnalysisException(String message, Throwable cause) {
        super(message, cause);
    }

    /**
     * Records the file in which this error occurred, so it can be shown
     * in the message.
     *
     * @param fileId Id of the file (non-null)
     *
     * @return This instance, for call chaining
     */
    public FileAnalysisException setFileId(FileId fileId) {
        this.fileId = Objects.requireNonNull(fileId);
        return this;
    }

    /** Returns true if a real file id has been recorded. */
    protected boolean hasFileName() {
        return !FileId.UNKNOWN.equals(fileId);
    }

    /**
     * The name of the file in which the error occurred.
     */
    public @NonNull FileId getFileId() {
        return fileId;
    }

    @Override
    public final String getMessage() {
        return errorKind() + StringUtils.uncapitalize(positionToString()) + ": " + super.getMessage();
    }

    /** Kind of error, used as the message prefix. Subclasses may override. */
    protected String errorKind() {
        return "Error";
    }

    /** Location of the error if known, otherwise null. Subclasses may override. */
    protected @Nullable FileLocation location() {
        return null;
    }

    // builds eg " in file 'Foo.java' at line 3", either part may be absent
    private String positionToString() {
        StringBuilder position = new StringBuilder();
        if (hasFileName()) {
            position.append(" in file '").append(getFileId().getOriginalPath()).append('\'');
        }
        FileLocation loc = location();
        if (loc != null) {
            position.append(" at ").append(loc.startPosToString());
        }
        return position.toString();
    }

    /**
     * Wraps the cause into an analysis exception. If it is itself an analysis
     * exception, just returns it after setting the filename for context.
     *
     * @param fileId  Filename
     * @param message Context message, if the cause is not a {@link FileAnalysisException}
     * @param cause   Exception to wrap
     *
     * @return An exception
     */
    public static FileAnalysisException wrap(@NonNull FileId fileId, @NonNull String message, @NonNull Throwable cause) {
        if (cause instanceof FileAnalysisException) {
            return ((FileAnalysisException) cause).setFileId(fileId);
        }
        return new FileAnalysisException("In file '" + fileId.getAbsolutePath() + "': " + message, cause)
            .setFileId(fileId);
    }
}
| 3,020 | 27.233645 | 121 | java |
pmd | pmd-master/pmd-core/src/main/java/net/sourceforge/pmd/lang/ast/AstVisitor.java | /*
* BSD-style license; for more info see http://pmd.sourceforge.net/license.html
*/
package net.sourceforge.pmd.lang.ast;
/**
* Root interface for AST visitors. Language modules publish a subinterface
* with one separate visit method for each type of node in the language,
* eg JavaVisitor.
*
* <p>Usually you never want to call {@code visit} methods manually, instead
* calling {@link Node#acceptVisitor(AstVisitor, Object) Node::acceptVisitor},
* which then dispatches to the most specific method of the visitor instance.
*
* <p>Use {@link Void} as a type parameter if you don't want a parameter type
* or a return type.
*
* @param <P> Parameter type of the visit method
* @param <R> Return type of the visit method
*/
public interface AstVisitor<P, R> {

    /**
     * Called by a node when it detects that the visitor is not of the
     * language it is used to visiting. If a visitor wants to visit nodes
     * for several languages, it should provide a useful implementation
     * of this method. The default implementation throws an
     * {@link UnsupportedOperationException}.
     *
     * @param node  Node calling back this method
     * @param param Parameter of the visit
     *
     * @return A value (or may throw)
     */
    default R cannotVisit(Node node, P param) {
        throw new UnsupportedOperationException("Cannot visit " + node);
    }

    /**
     * Visit a node. This method is dispatched statically, you should
     * use {@link Node#acceptVisitor(AstVisitor, Object)} if you want
     * to call the most specific method instead.
     *
     * @param node  Node to visit
     * @param param Parameter
     *
     * @return Some result
     */
    R visitNode(Node node, P param);
}
| 1,691 | 30.924528 | 79 | java |
pmd | pmd-master/pmd-core/src/main/java/net/sourceforge/pmd/lang/ast/NodeStream.java | /**
* BSD-style license; for more info see http://pmd.sourceforge.net/license.html
*/
package net.sourceforge.pmd.lang.ast;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.NoSuchElementException;
import java.util.Objects;
import java.util.Optional;
import java.util.function.BiFunction;
import java.util.function.Consumer;
import java.util.function.Function;
import java.util.function.Predicate;
import java.util.function.ToIntFunction;
import java.util.stream.Collector;
import java.util.stream.Collectors;
import java.util.stream.Stream;
import org.checkerframework.checker.nullness.qual.NonNull;
import org.checkerframework.checker.nullness.qual.Nullable;
import net.sourceforge.pmd.lang.ast.internal.StreamImpl;
/**
* A sequence of AST nodes. Conceptually similar to a {@link Stream},
* and exposes a specialized API to navigate abstract syntax trees.
* This API replaces the defunct {@link Node#findChildNodesWithXPath(String)}.
*
* <h1>API usage</h1>
*
* <p>The {@link Node} interface exposes methods like {@link Node#children()}
* or {@link Node#asStream()} to obtain new NodeStreams. Null-safe construction
* methods are available here, see {@link #of(Node)}, {@link #of(Node[])},
* {@link #fromIterable(Iterable)}.
*
* <p>Most functions have an equivalent in the {@link Stream} interface
* and their behaviour is similar. One important departure from the
* {@link Stream} contract is the absence of requirement on the laziness
* of pipeline operations. More on that in the details section below.
*
* <p>Some additional functions are provided to iterate the axes of the
* tree: {@link #children()}, {@link #descendants()}, {@link #descendantsOrSelf()},
* {@link #parents()}, {@link #ancestors()}, {@link #ancestorsOrSelf()},
* {@link #precedingSiblings()}, {@link #followingSiblings()}.
* Filtering and mapping nodes by type is possible through {@link #filterIs(Class)},
* and the specialized {@link #children(Class)}, {@link #descendants(Class)},
* and {@link #ancestors(Class)}.
*
* <p>Many complex predicates about nodes can be expressed by testing
* the emptiness of a node stream. E.g. the following tests if the node
* is a variable declarator id initialized to the value {@code 0}:
* <pre>
* {@linkplain #of(Node) NodeStream.of}(someNode) <i>// the stream here is empty if the node is null</i>
* {@linkplain #filterIs(Class) .filterIs}(ASTVariableDeclaratorId.class)<i>// the stream here is empty if the node was not a variable declarator id</i>
* {@linkplain #followingSiblings() .followingSiblings}() <i>// the stream here contains only the siblings, not the original node</i>
* {@linkplain #take(int) .take}(1) <i>// the stream here contains only the first sibling, if it exists</i>
* {@linkplain #filterIs(Class) .filterIs}(ASTNumericLiteral.class)
* {@linkplain #filter(Predicate) .filter}(it -> !it.isFloatingPoint() && it.getValueAsInt() == 0)
* {@linkplain #nonEmpty() .nonEmpty}(); <i>// If the stream is non empty here, then all the pipeline matched</i>
* </pre>
*
* <p>Many existing operations from the node interface can be written with streams too:
* <ul>
* <li><tt>node.{@link Node#getFirstChildOfType(Class) getFirstChildOfType(t)} === node.{@link Node#children(Class) children(t)}.{@link #first()}</tt></li>
* <li><tt>node.{@link Node#getFirstDescendantOfType(Class) getFirstDescendantOfType(t)} === node.{@link Node#descendants(Class) descendants(t)}.{@link #first()}</tt></li>
* <li><tt>node.{@link Node#getFirstParentOfType(Class) getFirstParentOfType(t)} === node.{@link Node#ancestors(Class) ancestors(t)}.{@link #first()}</tt></li>
* <li><tt>node.{@link Node#findChildrenOfType(Class) findChildrenOfType(t)} === node.{@link Node#descendants(Class) children(t)}.{@link #toList()}</tt></li>
* <li><tt>node.{@link Node#findDescendantsOfType(Class) findDescendantsOfType(t)} === node.{@link Node#descendants(Class) descendants(t)}.{@link #toList()}</tt></li>
* <li><tt>node.{@link Node#getParentsOfType(Class) getParentsOfType(t)} === node.{@link Node#descendants(Class) ancestors(t)}.{@link #toList()}</tt></li>
* <li><tt>node.{@link Node#getNthParent(int) getNthParent(n)} === node.{@link Node#ancestors() ancestors()}.{@link #get(int) get(n - 1)}</tt></li>
* <li><tt>node.{@link Node#hasDescendantOfType(Class) hasDescendantOfType(t)} === node.{@link Node#descendants(Class) descendants(t)}.{@link #nonEmpty()}</tt></li>
* <li><tt>node.getFirstParentOfAnyType(c1, c2) === node.{@link Node#ancestors() ancestors()}.{@link #firstNonNull(Function) firstNonNull}({@link #asInstanceOf(Class, Class[]) asInstanceOf(c1, c2)})</tt></li>
* <li><tt>node.hasDescendantOfAnyType(c1, c2) === node.{@link Node#descendants() descendants()}.{@link #map(Function) map}({@link #asInstanceOf(Class, Class[]) asInstanceOf(c1, c2)}).{@link #nonEmpty()}</tt></li>
* </ul>
* The new way to write those is as efficient as the old way.
*
* <p>Unlike {@link Stream}s, NodeStreams can be iterated multiple times. That means, that the operations
* that are <i>terminal</i> in the Stream interface (i.e. consume the stream) don't consume NodeStreams.
* Be aware though, that node streams don't cache their results by default, so e.g. calling {@link #count()}
* followed by {@link #toList()} will execute the whole pipeline twice. The elements of a stream can
* however be {@linkplain #cached() cached} at an arbitrary point in the pipeline to evaluate the
* upstream only once. Some construction methods allow building a node stream from an external data
* source, e.g. {@link #fromIterable(Iterable) fromIterable}.
* Depending on how the data source is implemented, the built node streams may be iterable only once.
*
* <p>Node streams may contain duplicates, which can be pruned with {@link #distinct()}.
*
* <h1>Details</h1>
*
* <p>NodeStreams are not necessarily implemented with {@link Stream}, but
* when a method has an equivalent in the {@link Stream} API, their
* contract is similar. The only difference, is that node streams are not
* necessarily lazy, ie, a pipeline operation may be evaluated eagerly
* to improve performance. For this reason, relying on side-effects
* produced in the middle of the pipeline is a bad idea. {@link Stream}
* gives the same guideline about statefulness, but not for the same reason.
* Their justification is parallelism and operation reordering, once
* the pipeline is fully known.
*
* <p>Node streams are meant to be sequential streams, so there is no
* equivalent to {@link Stream#findAny()}. The method {@link #first()}
* is an equivalent to {@link Stream#findFirst()}. There is however a
* {@link #last()} method, which may be implemented efficiently on some
* streams (eg {@link #children()}). TODO maybe implement reverse
*
* <p>Node streams are most of the time ordered in document order (w.r.t. the XPath specification),
* a.k.a. prefix order. Some operations which explicitly manipulate the order of nodes, like
* {@link #union(NodeStream[]) union} or {@link #append(NodeStream) append}, may not preserve that ordering.
* {@link #map(Function) map} and {@link #flatMap(Function) flatMap} operations may not preserve the ordering
* if the stream has more than one element, since the mapping is applied in order to each element
* of the receiver stream. This extends to methods defined in terms of map or flatMap, e.g.
* {@link #descendants()} or {@link #children()}.
*
* @param <T> Type of nodes this stream contains. This parameter is
* covariant, which means for maximum flexibility, methods
* taking a node stream argument should declare it with an
* "extends" wildcard.
*
* @author Clément Fournier
* @implNote Choosing to wrap a stream instead of extending the interface is to
* allow the functions to return NodeStreams, and to avoid the code bloat
* induced by delegation.
*
* <p>The default implementation relies on the iterator method. From benchmarking,
* that appears more efficient than streams.
*
* @since 7.0.0
*/
public interface NodeStream<@NonNull T extends Node> extends Iterable<@NonNull T> {
/**
* Returns a node stream consisting of the results of replacing each
* node of this stream with the contents of a stream produced by the
* given mapping function. If a mapped stream is null, it is discarded.
*
* <p>If you want to flatMap this node stream to a {@link Stream} with
* arbitrary elements (ie not nodes), use {@link #toStream()} then
* {@link Stream#flatMap(Function)}.
*
* @param mapper A function mapping the elements of this stream to another stream
* @param <R> Type of nodes contained in the returned stream
*
* @return A flat mapped stream
*
* @see Stream#flatMap(Function)
*/
<R extends Node> NodeStream<R> flatMap(Function<? super T, ? extends @Nullable NodeStream<? extends R>> mapper);
// lazy pipeline transformations
/**
* Returns a node stream consisting of the results of applying the given
* mapping function to the node of this stream. If the mapping function
* returns null, the elements are not included.
*
* <p>If you want to map this node stream to a {@link Stream} with
* arbitrary elements (ie not nodes), use {@link #toStream()} then
* {@link Stream#map(Function)}.
*
* @param mapper A function mapping the elements of this stream to another node type
* @param <R> The node type of the new stream
*
* @return A mapped stream
*
* @see Stream#map(Function)
*/
<R extends Node> NodeStream<R> map(Function<? super T, ? extends @Nullable R> mapper);
/**
* Returns a node stream consisting of the nodes of this stream that match
* the given predicate.
*
* @param predicate A predicate to apply to each node to determine if
* it should be included
*
* @return A filtered node stream
*
* @see Stream#filter(Predicate)
* @see #filterNot(Predicate)
* @see #filterIs(Class)
* @see #filterMatching(Function, Object)
*/
NodeStream<T> filter(Predicate<? super @NonNull T> predicate);
/**
* Returns a stream consisting of the elements of this stream, additionally
* performing the provided action on each element as elements are consumed
* from the resulting stream. Note that terminal operations such as {@link #count()}
* don't necessarily execute the action.
*
* @param action an action to perform on the elements as they are consumed
* from the stream
*
* @return A new stream
*/
NodeStream<T> peek(Consumer<? super @NonNull T> action);
/**
* Returns a new node stream that contains all the elements of this stream, then
* all the elements of the given stream.
*
* @param right Other stream
*
* @return A concatenated stream
*
* @see #union(NodeStream[])
*/
NodeStream<T> append(NodeStream<? extends T> right);
/**
* Returns a new node stream that contains all the elements of the given stream,
* then all the elements of this stream.
*
* @param right Other stream
*
* @return A concatenated stream
*
* @see #union(NodeStream[])
*/
NodeStream<T> prepend(NodeStream<? extends T> right);
/**
* Returns a node stream containing all the elements of this node stream,
* but which will evaluate the upstream pipeline only once. The returned
* stream is not necessarily lazy, which means it may evaluate the upstream
* pipeline as soon as the call to this method is made.
*
* <p>This is useful e.g. if you want to call several terminal operations
* without executing the pipeline several times. For example,
*
* <pre>
*
* NodeStream<T> stream = NodeStream.of(...)
* <i>// long pipeline</i>
* <i>// ...</i>
* .cached()
* <i>// downstream</i>
* <i>// ...</i>
* ;
*
* stream.forEach(this::addViolation); <i>// both up- and downstream will be evaluated</i>
* curViolations += stream.count(); <i>// only downstream is evaluated</i>
* </pre>
*
* @return A cached node stream
*/
NodeStream<T> cached();
/**
* Returns a stream consisting of the elements of this stream,
* truncated to be no longer than maxSize in length.
*
* @param maxSize Maximum size of the returned stream
*
* @return A new node stream
*
* @throws IllegalArgumentException if n is negative
* @see Stream#limit(long)
* @see #drop(int)
*/
NodeStream<T> take(int maxSize);
/**
* Returns a stream consisting of the remaining elements of this
* stream after discarding the first n elements of the stream. If
* this stream contains fewer than n elements then an empty stream
* will be returned.
*
* @param n the number of leading elements to skip
*
* @return A new node stream
*
* @throws IllegalArgumentException if n is negative
* @see Stream#skip(long)
* @see #take(int)
* @see #dropLast(int)
*/
NodeStream<T> drop(int n);
/**
* Returns a stream consisting of the elements of this stream except
* the n tail elements. If n is greater than the number of elements
* of this stream, returns an empty stream. This requires a lookahead
* buffer in general.
*
* @param n the number of trailing elements to skip
*
* @return A new node stream
*
* @throws IllegalArgumentException if n is negative
* @see #drop(int)
*/
NodeStream<T> dropLast(int n);
/**
* Returns the longest prefix of elements that satisfy the given predicate.
*
* @param predicate The predicate used to test elements.
*
* @return the longest prefix of this stream whose elements all satisfy
* the predicate.
*/
NodeStream<T> takeWhile(Predicate<? super T> predicate);
/**
* Returns a stream consisting of the distinct elements (w.r.t
* {@link Object#equals(Object)}) of this stream.
*
* @return a stream consisting of the distinct elements of this stream
*/
NodeStream<T> distinct();
// tree navigation
    /**
     * Returns a node stream containing all the ancestors of the nodes
     * contained in this stream. The returned stream doesn't preserve document
     * order, since ancestors are yielded in innermost to outermost order.
     *
     * <p>This is equivalent to {@code flatMap(Node::ancestors)}.
     *
     * @return A stream of ancestors
     *
     * @see Node#ancestors()
     * @see #ancestorsOrSelf()
     * @see #ancestors(Class)
     */
    default NodeStream<Node> ancestors() {
        return flatMap(Node::ancestors);
    }

    /**
     * Returns a node stream containing the nodes contained in this stream and their ancestors.
     * The nodes of the returned stream are yielded in a depth-first fashion.
     *
     * <p>This is equivalent to {@code flatMap(Node::ancestorsOrSelf)}.
     *
     * @return A stream of ancestors
     *
     * @see #ancestors()
     */
    default NodeStream<Node> ancestorsOrSelf() {
        return flatMap(Node::ancestorsOrSelf);
    }

    /**
     * Returns a node stream containing all the (first-degree) parents of the nodes
     * contained in this stream.
     *
     * <p>This is equivalent to {@code map(Node::getParent)}. Since
     * {@link #map(Function) map} drops null results, nodes that have no
     * parent (eg root nodes) contribute nothing to the returned stream.
     *
     * @return A stream of parents
     *
     * @see #ancestors()
     * @see #ancestorsOrSelf()
     */
    default NodeStream<Node> parents() {
        return map(Node::getParent);
    }
    /**
     * Returns a node stream containing all the children of the nodes
     * contained in this stream.
     *
     * <p>This is equivalent to {@code flatMap(Node::children)}. The children
     * of each node are yielded in turn, in the order the nodes occur in this
     * stream.
     *
     * @return A stream of children
     *
     * @see Node#children()
     * @see #children(Class)
     */
    default NodeStream<Node> children() {
        return flatMap(Node::children);
    }
/**
* Returns a node stream containing all the strict descendants of the nodes
* contained in this stream. See {@link DescendantNodeStream} for details.
*
* <p>This is equivalent to {@code flatMap(Node::descendants)}, except
* the returned stream is a {@link DescendantNodeStream}.
*
* @return A stream of descendants
*
* @see Node#descendants()
* @see #descendants(Class)
* @see #descendantsOrSelf()
*/
DescendantNodeStream<Node> descendants();
/**
* Returns a node stream containing the nodes contained in this stream and their descendants.
* See {@link DescendantNodeStream} for details.
*
* <p>This is equivalent to {@code flatMap(Node::descendantsOrSelf)}, except
* the returned stream is a {@link DescendantNodeStream}.
*
* @return A stream of descendants
*
* @see Node#descendantsOrSelf()
* @see #descendants()
*/
DescendantNodeStream<Node> descendantsOrSelf();
    /**
     * Returns a node stream containing all the following siblings of the nodes contained
     * in this stream. The receiver nodes themselves are not included.
     *
     * @return A stream of siblings
     */
    default NodeStream<Node> followingSiblings() {
        return flatMap(StreamImpl::followingSiblings);
    }

    /**
     * Returns a node stream containing all the preceding siblings of the nodes contained
     * in this stream. The nodes are yielded from left to right, i.e. in document order.
     * The receiver nodes themselves are not included.
     *
     * @return A stream of siblings
     */
    default NodeStream<Node> precedingSiblings() {
        return flatMap(StreamImpl::precedingSiblings);
    }
    /**
     * Returns the {@linkplain #children() children stream} of each node
     * in this stream, filtered by the given node type. Subtypes of
     * {@code rClass} are included.
     *
     * <p>This is equivalent to {@code children().filterIs(rClass)}.
     *
     * @param rClass Type of node the returned stream should contain
     * @param <R>    Type of node the returned stream should contain
     *
     * @return A new node stream
     *
     * @see #filterIs(Class)
     * @see Node#children(Class)
     */
    default <R extends Node> NodeStream<R> children(Class<? extends R> rClass) {
        return flatMap(it -> it.children(rClass));
    }

    /**
     * Returns a stream containing the first child of each of the nodes
     * in this stream that has the given type.
     *
     * <p>This is equivalent to {@code flatMap(it -> it.children(rClass).take(1))}.
     *
     * @param rClass Type of node the returned stream should contain
     * @param <R>    Type of node the returned stream should contain
     *
     * @return A new node stream
     *
     * @see Node#children(Class)
     */
    default <R extends Node> NodeStream<R> firstChild(Class<? extends R> rClass) {
        return flatMap(it -> it.children(rClass).take(1));
    }
/**
* Returns the {@linkplain #descendants() descendant stream} of each node
* in this stream, filtered by the given node type. See {@link DescendantNodeStream}
* for details.
*
* <p>This is equivalent to {@code descendants().filterIs(rClass)}, except
* the returned stream is a {@link DescendantNodeStream}.
*
* @param rClass Type of node the returned stream should contain
* @param <R> Type of node the returned stream should contain
*
* @return A new node stream
*
* @see #filterIs(Class)
* @see Node#descendants(Class)
*/
<R extends Node> DescendantNodeStream<R> descendants(Class<? extends R> rClass);
    /**
     * Returns the {@linkplain #ancestors() ancestor stream} of each node
     * in this stream, filtered by the given node type. Subtypes of
     * {@code rClass} are included.
     *
     * <p>This is equivalent to {@code ancestors().filterIs(rClass)}.
     *
     * @param rClass Type of node the returned stream should contain
     * @param <R>    Type of node the returned stream should contain
     *
     * @return A new node stream
     *
     * @see #filterIs(Class)
     * @see Node#ancestors(Class)
     */
    default <R extends Node> NodeStream<R> ancestors(Class<? extends R> rClass) {
        return flatMap(it -> it.ancestors(rClass));
    }
    /**
     * Filters the nodes of this stream using the negation of the given predicate,
     * ie nodes for which the predicate returns true are excluded.
     *
     * <p>This is equivalent to {@code filter(predicate.negate())}
     *
     * @param predicate A predicate to apply to each node to determine if
     *                  it should be excluded
     *
     * @return A filtered node stream
     *
     * @see #filter(Predicate)
     */
    default NodeStream<T> filterNot(Predicate<? super @NonNull T> predicate) {
        return filter(predicate.negate());
    }
// these are shorthands defined relative to filter
/**
* Filters the nodes of this stream by comparing a value extracted from the nodes
* with the given constant. This takes care of null value by calling
* {@link Objects#equals(Object, Object)}. E.g. to filter nodes that have
* the {@linkplain Node#getImage() image} {@code "a"}, use {@code filterMatching(Node::getImage, "a")}.
*
* <p>This is equivalent to {@code filter(t -> Objects.equals(extractor.apply(t), comparand))}.
*
* @param extractor Function extracting a value from the nodes of this stream
* @param comparand Value to which the extracted value will be compared
* @param <U> Type of value to compare
*
* @return A filtered node stream
*
* @see #filter(Predicate)
* @see #filterNotMatching(Function, Object)
*/
default <U> NodeStream<T> filterMatching(Function<? super @NonNull T, ? extends @Nullable U> extractor, U comparand) {
return filter(t -> Objects.equals(extractor.apply(t), comparand));
}
/**
* Filters the nodes of this stream that are a subtype of the given class.
*
* <p>This is equivalent to {@code filter(rClass::isInstance).map(rClass::cast)}.
*
* @param rClass The type of the nodes of the returned stream
* @param <R> The type of the nodes of the returned stream
*
* @return A filtered node stream
*
* @see #filter(Predicate)
* @see #asInstanceOf(Class, Class[])
*/
@SuppressWarnings("unchecked")
default <R extends Node> NodeStream<R> filterIs(Class<? extends R> rClass) {
return (NodeStream<R>) filter(rClass::isInstance);
}
/**
* Inverse of {@link #filterMatching(Function, Object)}.
*
* @param extractor Function extracting a value from the nodes of this stream
* @param comparand Value to which the extracted value will be compared
* @param <U> Type of value to compare
*
* @return A filtered node stream
*
* @see #filter(Predicate)
* @see #filterMatching(Function, Object)
*/
default <U> NodeStream<T> filterNotMatching(Function<? super @NonNull T, ? extends @Nullable U> extractor, U comparand) {
return filter(t -> !Objects.equals(extractor.apply(t), comparand));
}
// "terminal" operations
@Override
void forEach(Consumer<? super @NonNull T> action);
/**
* Reduce the elements of this stream sequentially.
*
* @param identity Identity element
* @param accumulate Combine an intermediate result with a new node from this stream,
* returns the next intermediate result
* @param <R> Result type
*
* @return The last intermediate result (identity if this stream is empty)
*/
default <R> R reduce(R identity, BiFunction<? super R, ? super T, ? extends R> accumulate) {
R result = identity;
for (T node : this) {
result = accumulate.apply(result, node);
}
return result;
}
    /**
     * Sum the elements of this stream by associating them to an integer.
     *
     * @param toInt Map an element to an integer, which will be added
     *              to the running sum
     *
     * @return The sum, zero if the stream is empty.
     */
    default int sumBy(ToIntFunction<? super T> toInt) {
        int result = 0;
        for (T node : this) {
            result += toInt.applyAsInt(node);
        }
        return result;
    }
    /**
     * Returns the number of nodes in this stream.
     *
     * @return the number of nodes in this stream
     */
    // ASTs are not so big as to warrant using a 'long' here
    int count();
/**
* Returns the sum of the value of the function applied to all
* elements of this stream.
*
* @param intMapper Mapping function
*
* @return The sum
*/
default int sumByInt(ToIntFunction<? super T> intMapper) {
int sum = 0;
for (T item : this) {
sum += intMapper.applyAsInt(item);
}
return sum;
}
    /**
     * Returns {@code true} if the stream has at least one element.
     *
     * @return {@code true} if the stream has at least one element.
     *
     * @see #isEmpty()
     */
    boolean nonEmpty();
    /**
     * Returns {@code true} if the stream has no elements.
     *
     * <p>This is the negation of {@link #nonEmpty()}.
     *
     * @return {@code true} if the stream has no elements.
     *
     * @see #nonEmpty()
     */
    default boolean isEmpty() {
        return !nonEmpty();
    }
    /**
     * Returns whether any elements of this stream match the provided predicate.
     * If the stream is empty then false is returned and the predicate is not evaluated.
     *
     * @param predicate The predicate that one element should match for this method to return true
     *
     * @return true if any elements of the stream match the provided predicate, otherwise false
     *
     * @see #all(Predicate)
     * @see #none(Predicate)
     */
    boolean any(Predicate<? super T> predicate);
    /**
     * Returns whether no elements of this stream match the provided predicate.
     * If the stream is empty then true is returned and the predicate is not evaluated.
     *
     * <p>This is the negation of {@link #any(Predicate)}.
     *
     * @param predicate The predicate that no element should match for this method to return true
     *
     * @return true if either no elements of the stream match the provided predicate or the stream is empty, otherwise false
     *
     * @see #any(Predicate)
     * @see #all(Predicate)
     */
    boolean none(Predicate<? super T> predicate);
    /**
     * Returns whether all elements of this stream match the provided predicate.
     * If the stream is empty then true is returned and the predicate is not evaluated.
     *
     * @param predicate The predicate that all elements should match for this method to return true
     *
     * @return true if either all elements of the stream match the provided predicate or the stream is empty, otherwise false
     *
     * @see #any(Predicate)
     * @see #none(Predicate)
     */
    boolean all(Predicate<? super T> predicate);
    /**
     * Returns the element at index n in this stream.
     * If no such element exists, {@code null} is returned.
     *
     * <p>This is equivalent to <tt>{@link #drop(int) drop(n)}.{@link #first()}</tt>
     *
     * <p>If you'd rather continue processing the nth element as a node stream,
     * you can use <tt>{@link #drop(int) drop(n)}.{@link #take(int) take(1)}.</tt>
     *
     * @param n Index of the element to find (zero-based)
     *
     * @return The nth element of this stream, or {@code null} if it doesn't exist
     *
     * @throws IllegalArgumentException if n is negative
     */
    default @Nullable T get(int n) {
        return drop(n).first();
    }
    /**
     * Returns the first element of this stream, or {@code null} if the
     * stream is empty.
     *
     * <p>If you'd rather continue processing the first element as a node
     * stream, you can use {@link #take(int) take(1)}.
     *
     * <p>This is equivalent to {@link #get(int) get(0)}.
     *
     * @return the first element of this stream, or {@code null} if it doesn't exist
     *
     * @see #first(Predicate)
     * @see #first(Class)
     * @see #firstOpt()
     */
    @Nullable T first();
/**
* Returns the first element of this stream, or throws a {@link NoSuchElementException}
* if the stream is empty.
*
* @return the first element of this stream
*
* @see #first(Predicate)
* @see #first(Class)
* @see #firstOpt()
*/
@NonNull
default T firstOrThrow() {
T first = first();
if (first == null) {
throw new NoSuchElementException("Empty node stream");
}
return first;
}
/**
* Returns an optional containing the first element of this stream,
* or an empty optional if the stream is empty.
*
* <p>This is equivalent to {@code Optional.ofNullable(first())}.
*
* @return the first element of this stream, or an empty optional if it doesn't exist
*
* @see #first(Predicate)
* @see #first(Class)
* @see #first()
*/
default Optional<T> firstOpt() {
return Optional.ofNullable(first());
}
/**
* Returns the first element of this stream that matches the given
* predicate, or {@code null} if there is none.
*
* @param predicate The predicate that one element should match for
* this method to return it
*
* @return the first element of this stream that matches the given
* predicate, or {@code null} if it doesn't exist
*
* @see #first()
* @see #first(Class)
*/
default @Nullable T first(Predicate<? super T> predicate) {
return filter(predicate).first();
}
    /**
     * Returns the first element of this stream of the given type, or
     * {@code null} if there is none.
     *
     * <p>This is equivalent to {@code filterIs(rClass).first()}.
     *
     * @param rClass The type of node to find
     * @param <R>    The type of node to find
     *
     * @return the first element of this stream of the given type, or {@code null} if it doesn't exist
     *
     * @see #first()
     * @see #first(Predicate)
     */
    default <R extends Node> @Nullable R first(Class<? extends R> rClass) {
        return filterIs(rClass).first();
    }
/**
* Returns the first element of this stream for which the mapping function
* returns a non-null result. Returns null if there is no such element.
* This is a convenience method to use with {@link #asInstanceOf(Class, Class[])},
* because using just {@link #map(Function) map} followed by {@link #first()}
* will lose the type information and mentioning explicit type arguments
* would be needed.
*
* @param nullableFun Mapper function
* @param <R> Result type
*
* @return A node, or null
*
* @see #asInstanceOf(Class, Class[])
*/
default <R extends Node> @Nullable R firstNonNull(Function<? super @NonNull T, ? extends @Nullable R> nullableFun) {
return map(nullableFun).first();
}
    /**
     * Returns the last element of this stream, or {@code null} if the
     * stream is empty. This may or may not require traversing all the
     * elements of the stream.
     *
     * @return the last element of this stream, or {@code null} if it doesn't exist
     */
    @Nullable T last();
    /**
     * Returns the last element of this stream of the given type, or
     * {@code null} if there is none.
     *
     * <p>This is equivalent to {@code filterIs(rClass).last()}.
     *
     * @param rClass The type of node to find
     * @param <R>    The type of node to find
     *
     * @return the last element of this stream of the given type, or {@code null} if it doesn't exist
     *
     * @see #last()
     */
    default <R extends Node> @Nullable R last(Class<? extends R> rClass) {
        return filterIs(rClass).last();
    }
    /**
     * Collects the elements of this node stream using the specified {@link Collector}.
     * This is equivalent to {@link #toStream()} followed by {@link Stream#collect(Collector)}.
     *
     * @param <R>       the type of the result
     * @param <A>       the intermediate accumulation type of the {@code Collector}
     * @param collector the {@code Collector} describing the reduction
     *
     * @return the result of the reduction
     *
     * @see Stream#collect(Collector)
     * @see java.util.stream.Collectors
     * @see #toList()
     * @see #toList(Function)
     */
    <R, A> R collect(Collector<? super T, A, R> collector);
    /**
     * Returns a new stream of Ts having the pipeline of operations
     * defined by this node stream. This can be called multiple times,
     * each call produces a fresh {@link Stream}.
     *
     * @return A stream containing the same elements as this node stream
     */
    Stream<@NonNull T> toStream();
    /**
     * Collects the elements of this node stream into a list. Just like
     * for {@link Collectors#toList()}, there are no guarantees on the
     * type, mutability, serializability, or thread-safety of the returned
     * list.
     *
     * <p>This is equivalent to {@code collect(Collectors.toList())}.
     *
     * @return a list containing the elements of this stream
     *
     * @see Collectors#toList()
     * @see #collect(Collector)
     */
    default List<T> toList() {
        return collect(Collectors.toList());
    }
    /**
     * Maps the elements of this node stream using the given mapping
     * and collects the results into a list. Note that, contrary to
     * {@link #map(Function)}, null results of the mapper are kept in
     * the returned list.
     *
     * <p>This is equivalent to {@code collect(Collectors.mapping(mapper, Collectors.toList()))}.
     *
     * @param mapper Mapping function
     * @param <R>    Return type of the mapper, and element type of the returned list
     *
     * @return a list containing the elements of this stream
     *
     * @see Collectors#mapping(Function, Collector)
     * @see #collect(Collector)
     */
    default <R> List<R> toList(Function<? super T, ? extends R> mapper) {
        return collect(Collectors.mapping(mapper, Collectors.toList()));
    }
/**
* Returns a node stream containing zero or one node,
* depending on whether the argument is null or not.
*
* <p>If you know the node is not null, you can also
* call <tt>node.{@link Node#asStream() asStream()}</tt>.
*
* @param node The node to contain
* @param <T> Element type of the returned stream
*
* @return A new node stream
*
* @see Node#asStream()
*/
static <T extends Node> NodeStream<T> of(@Nullable T node) {
// overload the varargs to avoid useless array creation
return node == null ? empty() : StreamImpl.singleton(node);
}
    // construction
    // we ensure here that no node stream may contain null values
    /**
     * Returns a new node stream that contains the same elements as the given
     * iterable. Null items are filtered out of the resulting stream.
     *
     * <p>It's possible to map an iterator to a node stream by calling
     * {@code fromIterable(() -> iterator)}, but then the returned node stream
     * would only be iterable once.
     *
     * @param iterable Source of nodes
     * @param <T>      Type of nodes in the returned node stream
     *
     * @return A new node stream
     */
    static <T extends Node> NodeStream<T> fromIterable(Iterable<? extends @Nullable T> iterable) {
        return StreamImpl.fromIterable(iterable);
    }
/**
* Returns a node stream containing zero or one node,
* depending on whether the optional is empty or not.
*
* @param optNode The node to contain
* @param <T> Element type of the returned stream
*
* @return A new node stream
*
* @see #of(Node)
*/
static <T extends Node> NodeStream<T> ofOptional(Optional<? extends T> optNode) {
return optNode.map(StreamImpl::<T>singleton).orElseGet(StreamImpl::empty);
}
    /**
     * Returns a node stream whose elements are the given nodes
     * in order. Null elements are not part of the resulting node
     * stream.
     *
     * @param nodes The elements of the new stream
     * @param <T>   Element type of the returned stream
     *
     * @return A new node stream
     */
    @SafeVarargs
    static <T extends Node> NodeStream<T> of(T... nodes) {
        return fromIterable(Arrays.asList(nodes));
    }
    /**
     * Returns a node stream containing all the elements of the given streams,
     * one after the other.
     *
     * <p>This is a convenience overload of {@link #union(Iterable)}.
     *
     * @param <T>     The type of stream elements
     * @param streams the streams to flatten
     *
     * @return the concatenation of the input streams
     */
    @SafeVarargs
    static <T extends Node> NodeStream<T> union(NodeStream<? extends T>... streams) {
        return union(Arrays.asList(streams));
    }
    /**
     * Returns a node stream containing all the elements of the given streams,
     * one after the other.
     *
     * @param <T>     The type of stream elements
     * @param streams the streams to flatten
     *
     * @return the concatenation of the input streams
     */
    static <T extends Node> NodeStream<T> union(Iterable<? extends NodeStream<? extends T>> streams) {
        return StreamImpl.union(streams);
    }
    /**
     * Returns an empty node stream.
     *
     * @param <T> Expected type of nodes.
     *
     * @return An empty node stream
     */
    static <T extends Node> NodeStream<T> empty() {
        return StreamImpl.empty();
    }
/**
* Applies the given mapping functions to the given upstream in order and merges the
* results into a new node stream. This allows exploring several paths at once on the
* same stream. The method is lazy and won't evaluate the upstream pipeline several times.
*
* @param upstream Source of the stream
* @param fst First mapper
* @param snd Second mapper
* @param rest Rest of the mappers
* @param <R> Common supertype for the element type of the streams returned by the mapping functions
*
* @return A merged node stream
*/
@SafeVarargs // this method is static because of the generic varargs
static <T extends Node, R extends Node> NodeStream<R> forkJoin(NodeStream<? extends T> upstream,
Function<? super @NonNull T, ? extends NodeStream<? extends R>> fst,
Function<? super @NonNull T, ? extends NodeStream<? extends R>> snd,
Function<? super @NonNull T, ? extends NodeStream<? extends R>>... rest) {
Objects.requireNonNull(fst);
Objects.requireNonNull(snd);
List<Function<? super T, ? extends NodeStream<? extends R>>> mappers = new ArrayList<>(rest.length + 2);
mappers.add(fst);
mappers.add(snd);
mappers.addAll(Arrays.asList(rest));
Function<? super T, NodeStream<R>> aggregate =
t -> NodeStream.<R>union(mappers.stream().map(f -> f.apply(t)).collect(Collectors.toList()));
// with forkJoin we know that the stream will be iterated more than twice so we cache the values
return upstream.cached().flatMap(aggregate);
}
    /**
     * Returns a map function, that checks whether the parameter is an
     * instance of any of the given classes. If so, it returns the parameter,
     * otherwise it returns null.
     *
     * <p>This may be used to filter a node stream to those specific
     * classes, for example:
     *
     * <pre>{@code
     * NodeStream<ASTExpression> exprs = someStream.map(asInstanceOf(ASTInfixExpression.class, ASTCastExpression.class));
     * }</pre>
     *
     * Using this in the middle of a call chain might require passing
     * explicit type arguments:
     *
     * <pre>{@code
     * ASTAnyTypeDeclaration ts =
     *     node.ancestors()
     *         .<ASTAnyTypeDeclaration>map(asInstanceOf(ASTClassOrInterfaceDeclaration.class, ASTEnumDeclaration.class))
     *         .first(); // would not compile without the explicit type arguments
     * }</pre>
     *
     * <p>For this use case the {@link #firstNonNull(Function)} method
     * may be used, which reduces the above to
     *
     * <pre>{@code
     * ASTAnyTypeDeclaration ts =
     *     node.ancestors().firstNonNull(asInstanceOf(ASTClassOrInterfaceDeclaration.class, ASTEnumDeclaration.class));
     * }</pre>
     *
     * @param c1   First type to test
     * @param rest Other types to test
     * @param <O>  Output type
     *
     * @return A function mapping an object to itself if it is an instance
     *     of any of the given classes, and to null otherwise
     *
     * @see #firstNonNull(Function)
     */
    @SafeVarargs // this method is static because of the generic varargs
    @SuppressWarnings("unchecked")
    static <O> Function<@Nullable Object, @Nullable O> asInstanceOf(Class<? extends O> c1, Class<? extends O>... rest) {
        // fast path for the common single-class case
        if (rest.length == 0) {
            return obj -> c1.isInstance(obj) ? (O) obj : null;
        }
        return obj -> {
            if (c1.isInstance(obj)) {
                return (O) obj;
            }
            for (Class<? extends O> aClass : rest) {
                if (aClass.isInstance(obj)) {
                    return (O) obj;
                }
            }
            return null;
        };
    }
    /**
     * A specialization of {@link NodeStream} that allows configuring
     * tree traversal behaviour when traversing the descendants of a node.
     * Such a stream is returned by methods such as {@link Node#descendants()}.
     * When those methods are called on a stream containing more than one
     * element (eg {@link NodeStream#descendants()}), the configuration
     * applies to each individual traversal.
     *
     * <p>By default, traversal is performed depth-first (prefix order). Eg
     * <pre>{@code
     *  A
     *  + B
     *    + C
     *    + D
     *  + E
     *    + F
     * }</pre>
     * is traversed in the order {@code A, B, C, D, E, F}.
     *
     * <p>By default, traversal also does not cross {@linkplain #crossFindBoundaries(boolean) find boundaries}.
     *
     * @param <T> Type of node this stream contains
     */
    interface DescendantNodeStream<T extends Node> extends NodeStream<T> {
        // TODO stop recursion on an arbitrary boundary
        // TODO breadth-first traversal
        /**
         * Returns a node stream that will not stop the tree traversal
         * when encountering a find boundary. Find boundaries are node
         * that by default stop tree traversals, like class declarations.
         * They are identified via {@link Node#isFindBoundary()}.
         *
         * <p>For example, supposing you have the AST node for the following
         * method:
         * <pre>{@code
         * void method() {
         *     String outer = "before";
         *
         *     class Local {
         *        void localMethod() {
         *          String local = "local";
         *        }
         *     }
         *
         *     String after = "after";
         * }
         * }</pre>
         * Then the stream {@code method.descendants(ASTStringLiteral.class)}
         * will only yield the literals {@code "before"} and {@code "after"},
         * because the traversal doesn't go below the local class.
         *
         * <p>Note that traversal is stopped only for the subtree of the
         * find boundary, but continues on the siblings. This is why
         * {@code "after"} is yielded. This is also why {@link #takeWhile(Predicate)}
         * is not a substitute for this method: {@code method.descendants(ASTStringLiteral.class).takeWhile(it -> !it.isFindBoundary)}
         * would yield only {@code "before"}.
         *
         * <p>This behaviour can be opted out of with this method. In the
         * example, the stream {@code method.descendants(ASTStringLiteral.class).crossFindBoundaries()}
         * will yield {@code "before"}, {@code "local"} and {@code "after"}
         * literals.
         *
         * @param cross If true, boundaries will be crossed.
         *
         * @return A new node stream
         */
        DescendantNodeStream<T> crossFindBoundaries(boolean cross);
        /**
         * An alias for {@link #crossFindBoundaries(boolean) crossFindBoundaries(true)}.
         *
         * @return A new node stream
         */
        default DescendantNodeStream<T> crossFindBoundaries() {
            return crossFindBoundaries(true);
        }
    }
}
| 44,946 | 35.189211 | 214 | java |
pmd | pmd-master/pmd-core/src/main/java/net/sourceforge/pmd/lang/ast/SemanticException.java | /*
* BSD-style license; for more info see http://pmd.sourceforge.net/license.html
*/
package net.sourceforge.pmd.lang.ast;
/**
 * An error that occurs after validating a file.
 */
public class SemanticException extends FileAnalysisException {
    /** Creates an exception with no message and no cause. */
    public SemanticException() {
        super();
    }
    /** Creates an exception with the given detail message. */
    public SemanticException(String message) {
        super(message);
    }
    /** Creates an exception with the given cause. */
    public SemanticException(Throwable cause) {
        super(cause);
    }
    /** Creates an exception with the given detail message and cause. */
    public SemanticException(String message, Throwable cause) {
        super(message, cause);
    }
}
| 563 | 19.142857 | 79 | java |
pmd | pmd-master/pmd-core/src/main/java/net/sourceforge/pmd/lang/ast/RootNode.java | /**
* BSD-style license; for more info see http://pmd.sourceforge.net/license.html
*/
package net.sourceforge.pmd.lang.ast;
/**
 * This interface identifies the root node of an AST. Each language
 * implementation must ensure that every AST its parser produces has
 * a RootNode as its root, and that there is no other RootNode instance
 * in the tree.
 */
public interface RootNode extends Node {
    /**
     * {@inheritDoc}
     *
     * <p>Covariant override: the AST info of a root node is always
     * parameterized with a root node type.
     */
    @Override
    AstInfo<? extends RootNode> getAstInfo();
}
| 467 | 22.4 | 79 | java |
pmd | pmd-master/pmd-core/src/main/java/net/sourceforge/pmd/lang/ast/Parser.java | /*
* BSD-style license; for more info see http://pmd.sourceforge.net/license.html
*/
package net.sourceforge.pmd.lang.ast;
import java.util.Objects;
import net.sourceforge.pmd.lang.LanguageProcessor;
import net.sourceforge.pmd.lang.LanguageProcessorRegistry;
import net.sourceforge.pmd.lang.LanguageVersion;
import net.sourceforge.pmd.lang.document.TextDocument;
import net.sourceforge.pmd.util.AssertionUtil;
/**
 * Produces an AST from a source file. Instances of this interface must
 * be stateless (which makes them trivially threadsafe).
 */
public interface Parser {
    /**
     * Parses an entire tree for this language. This may perform some
     * semantic analysis, like name resolution.
     *
     * @param task Description of the parsing task
     *
     * @return The root of the tree corresponding to the source code.
     *
     * @throws IllegalArgumentException If the language version of the
     *                                  parsing task is for an incorrect language
     * @throws FileAnalysisException    If any error occurs
     */
    RootNode parse(ParserTask task) throws FileAnalysisException;
    /**
     * Parameters passed to a parsing task.
     */
    final class ParserTask {
        // the document to parse
        private final TextDocument textDoc;
        // sink for semantic errors reported during/after parsing
        private final SemanticErrorReporter reporter;
        // registry giving access to the processors of all loaded languages
        private final LanguageProcessorRegistry lpRegistry;
        /**
         * Creates a new parsing task.
         *
         * @param textDoc    Document to parse
         * @param reporter   Reporter for semantic errors
         * @param lpRegistry Registry of language processors, must contain a
         *                   processor for the document's language
         *
         * @throws NullPointerException If any parameter is null, or the registry
         *                              has no processor for the document's language
         */
        public ParserTask(TextDocument textDoc, SemanticErrorReporter reporter, LanguageProcessorRegistry lpRegistry) {
            this.textDoc = AssertionUtil.requireParamNotNull("Text document", textDoc);
            this.reporter = AssertionUtil.requireParamNotNull("reporter", reporter);
            this.lpRegistry = AssertionUtil.requireParamNotNull("lpRegistry", lpRegistry);
            // fail fast if the registry cannot handle the document's language
            Objects.requireNonNull(lpRegistry.getProcessor(textDoc.getLanguageVersion().getLanguage()));
        }
        /** The language version of the document to parse. */
        public LanguageVersion getLanguageVersion() {
            return textDoc.getLanguageVersion();
        }
        /**
         * The display name for where the file comes from. This should
         * not be interpreted, it may not be a file-system path.
         */
        public String getFileDisplayName() {
            return textDoc.getFileId().getOriginalPath();
        }
        /**
         * The text document to parse.
         */
        public TextDocument getTextDocument() {
            return textDoc;
        }
        /**
         * The full text of the file to parse.
         */
        public String getSourceText() {
            return getTextDocument().getText().toString();
        }
        /**
         * The error reporter for semantic checks.
         */
        public SemanticErrorReporter getReporter() {
            return reporter;
        }
        /** The registry of language processors available to this task. */
        public LanguageProcessorRegistry getLpRegistry() {
            return lpRegistry;
        }
        /** The processor for the language of the document to parse. */
        public LanguageProcessor getLanguageProcessor() {
            return lpRegistry.getProcessor(getLanguageVersion().getLanguage());
        }
        /** Returns a copy of this task with the given document instead. */
        public ParserTask withTextDocument(TextDocument textDocument) {
            return new ParserTask(
                textDocument,
                this.reporter,
                this.lpRegistry
            );
        }
    }
}
| 3,259 | 30.047619 | 119 | java |
pmd | pmd-master/pmd-core/src/main/java/net/sourceforge/pmd/lang/ast/TokenMgrError.java | /*
* BSD-style license; for more info see http://pmd.sourceforge.net/license.html
*/
package net.sourceforge.pmd.lang.ast;
import org.checkerframework.checker.nullness.qual.NonNull;
import org.checkerframework.checker.nullness.qual.Nullable;
import net.sourceforge.pmd.annotation.InternalApi;
import net.sourceforge.pmd.lang.document.FileId;
import net.sourceforge.pmd.lang.document.FileLocation;
import net.sourceforge.pmd.util.StringUtil;
/**
 * An error thrown during lexical analysis of a file.
 */
public final class TokenMgrError extends FileAnalysisException {
    // position at which the lexer failed
    private final int line;
    private final int column;
    /**
     * Create a new exception.
     *
     * @param line     Line number
     * @param column   Column number
     * @param filename Filename. If unknown, it can be completed with {@link #setFileName(String)} later
     * @param message  Message of the error
     * @param cause    Cause of the error, if any
     */
    public TokenMgrError(int line, int column, @Nullable FileId filename, String message, @Nullable Throwable cause) {
        super(message, cause);
        this.line = line;
        this.column = column;
        if (filename != null) {
            super.setFileId(filename);
        }
    }
    /**
     * Constructor called by JavaCC.
     */
    @InternalApi
    public TokenMgrError(boolean eofSeen, String lexStateName, int errorLine, int errorColumn, String errorAfter, char curChar) {
        super(makeReason(eofSeen, lexStateName, errorAfter, curChar));
        line = errorLine;
        column = errorColumn;
    }
    /** Returns the line number at which the error occurred. */
    public int getLine() {
        return line;
    }
    /** Returns the column number at which the error occurred. */
    public int getColumn() {
        return column;
    }
    @Override
    protected @NonNull FileLocation location() {
        // a lexical error has no length, report a caret position
        return FileLocation.caret(getFileId(), line, column);
    }
    @Override
    protected String errorKind() {
        return "Lexical error";
    }
    /**
     * Replace the file name of this error.
     *
     * @param fileId New filename
     *
     * @return A new exception
     */
    @Override
    public TokenMgrError setFileId(FileId fileId) {
        super.setFileId(fileId);
        return this;
    }
    /**
     * Builds the exception message from the lexer state: either reports
     * an unexpected end of file, or the unexpected character (with its
     * numeric value), plus the text consumed before the failure.
     */
    private static String makeReason(boolean eofseen, String lexStateName, String errorAfter, char curChar) {
        String message;
        if (eofseen) {
            message = "<EOF> ";
        } else {
            message = "\"" + StringUtil.escapeJava(String.valueOf(curChar)) + "\"" + " (" + (int) curChar + "), ";
        }
        message += "after : \"" + StringUtil.escapeJava(errorAfter) + "\" (in lexical state " + lexStateName + ")";
        return message;
    }
}
| 2,667 | 27.382979 | 129 | java |
pmd | pmd-master/pmd-core/src/main/java/net/sourceforge/pmd/lang/ast/ParseException.java | /**
* BSD-style license; for more info see http://pmd.sourceforge.net/license.html
*/
package net.sourceforge.pmd.lang.ast;
import java.util.LinkedHashSet;
import java.util.Set;
import java.util.stream.Collectors;
import org.checkerframework.checker.nullness.qual.NonNull;
import org.checkerframework.checker.nullness.qual.Nullable;
import net.sourceforge.pmd.lang.ast.impl.javacc.JavaccToken;
import net.sourceforge.pmd.lang.ast.impl.javacc.JavaccTokenDocument;
import net.sourceforge.pmd.lang.document.FileLocation;
import net.sourceforge.pmd.reporting.Reportable;
import net.sourceforge.pmd.util.StringUtil;
public class ParseException extends FileAnalysisException {
    /**
     * Location of the error, or null if unknown. When this exception is
     * created by JavaCC, this is the position of the first error token
     * (the token following the last successfully consumed token). It may
     * also be set after construction with {@link #withLocation(FileLocation)}.
     */
    private @Nullable FileLocation location;
    public ParseException(String message) {
        super(message);
        this.location = null;
    }
    public ParseException(Throwable cause) {
        super(cause);
        this.location = null;
    }
    /**
     * This constructor is called by Javacc.
     */
    public ParseException(@NonNull JavaccToken currentTokenVal,
                          int[][] expectedTokenSequencesVal) {
        super(makeMessage(currentTokenVal, expectedTokenSequencesVal));
        // currentTokenVal is the last consumed token, so its successor is the first error token
        location = currentTokenVal.getNext().getReportLocation();
    }
    /** Sets the location of this error, and returns this exception. */
    public ParseException withLocation(FileLocation loc) {
        location = loc;
        super.setFileId(loc.getFileId());
        return this;
    }
    /** Sets the location of this error from a reportable, and returns this exception. */
    public ParseException withLocation(Reportable reportable) {
        return withLocation(reportable.getReportLocation());
    }
    @Override
    protected String errorKind() {
        return "Parse exception";
    }
    @Override
    protected @Nullable FileLocation location() {
        return location;
    }
    /**
     * It uses "currentToken" and "expectedTokenSequences" to generate a parse
     * error message and returns it. If this object has been created
     * due to a parse error, and you do not catch it (it gets thrown
     * from the parser) the correct error message
     * gets displayed.
     */
    private static String makeMessage(@NonNull JavaccToken currentToken,
                                      int[][] expectedTokenSequences) {
        JavaccTokenDocument document = currentToken.getDocument();
        String eol = System.lineSeparator();
        Set<String> expectedBranches = new LinkedHashSet<>();
        int maxSize = 0;
        // render each expected token sequence, remembering the longest one
        for (int[] expectedTokenSequence : expectedTokenSequences) {
            StringBuilder expected = new StringBuilder();
            if (maxSize < expectedTokenSequence.length) {
                maxSize = expectedTokenSequence.length;
            }
            for (int i : expectedTokenSequence) {
                expected.append(document.describeKind(i)).append(' ');
            }
            // kind 0 is EOF; a sequence not ending in EOF may continue further
            if (expectedTokenSequence[expectedTokenSequence.length - 1] != 0) {
                expected.append("...");
            }
            expectedBranches.add(expected.toString());
        }
        String expected = expectedBranches.stream().collect(Collectors.joining(System.lineSeparator() + "    "));
        StringBuilder retval = new StringBuilder("Encountered ");
        if (maxSize > 1) {
            retval.append('[');
        }
        // print the actual tokens found, up to the length of the longest expected sequence
        JavaccToken tok = currentToken.next;
        for (int i = 0; i < maxSize; i++) {
            if (i != 0) {
                retval.append(' ');
            }
            if (tok.kind == 0) {
                retval.append(document.describeKind(0));
                break;
            }
            String kindStr = document.describeKind(tok.kind);
            String image = StringUtil.escapeJava(tok.getImage());
            retval.append(kindStr);
            if (!isEnquotedVersion(kindStr, image)) {
                // then it's an angle-braced name
                retval.deleteCharAt(retval.length() - 1); // remove '>'
                retval.append(": \"");
                retval.append(image);
                retval.append("\">");
            }
            tok = tok.next;
        }
        if (maxSize > 1) {
            retval.append(']');
        }
        retval.append('.').append(eol);
        if (expectedTokenSequences.length == 1) {
            retval.append("Was expecting:").append(eol).append("    ");
        } else {
            retval.append("Was expecting one of:").append(eol).append("    ");
        }
        retval.append(expected);
        return retval.toString();
    }
    // true if the description of the token kind is exactly the quoted image
    private static boolean isEnquotedVersion(String kindStr, String image) {
        return kindStr.equals('"' + image + '"');
    }
}
| 4,808 | 31.493243 | 113 | java |
pmd | pmd-master/pmd-core/src/main/java/net/sourceforge/pmd/lang/ast/impl/TokenDocument.java | /*
* BSD-style license; for more info see http://pmd.sourceforge.net/license.html
*/
package net.sourceforge.pmd.lang.ast.impl;
import net.sourceforge.pmd.annotation.Experimental;
import net.sourceforge.pmd.lang.ast.GenericToken;
import net.sourceforge.pmd.lang.document.Chars;
import net.sourceforge.pmd.lang.document.TextDocument;
/**
 * Token layer of a parsed file.
 */
@Experimental
public abstract class TokenDocument<T extends GenericToken> {
    // the text document the tokens were lexed from
    private final TextDocument textDocument;
    public TokenDocument(TextDocument textDocument) {
        this.textDocument = textDocument;
    }
    /** Returns the original text of the file (without escaping). */
    public Chars getFullText() {
        return textDocument.getText();
    }
    /** Returns the text document the tokens were produced from. */
    public TextDocument getTextDocument() {
        return textDocument;
    }
    /**
     * Returns the first token of the token chain.
     *
     * @throws IllegalStateException If the document has not been parsed yet
     */
    public abstract T getFirstToken();
}
| 1,027 | 23.47619 | 79 | java |
pmd | pmd-master/pmd-core/src/main/java/net/sourceforge/pmd/lang/ast/impl/AbstractNode.java | /*
* BSD-style license; for more info see http://pmd.sourceforge.net/license.html
*/
package net.sourceforge.pmd.lang.ast.impl;
import java.util.Arrays;

import org.apache.commons.lang3.ArrayUtils;
import org.checkerframework.checker.nullness.qual.Nullable;

import net.sourceforge.pmd.lang.ast.Node;
import net.sourceforge.pmd.lang.ast.NodeStream;
import net.sourceforge.pmd.lang.ast.internal.StreamImpl;
import net.sourceforge.pmd.util.DataMap;
import net.sourceforge.pmd.util.DataMap.DataKey;
/**
 * Base class for implementations of the Node interface whose children
 * are stored in an array. This class provides the basic utilities to
 * link children and parent. It's used by most nodes, but currently
 * not the antlr nodes, so downcasting {@link Node} to this class may fail
 * and is very bad practice.
 *
 * @param <B> Self type (eg AbstractJavaNode in the java module), this
 *            must ultimately implement {@code <N>}, though the java type
 *            system does not allow us to express that
 * @param <N> Public interface for nodes of this language (eg JavaNode
 *            in the java module).
 */
public abstract class AbstractNode<B extends AbstractNode<B, N>,
    // note: having Node as the first bound here makes casts from Node to N noops at runtime.
    N extends Node & GenericNode<N>> implements GenericNode<N> {
    private static final Node[] EMPTY_ARRAY = new Node[0];
    // lazily initialized, many nodes don't need it
    private @Nullable DataMap<DataKey<?, ?>> userData;
    // never null, never contains null elements
    private Node[] children = EMPTY_ARRAY;
    // null while this node is not linked into a tree
    private B parent;
    // index of this node in its parent's children array
    private int childIndex;
    protected AbstractNode() {
        // only for subclassing
    }
    @Override
    public final N getParent() {
        // unchecked cast is safe under the contract stated in the class javadoc:
        // subclasses only ever link nodes of type N
        return (N) parent;
    }
    @Override
    public final int getIndexInParent() {
        return childIndex;
    }
    @Override
    public final N getChild(final int index) {
        // throws ArrayIndexOutOfBoundsException for an invalid index
        return (N) children[index];
    }
    @Override
    public final int getNumChildren() {
        return children.length;
    }
    /** Sets the parent reference of this node. Does not touch the parent's children array. */
    protected void setParent(final B parent) {
        this.parent = parent;
    }
    /** Downcast the given node to the self type {@code B}. */
    @SuppressWarnings("unchecked")
    private B asSelf(Node n) {
        return (B) n;
    }
    /**
     * Set the child at the given index to the given node. This resizes
     * the children array to be able to contain the given index. Implementations
     * must take care that this does not leave any "holes" in the array.
     * This method asserts that there is no child at the given index yet.
     *
     * <p>Note that it is more efficient to add children in reverse
     * (from right to left), because the array is resized only the
     * first time.
     *
     * <p>This method also calls {@link #setParent(AbstractNode)}.
     *
     * @param child The child to add
     * @param index The index to which the child will be added
     */
    protected void addChild(final B child, final int index) {
        assert index >= 0 : "Invalid index " + index;
        assert index >= children.length || children[index] == null : "There is already a child at index " + index;
        if (index >= children.length) {
            // grow the array exactly up to the requested index
            final Node[] newChildren = new Node[index + 1];
            System.arraycopy(children, 0, newChildren, 0, children.length);
            children = newChildren;
        }
        setChild(child, index);
    }
    /**
     * Set the child at the given index. The difference with {@link #addChild(AbstractNode, int) addChild}
     * is that the index must exist, while addChild may resize the array.
     */
    protected void setChild(final B child, final int index) {
        assert index >= 0 && index < children.length : "Invalid index " + index + " for length " + children.length;
        children[index] = child;
        child.setChildIndex(index);
        child.setParent(asSelf(this));
    }
    /**
     * Insert a child at the given index, shifting all the following
     * children to the right.
     *
     * @param child New child
     * @param index Index (must be {@code 0 <= index <= getNumChildren()}), ie
     *              you cannot insert a node beyond the end, because that
     *              would leave holes in the array
     */
    protected void insertChild(final B child, final int index) {
        assert index >= 0 && index <= children.length
            : "Invalid index for insertion into array of length " + children.length + ": " + index;
        Node[] newChildren = new Node[children.length + 1];
        if (index != 0) {
            // copy the children left of the insertion point
            System.arraycopy(children, 0, newChildren, 0, index);
        }
        if (index != children.length) {
            // shift the children right of the insertion point
            System.arraycopy(children, index, newChildren, index + 1, children.length - index);
        }
        newChildren[index] = child;
        child.setParent(asSelf(this));
        // reindex the inserted child and everything that was shifted
        for (int i = index; i < newChildren.length; i++) {
            asSelf(newChildren[i]).setChildIndex(i);
        }
        this.children = newChildren;
    }
    /** Detach this node from its parent, if it has one. */
    protected void remove() {
        // Detach current node of its parent, if any
        if (parent != null) {
            parent.removeChildAtIndex(getIndexInParent());
            setParent(null);
        }
        // TODO [autofix]: Notify action for handling text edition
    }
    /**
     * Remove the child at the given index, shifting the following children
     * to the left. Does nothing if the index is out of bounds.
     */
    protected void removeChildAtIndex(final int childIndex) {
        if (0 <= childIndex && childIndex < getNumChildren()) {
            // Remove the child at the given index
            children = ArrayUtils.remove(children, childIndex);
            // Update the remaining & left-shifted children indexes
            for (int i = childIndex; i < getNumChildren(); i++) {
                asSelf(getChild(i)).setChildIndex(i);
            }
        }
    }
    /**
     * Sets the index of this node from the perspective of its parent. This
     * means: this.getParent().getChild(index) == this.
     *
     * @param index the child index
     */
    void setChildIndex(final int index) {
        childIndex = index;
    }
    @Override
    public DataMap<DataKey<?, ?>> getUserMap() {
        // lazily allocated on first use
        if (userData == null) {
            userData = DataMap.newDataMap();
        }
        return userData;
    }
    @Override
    public String toString() {
        return getXPathNodeName();
    }
    @Override
    public final NodeStream<N> children() {
        // Since this is used as a core part of tree traversal, the implementation
        // here is optimized. Importantly, this method is final and the
        // implementation returns always an instance of the same type, so
        // that the allocation can be eliminated, and the iterator call devirtualized.
        return StreamImpl.childrenArray(this, children);
    }
    @Override
    @SuppressWarnings("unchecked")
    public final <R extends Node> @Nullable R firstChild(Class<? extends R> rClass) {
        // This operation is extremely common so we give it an optimal
        // implementation, based directly on the array. This will never
        // create a node stream object, and array bounds are not checked.
        // It's final so it can be inlined.
        for (Node child : children) {
            if (rClass.isInstance(child)) {
                // rClass.cast(child) is more expensive than this
                // unchecked cast, which we know is safe.
                return (R) child;
            }
        }
        return null;
    }
}
| 7,427 | 33.388889 | 115 | java |
pmd | pmd-master/pmd-core/src/main/java/net/sourceforge/pmd/lang/ast/impl/GenericNode.java | /*
* BSD-style license; for more info see http://pmd.sourceforge.net/license.html
*/
package net.sourceforge.pmd.lang.ast.impl;
import org.checkerframework.checker.nullness.qual.Nullable;
import net.sourceforge.pmd.lang.ast.Node;
import net.sourceforge.pmd.lang.ast.NodeStream;
import net.sourceforge.pmd.lang.ast.NodeStream.DescendantNodeStream;
import net.sourceforge.pmd.lang.ast.internal.StreamImpl;
/**
 * Interface that binds the return type of some node methods to a type
 * parameter. This enforces that eg all children of such a node are from
 * the same hierarchy (eg Java nodes only have Java nodes as parent, or
 * as children).
 *
 * <p>Although subinterfaces like JavaNode profit from the added type
 * information, the Node interface and its usages in language-independent
 * code would suffer from adding a type parameter directly to {@link Node}.
 *
 * <p>Type safety of the unchecked casts here is the responsibility of
 * the implementation, it should check that methods like setParent or
 * addChild add an instance of {@code <N>}.
 *
 * @param <N> Self type (eg JavaNode)
 */
@SuppressWarnings("unchecked")
public interface GenericNode<N extends GenericNode<N>> extends Node {
    @Override
    N getChild(int index);
    @Override
    N getParent();
    /** Returns the first child of this node, or null if there is none. */
    @Override
    @Nullable
    default N getFirstChild() {
        return getNumChildren() > 0 ? getChild(0) : null;
    }
    /** Returns the last child of this node, or null if there is none. */
    @Override
    @Nullable
    default N getLastChild() {
        return getNumChildren() > 0 ? getChild(getNumChildren() - 1) : null;
    }
    // The remaining overrides only refine the return type of the Node
    // super-methods from Node to N. The unchecked casts are safe under
    // the contract stated in the interface javadoc.
    @Override
    default NodeStream<N> asStream() {
        return StreamImpl.singleton((N) this);
    }
    @Override
    default N getNthParent(int n) {
        return (N) Node.super.getNthParent(n);
    }
    @Override
    default NodeStream<N> children() {
        return (NodeStream<N>) Node.super.children();
    }
    @Override
    default DescendantNodeStream<N> descendants() {
        return (DescendantNodeStream<N>) Node.super.descendants();
    }
    @Override
    default DescendantNodeStream<N> descendantsOrSelf() {
        return (DescendantNodeStream<N>) Node.super.descendantsOrSelf();
    }
    @Override
    default NodeStream<N> ancestorsOrSelf() {
        return (NodeStream<N>) Node.super.ancestorsOrSelf();
    }
    @Override
    default NodeStream<N> ancestors() {
        return (NodeStream<N>) Node.super.ancestors();
    }
    @Override
    default @Nullable N getPreviousSibling() {
        return (N) Node.super.getPreviousSibling();
    }
    @Override
    default @Nullable N getNextSibling() {
        return (N) Node.super.getNextSibling();
    }
}
| 2,644 | 26.268041 | 79 | java |
pmd | pmd-master/pmd-core/src/main/java/net/sourceforge/pmd/lang/ast/impl/javacc/EscapeTranslator.java | /*
* BSD-style license; for more info see http://pmd.sourceforge.net/license.html
*/
package net.sourceforge.pmd.lang.ast.impl.javacc;
import static java.lang.Integer.min;
import net.sourceforge.pmd.lang.document.Chars;
import net.sourceforge.pmd.lang.document.FileLocation;
import net.sourceforge.pmd.lang.document.FragmentedDocBuilder;
import net.sourceforge.pmd.lang.document.TextDocument;
import net.sourceforge.pmd.util.AssertionUtil;
/**
 * An object that can translate an input document into an output document,
 * typically by replacing escape sequences with the character they represent.
 *
 * <p>This is an abstract class because the default implementation does not
 * perform any escape processing. Subclasses refine this behavior.
 */
@SuppressWarnings("PMD.AssignmentInOperand")
public abstract class EscapeTranslator {
    // Note that this can easily be turned into a java.io.Reader with
    // efficient block IO, optimized for the common case where there are
    // few or no escapes. This is part of the history of this file, but
    // was removed for simplicity.
    /**
     * Source characters. When there is an escape, eg \ u00a0, the
     * first backslash is replaced with the translated value of the
     * escape. The bufpos is updated so that we read the next char
     * after the escape.
     */
    protected Chars input;
    /** Position of the next char to read in the input. */
    protected int bufpos;
    /** Keep track of adjustments to make to the offsets, caused by unicode escapes. */
    final FragmentedDocBuilder builder;
    // Translation of the last recorded escape, null while not inside an escape
    private Chars curEscape;
    // Number of chars of curEscape that have already been counted as read
    private int offInEscape;
    /**
     * Create a translator that will read from the given document.
     *
     * @param original Original document
     *
     * @throws NullPointerException If the parameter is null
     */
    public EscapeTranslator(TextDocument original) {
        AssertionUtil.requireParamNotNull("builder", original);
        this.input = original.getText();
        this.bufpos = 0;
        this.builder = new FragmentedDocBuilder(original);
    }
    /**
     * Translate all the input in the buffer. This consumes this object.
     *
     * @return The translated text document. If there is no escape, returns the original text
     *
     * @throws IllegalStateException If this method is called more than once on the same object
     * @throws MalformedSourceException If there are invalid escapes in the source
     */
    public TextDocument translateDocument() throws MalformedSourceException {
        ensureOpen();
        try {
            return translateImpl();
        } finally {
            close();
        }
    }
    private TextDocument translateImpl() {
        if (this.bufpos == input.length()) {
            // empty input: nothing to translate
            return builder.build();
        }
        final int len = input.length(); // read the whole input in one pass (historically a bounded block size)
        int readChars = 0;
        while (readChars < len && (this.bufpos < input.length() || curEscape != null)) {
            if (curEscape != null) {
                // first drain the pending escape translation
                int toRead = min(len - readChars, curEscape.length() - offInEscape);
                readChars += toRead;
                offInEscape += toRead;
                if (curEscape.length() == offInEscape) {
                    curEscape = null;
                    continue;
                } else {
                    break; // len cut us off, we'll retry next time
                }
            }
            int bpos = this.bufpos;
            // consume regular chars until the next escape (or EOF)
            int nextJump = gobbleMaxWithoutEscape(min(input.length(), bpos + len - readChars));
            int newlyReadChars = nextJump - bpos;
            assert newlyReadChars >= 0 && (readChars + newlyReadChars) <= len;
            if (newlyReadChars == 0 && nextJump == input.length()) {
                // eof
                break;
            }
            readChars += newlyReadChars;
        }
        return builder.build();
    }
    /**
     * Returns the max offset, EXclusive, up to which we can cut the input
     * array from the bufpos to dump it into the output array.
     *
     * <p>The default implementation consumes everything up to {@code maxOff}
     * without recording any escape; subclasses override this to detect escapes.
     *
     * @param maxOff Max offset up to which to read ahead
     */
    protected int gobbleMaxWithoutEscape(int maxOff) throws MalformedSourceException {
        this.bufpos = maxOff;
        return maxOff;
    }
    /**
     * Record an escape whose source range is [startOffsetInclusive, endOffsetExclusive)
     * and which translates to the given chars. Advances bufpos past the escape.
     *
     * @return The start offset of the escape
     */
    protected int recordEscape(final int startOffsetInclusive, int endOffsetExclusive, Chars translation) {
        assert endOffsetExclusive > startOffsetInclusive && startOffsetInclusive >= 0;
        this.builder.recordDelta(startOffsetInclusive, endOffsetExclusive, translation);
        this.bufpos = endOffsetExclusive;
        this.curEscape = translation;
        this.offInEscape = 0;
        return startOffsetInclusive;
    }
    /**
     * Closing a translator does not close the underlying document, it just
     * clears the intermediary state.
     */
    private void close() {
        this.bufpos = -1;
        this.input = null;
    }
    /** Check to make sure that the stream has not been closed */
    protected final void ensureOpen() {
        if (input == null) {
            throw new IllegalStateException("Closed");
        }
    }
    /** Returns the file location of the given offset in the input. */
    protected FileLocation locationAt(int indexInInput) {
        return builder.toLocation(indexInInput);
    }
}
| 5,267 | 32.769231 | 107 | java |
pmd | pmd-master/pmd-core/src/main/java/net/sourceforge/pmd/lang/ast/impl/javacc/BackslashEscapeTranslator.java | /*
* BSD-style license; for more info see http://pmd.sourceforge.net/license.html
*/
package net.sourceforge.pmd.lang.ast.impl.javacc;
import static java.lang.Integer.min;
import net.sourceforge.pmd.lang.document.Chars;
import net.sourceforge.pmd.lang.document.TextDocument;
/**
 * A base class for readers that handle escapes starting with a backslash.
 */
public abstract class BackslashEscapeTranslator extends EscapeTranslator {
    private static final char BACKSLASH = '\\';
    /**
     * An offset until which we read backslashes and decided they were not
     * an escape. The read procedure may cut off in the middle of the escape,
     * and turn an even num of backslashes into an odd one, so until we crossed
     * this offset, backslashes are not treated specially.
     */
    private int savedNotEscapeSpecialEnd = Integer.MAX_VALUE;
    public BackslashEscapeTranslator(TextDocument builder) {
        super(builder);
    }
    @Override
    protected int gobbleMaxWithoutEscape(final int maxOff) throws MalformedSourceException {
        int off = this.bufpos;
        boolean seenBackslash = true;
        int notEscapeEnd = this.savedNotEscapeSpecialEnd;
        // Scan forward to the next backslash, which may start an escape.
        // NOTE(review): the field doc above says backslashes before
        // savedNotEscapeSpecialEnd should NOT be special, yet the condition
        // below is true when off <= notEscapeEnd. With the current
        // EscapeTranslator#translateImpl, maxOff always equals input.length()
        // (for translators whose replacements are not longer than the source
        // escape), so the saved-offset path appears unreachable — confirm
        // before changing either side.
        while (off < maxOff) {
            seenBackslash = input.charAt(off) == BACKSLASH && notEscapeEnd >= off;
            if (seenBackslash) {
                break;
            }
            off++;
        }
        if (!seenBackslash || off == maxOff) {
            // no backslash in the scanned range: consume it as regular chars
            this.bufpos = off;
            return off;
        }
        // let the subclass decide whether this backslash starts an escape
        return handleBackslash(maxOff, off);
    }
    /**
     * Handle a backslash found at {@code firstBackslashOff}. Implementations
     * either record an escape ({@link #recordEscape(int, int, Chars)}) or
     * abort ({@link #abortEscape(int, int)}).
     *
     * @param maxOff Max offset up to which to read ahead
     * @param firstBackslashOff Offset of the first backslash found
     *
     * @return The result for {@link #gobbleMaxWithoutEscape(int)}
     */
    protected abstract int handleBackslash(int maxOff, int firstBackslashOff) throws MalformedSourceException;
    @Override
    protected int recordEscape(int startOffsetInclusive, int endOffsetExclusive, Chars translation) {
        // a real escape was found, forget any saved non-escape run
        this.savedNotEscapeSpecialEnd = Integer.MAX_VALUE;
        return super.recordEscape(startOffsetInclusive, endOffsetExclusive, translation);
    }
    protected int abortEscape(int off, int maxOff) {
        // not an escape sequence
        int min = min(maxOff, off);
        // save the number of backslashes that are part of the escape,
        // might have been cut in half by the maxReadahead
        this.savedNotEscapeSpecialEnd = min < off ? off : Integer.MAX_VALUE;
        this.bufpos = min;
        return min;
    }
}
| 2,336 | 31.458333 | 110 | java |
pmd | pmd-master/pmd-core/src/main/java/net/sourceforge/pmd/lang/ast/impl/javacc/package-info.java | /*
* BSD-style license; for more info see http://pmd.sourceforge.net/license.html
*/
/**
* Support classes for language implementations based on JavaCC.
*/
package net.sourceforge.pmd.lang.ast.impl.javacc;
| 211 | 22.555556 | 79 | java |
pmd | pmd-master/pmd-core/src/main/java/net/sourceforge/pmd/lang/ast/impl/javacc/JjtreeParserAdapter.java | /**
* BSD-style license; for more info see http://pmd.sourceforge.net/license.html
*/
package net.sourceforge.pmd.lang.ast.impl.javacc;
import net.sourceforge.pmd.lang.ast.FileAnalysisException;
import net.sourceforge.pmd.lang.ast.ParseException;
import net.sourceforge.pmd.lang.ast.Parser;
import net.sourceforge.pmd.lang.ast.RootNode;
/**
 * Base implementation of the {@link Parser} interface for JavaCC language
 * implementations. This wraps a parser generated by JavaCC, it's not meant
 * as a base class for the generated parser.
 *
 * @param <R> Type of the root node of this language
 */
public abstract class JjtreeParserAdapter<R extends RootNode> implements Parser {

    protected JjtreeParserAdapter() {
        // inheritance only
    }

    /** Returns the token document configuration for this language. */
    protected abstract JavaccTokenDocument.TokenDocumentBehavior tokenBehavior();

    @Override
    public final R parse(ParserTask task) throws ParseException {
        try {
            // Read the source file and process escape sequences up front
            CharStream stream = CharStream.create(task.getTextDocument(), tokenBehavior());
            // Swap in the translated document (produced by CharStream#create)
            // so that text coordinates reflect the escapes properly
            task = task.withTextDocument(stream.getTokenDocument().getTextDocument());
            // Now hand over to the generated parser
            return parseImpl(stream, task);
        } catch (FileAnalysisException e) {
            throw e.setFileId(task.getTextDocument().getFileId());
        }
    }

    /** Invoke the generated parser on the given char stream. */
    protected abstract R parseImpl(CharStream cs, ParserTask task) throws ParseException;

    @Override
    public String toString() {
        return getClass().getSimpleName();
    }
}
| 1,733 | 33.68 | 95 | java |
pmd | pmd-master/pmd-core/src/main/java/net/sourceforge/pmd/lang/ast/impl/javacc/JavaccTokenDocument.java | /*
* BSD-style license; for more info see http://pmd.sourceforge.net/license.html
*/
package net.sourceforge.pmd.lang.ast.impl.javacc;
import java.util.Collections;
import java.util.List;
import org.checkerframework.checker.nullness.qual.NonNull;
import org.checkerframework.checker.nullness.qual.Nullable;
import net.sourceforge.pmd.cpd.impl.JavaCCTokenizer;
import net.sourceforge.pmd.lang.ast.impl.TokenDocument;
import net.sourceforge.pmd.lang.document.TextDocument;
/**
 * Token document for Javacc implementations. This is a helper object
 * for generated token managers. Note: the extension point is a custom
 * implementation of {@link TokenDocumentBehavior}, see {@link JjtreeParserAdapter#tokenBehavior()},
 * {@link JavaCCTokenizer#tokenBehavior()}
 */
public final class JavaccTokenDocument extends TokenDocument<JavaccToken> {
    private final TokenDocumentBehavior behavior;
    // Implicit start-of-document token, set exactly once by open()
    private JavaccToken first;
    public JavaccTokenDocument(TextDocument textDocument, TokenDocumentBehavior behavior) {
        super(textDocument);
        this.behavior = behavior;
    }
    /**
     * Overridable configuration of a token document.
     */
    public static class TokenDocumentBehavior {
        public static final TokenDocumentBehavior DEFAULT = new TokenDocumentBehavior(Collections.emptyList());
        private final List<String> tokenNames;
        public TokenDocumentBehavior(List<String> tokenNames) {
            this.tokenNames = tokenNames;
        }
        /**
         * Returns true if the lexer should accumulate the image of MORE
         * tokens into the StringBuilder jjimage. This is useless in our
         * current implementations, because the image of tokens can be cut
         * out using text coordinates, so doesn't need to be put into a separate string.
         * The default returns false, which makes {@link CharStream#appendSuffix(StringBuilder, int)} a noop.
         */
        public boolean useMarkSuffix() {
            return false;
        }
        /**
         * Translate the escapes of the source document. The default implementation
         * does not perform any escaping.
         *
         * @param text Source doc
         *
         * @throws MalformedSourceException If the source contains invalid escapes
         *
         * @see EscapeTranslator
         *
         * TODO move that to LanguageVersionHandler once #3919 (Merge CPD and PMD language) is implemented
         */
        public TextDocument translate(TextDocument text) throws MalformedSourceException {
            return text;
        }
        /**
         * Returns a string that describes the token kind.
         *
         * @param kind Kind of token
         *
         * @return A descriptive string
         */
        public final @NonNull String describeKind(int kind) {
            if (kind == JavaccToken.IMPLICIT_TOKEN) {
                return "<implicit token>";
            }
            String impl = describeKindImpl(kind);
            if (impl != null) {
                return impl;
            }
            return "<token of kind " + kind + ">";
        }
        /**
         * Describe the given kind. If this returns a non-null value, then
         * that's what {@link #describeKind(int)} will use. Otherwise a default
         * implementation is used.
         *
         * <p>An implementation typically uses the JavaCC-generated array
         * named {@code <parser name>Constants.tokenImage}. Remember to
         * check the bounds of the array.
         *
         * @param kind Kind of token
         *
         * @return A descriptive string, or null to use default
         */
        protected @Nullable String describeKindImpl(int kind) {
            if (kind >= 0 && kind < tokenNames.size()) {
                return tokenNames.get(kind);
            }
            return null;
        }
        /**
         * Creates a new token with the given kind. This is called back to
         * by JavaCC-generated token managers (jjFillToken). Note that a
         * created token is not guaranteed to end up in the final token chain.
         *
         * @param kind Kind of the token
         * @param cs Char stream of the file. This can be used to get text
         * coordinates and the image
         * @param image Shared instance of the image token. If this is non-null,
         * then no call to {@link CharStream#getTokenImage()} should be
         * issued.
         *
         * @return A new token
         */
        public JavaccToken createToken(JavaccTokenDocument self, int kind, CharStream cs, @Nullable String image) {
            return new JavaccToken(
                kind,
                image == null ? cs.getTokenImageCs() : image,
                cs.getStartOffset(),
                cs.getEndOffset(),
                self
            );
        }
    }
    boolean useMarkSuffix() {
        return behavior.useMarkSuffix();
    }
    /**
     * Open the document. This is only meant to be used by a Javacc-generated
     * parser.
     *
     * @return The token for the document start. This token is implicit and
     * will never end up in the final token chain.
     *
     * @throws IllegalStateException If the document has already been opened
     */
    public JavaccToken open() {
        synchronized (this) {
            if (first != null) {
                // matches the documented contract above — previously this
                // threw a bare RuntimeException
                throw new IllegalStateException("Document is already opened");
            }
            first = JavaccToken.newImplicit(0, this);
        }
        return first;
    }
    @Override
    public JavaccToken getFirstToken() {
        if (first == null || first.next == null) {
            throw new IllegalStateException("Document has not been opened");
        }
        return first.next;
    }
    /**
     * @see TokenDocumentBehavior#describeKind(int)
     */
    public @NonNull String describeKind(int kind) {
        return behavior.describeKind(kind);
    }
    /**
     * @see TokenDocumentBehavior#createToken(JavaccTokenDocument, int, CharStream, String)
     */
    public JavaccToken createToken(int kind, CharStream cs, @Nullable String image) {
        return behavior.createToken(this, kind, cs, image);
    }
}
| 6,171 | 32.726776 | 115 | java |
pmd | pmd-master/pmd-core/src/main/java/net/sourceforge/pmd/lang/ast/impl/javacc/MalformedSourceException.java | /*
* BSD-style license; for more info see http://pmd.sourceforge.net/license.html
*/
package net.sourceforge.pmd.lang.ast.impl.javacc;
import java.util.Objects;
import org.checkerframework.checker.nullness.qual.NonNull;
import net.sourceforge.pmd.lang.ast.FileAnalysisException;
import net.sourceforge.pmd.lang.document.FileLocation;
/**
 * A {@link FileAnalysisException} thrown when the source format is invalid,
 * for example if some unicode escapes cannot be translated.
 */
public class MalformedSourceException extends FileAnalysisException {

    private final FileLocation location;

    /**
     * Create a new exception for a source format error.
     *
     * @param message Description of the problem
     * @param cause Exception that triggered this one, possibly null
     * @param fileLocation Location of the offending construct
     *
     * @throws NullPointerException If the location is null
     */
    public MalformedSourceException(String message, Throwable cause, FileLocation fileLocation) {
        super(message, cause);
        Objects.requireNonNull(fileLocation);
        this.location = fileLocation;
        setFileId(fileLocation.getFileId());
    }

    @Override
    protected @NonNull FileLocation location() {
        return location;
    }

    @Override
    protected String errorKind() {
        return "Source format error";
    }
}
| 1,033 | 26.210526 | 97 | java |
pmd | pmd-master/pmd-core/src/main/java/net/sourceforge/pmd/lang/ast/impl/javacc/JjtreeBuilder.java | /*
* BSD-style license; for more info see http://pmd.sourceforge.net/license.html
*/
package net.sourceforge.pmd.lang.ast.impl.javacc;
import java.util.ArrayList;
import java.util.List;
/**
 * Shared implementation of the tree builder generated by JJTree.
 *
 * @param <N> Internal base class for nodes
 */
public final class JjtreeBuilder<N extends AbstractJjtreeNode<N, ?>> {
    private final List<N> nodes = new ArrayList<>();
    private final List<Integer> marks = new ArrayList<>();
    private int sp = 0; // number of nodes on stack
    private int mk = 0; // current mark
    private boolean nodeCreated;
    /**
     * If non-zero, then the top "n" nodes of the stack will be injected as the first children of the next
     * node to be opened. This is not very flexible, but it's enough. The grammar needs to take
     * care of the order in which nodes are opened in a few places, in most cases this just means using
     * eg A() B() #N(2) instead of (A() B()) #N, so as not to open N before A.
     */
    private int numPendingInjection;
    /**
     * Determines whether the current node was actually closed and
     * pushed. This should only be called in the final user action of a
     * node scope.
     */
    public boolean nodeCreated() {
        return nodeCreated;
    }
    /**
     * Call this to reinitialize the node stack. It is called
     * automatically by the parser's ReInit() method.
     */
    public void reset() {
        nodes.clear();
        marks.clear();
        sp = 0;
        mk = 0;
    }
    /**
     * Returns the root node of the AST. It only makes sense to call
     * this after a successful parse.
     */
    public N rootNode() {
        return nodes.get(0);
    }
    /**
     * Peek the nth node from the top of the stack.
     * peekNode(0) == peekNode()
     */
    public N peekNode(int n) {
        return nodes.get(nodes.size() - n - 1);
    }
    /** Returns true if a child injection was requested via {@link #injectRight(int)}. */
    public boolean isInjectionPending() {
        return numPendingInjection;
    }
    /** Request that the top {@code n} nodes be injected into the next opened node scope. */
    public void injectRight(int n) {
        numPendingInjection = n;
    }
    /** Pushes a node on to the stack. */
    public void pushNode(N n) {
        nodes.add(n);
        ++sp;
    }
    /**
     * Returns the node on the top of the stack, and remove it from the
     * stack.
     */
    public N popNode() {
        --sp;
        if (sp < mk) {
            // left the current scope, restore the enclosing mark
            mk = marks.remove(marks.size() - 1);
        }
        return nodes.remove(nodes.size() - 1);
    }
    /** Returns the node currently on the top of the stack. */
    public N peekNode() {
        return nodes.get(nodes.size() - 1);
    }
    /**
     * Returns the number of children on the stack in the current node
     * scope.
     */
    public int nodeArity() {
        return sp - mk;
    }
    /**
     * Pops all the nodes of the current scope and restores the previous mark.
     * The parameter is unused; presumably the signature mirrors the
     * JJTree-generated caller — confirm before changing it.
     */
    public void clearNodeScope(N n) {
        while (sp > mk) {
            popNode();
        }
        mk = marks.remove(marks.size() - 1);
    }
    /**
     * Open a node scope for the given node, whose first consumed token
     * is the given one. Pending injected children (see {@link #injectRight(int)})
     * are absorbed into the new scope.
     */
    public void openNodeScope(N n, JavaccToken firstToken) {
        marks.add(mk);
        mk = sp;
        if (isInjectionPending()) {
            // include the pending nodes into this scope
            mk -= numPendingInjection;
            numPendingInjection = 0;
        }
        n.setFirstToken(firstToken);
        n.jjtOpen();
    }
    /**
     * Close the node scope and adds the given number of children to the
     * node. A definite node is constructed from a specified number of
     * children. That number of nodes are popped from the stack and
     * made the children of the definite node. Then the definite node
     * is pushed on to the stack.
     */
    public void closeNodeScope(N n, final int num, JavaccToken lastToken) {
        int a = nodeArity();
        mk = marks.remove(marks.size() - 1);
        N child = null;
        int i = num;
        while (i-- > 0) {
            child = popNode();
            n.addChild(child, i);
        }
        if (child != null && num > a) {
            // this node has more children that what was in its node scope
            // (ie first token is wrong)
            n.setFirstToken(child.getFirstToken());
        }
        closeImpl(n, lastToken);
    }
    /**
     * Close the node scope if the condition is true.
     * All the nodes that have been pushed since the node was opened are
     * made children of the conditional node, which is then pushed on to
     * the stack. If the condition is false the node is not constructed
     * and they are left on the stack.
     *
     * @param n Node to close
     * @param condition Whether to close the node or not
     * @param lastToken Last token that was consumed while the node scope was open
     */
    public void closeNodeScope(N n, boolean condition, JavaccToken lastToken) {
        if (condition) {
            int a = nodeArity();
            mk = marks.remove(marks.size() - 1);
            while (a-- > 0) {
                n.addChild(popNode(), a);
            }
            closeImpl(n, lastToken);
        } else {
            mk = marks.remove(marks.size() - 1);
            nodeCreated = false;
        }
    }
    /** Common tail of both closeNodeScope overloads: set tokens, close and push the node. */
    private void closeImpl(N n, JavaccToken lastToken) {
        if (lastToken.getNext() == n.getFirstToken()) { // NOPMD CompareObjectsWithEquals
            // this means, that the node has zero length.
            // create an implicit token to represent this case.
            JavaccToken implicit = JavaccToken.implicitBefore(lastToken.getNext());
            n.setFirstToken(implicit);
            n.setLastToken(implicit);
        } else {
            n.setLastToken(lastToken);
        }
        // note that the last token has been set before jjtClose
        n.jjtClose();
        pushNode(n);
        nodeCreated = true;
    }
}
| 5,711 | 27.41791 | 108 | java |
pmd | pmd-master/pmd-core/src/main/java/net/sourceforge/pmd/lang/ast/impl/javacc/JavaEscapeTranslator.java | /*
* BSD-style license; for more info see http://pmd.sourceforge.net/license.html
*/
package net.sourceforge.pmd.lang.ast.impl.javacc;
import net.sourceforge.pmd.lang.document.Chars;
import net.sourceforge.pmd.lang.document.TextDocument;
/**
 * An implementation of {@link EscapeTranslator} that translates Java
 * unicode escapes.
 */
@SuppressWarnings("PMD.AssignmentInOperand")
public final class JavaEscapeTranslator extends BackslashEscapeTranslator {
    public JavaEscapeTranslator(TextDocument input) {
        super(input);
    }
    @Override
    protected int handleBackslash(final int maxOff, final int firstBackslashOff) throws MalformedSourceException {
        int off = firstBackslashOff;
        // measure the whole run of consecutive backslashes
        while (off < input.length() && input.charAt(off) == '\\') {
            off++;
        }
        int bslashCount = off - firstBackslashOff;
        // is there an escape at offset firstBslashOff?
        // JLS §3.3: a unicode escape is a backslash preceded by an even
        // number of backslashes, hence the parity check on the run length.
        if ((bslashCount & 1) == 1 // odd number of backslashes
            && off < input.length() && input.charAt(off) == 'u') { // at least one 'u'
            // this is enough to expect an escape or throw an exception
            while (off < input.length() && input.charAt(off) == 'u') {
                // consume all the 'u's (JLS allows \uuuu... escapes)
                off++;
            }
            Chars value = escapeValue(firstBackslashOff, off - 1);
            int endOffset = off + 4; // + 4 hex digits
            // NOTE(review): the replaced range starts at the FIRST backslash
            // of the run. When bslashCount > 1, the leading literal
            // backslashes are replaced along with the escape, whereas per
            // JLS §3.3 only the last backslash starts the escape — confirm
            // this is intended.
            return recordEscape(firstBackslashOff, endOffset, value);
        } else {
            return abortEscape(off, maxOff);
        }
    }
    /**
     * Parse the 4 hex digits following the last 'u' and return the
     * single character they denote.
     *
     * @param posOfFirstBackSlash Offset of the backslash opening the escape,
     *                            used for error reporting
     * @param offOfTheU Offset of the last 'u' of the escape
     *
     * @throws MalformedSourceException If the digits are missing or invalid
     */
    private Chars escapeValue(int posOfFirstBackSlash, final int offOfTheU) throws MalformedSourceException {
        int off = offOfTheU;
        try {
            // the pre-increments read the 4 chars following the 'u'
            char c = (char)
                ( hexVal(input.charAt(++off)) << 12 // SUPPRESS CHECKSTYLE paren pad
                | hexVal(input.charAt(++off)) << 8
                | hexVal(input.charAt(++off)) << 4
                | hexVal(input.charAt(++off))
                );
            return Chars.wrap(Character.toString(c));
        } catch (NumberFormatException | IndexOutOfBoundsException e) {
            // cut off u and 4 digits
            String escape = input.substring(offOfTheU, Math.min(input.length(), offOfTheU + 5));
            throw new MalformedSourceException("Invalid unicode escape \\" + escape, e, locationAt(posOfFirstBackSlash));
        }
    }
    /**
     * Returns the numeric value of a hex digit.
     *
     * @throws NumberFormatException If the char is not a hex digit
     */
    private static int hexVal(char c) {
        switch (c) {
        case '0':
        case '1':
        case '2':
        case '3':
        case '4':
        case '5':
        case '6':
        case '7':
        case '8':
        case '9':
            return c - '0';
        case 'A':
        case 'B':
        case 'C':
        case 'D':
        case 'E':
        case 'F':
            return c - ('A' - 10);
        case 'a':
        case 'b':
        case 'c':
        case 'd':
        case 'e':
        case 'f':
            return c - ('a' - 10);
        default:
            throw new NumberFormatException("Character '" + c + "' is not a valid hexadecimal digit");
        }
    }
}
| 3,086 | 31.494737 | 121 | java |
pmd | pmd-master/pmd-core/src/main/java/net/sourceforge/pmd/lang/ast/impl/javacc/CharStream.java | /*
* BSD-style license; for more info see http://pmd.sourceforge.net/license.html
*/
package net.sourceforge.pmd.lang.ast.impl.javacc;
import java.io.EOFException;
import net.sourceforge.pmd.lang.ast.impl.javacc.JavaccTokenDocument.TokenDocumentBehavior;
import net.sourceforge.pmd.lang.document.Chars;
import net.sourceforge.pmd.lang.document.FileLocation;
import net.sourceforge.pmd.lang.document.TextDocument;
import net.sourceforge.pmd.lang.document.TextRegion;
/**
* PMD flavour of character streams used by JavaCC parsers.
*/
public final class CharStream {
private final JavaccTokenDocument tokenDoc;
private final TextDocument textDoc;
private final Chars chars;
private final boolean useMarkSuffix;
private int curOffset;
private int markOffset;
    /** Use {@link #create(TextDocument, TokenDocumentBehavior)} instead. */
    private CharStream(JavaccTokenDocument tokenDoc) {
        this.tokenDoc = tokenDoc;
        this.textDoc = tokenDoc.getTextDocument();
        this.chars = textDoc.getText();
        this.useMarkSuffix = tokenDoc.useMarkSuffix();
    }
/**
* Create a new char stream for the given document. This may create
* a new {@link TextDocument} view over the original, which reflects
* its character escapes.
*/
public static CharStream create(TextDocument doc, TokenDocumentBehavior behavior) throws MalformedSourceException {
TextDocument translated = behavior.translate(doc);
return new CharStream(new JavaccTokenDocument(translated, behavior));
}
/**
* Returns the next character from the input. After a {@link #backup(int)},
* some of the already read chars must be spit out again.
*
* @return The next character
*
* @throws EOFException Upon EOF
*/
public char readChar() throws EOFException {
if (curOffset == chars.length()) {
throw new EOFException();
}
return chars.charAt(curOffset++);
}
    /**
     * Calls {@link #readChar()} and returns its value, marking its position
     * as the beginning of the next token. All characters must remain in
     * the buffer between two successive calls to this method to implement
     * backup correctly.
     *
     * @throws EOFException Upon EOF
     */
    public char markTokenStart() throws EOFException {
        markOffset = curOffset;
        return readChar();
    }
/**
* Returns a string made up of characters from the token mark up to
* to the current buffer position.
*/
public String getTokenImage() {
return getTokenImageCs().toString();
}
/**
* Returns a string made up of characters from the token mark up to
* to the current buffer position.
*/
public Chars getTokenImageCs() {
assert markOffset >= 0;
return chars.slice(markOffset, markLen());
}
private int markLen() {
return curOffset - markOffset;
}
/**
* Appends the suffix of length 'len' of the current token to the given
* string builder. This is used to build up the matched string
* for use in actions in the case of MORE.
*
* @param len Length of the returned array
*
* @throws IndexOutOfBoundsException If len is greater than the length of the current token
*/
public void appendSuffix(StringBuilder sb, int len) {
if (useMarkSuffix) {
assert len <= markLen() : "Suffix is greater than the mark length? " + len + " > " + markLen();
chars.appendChars(sb, curOffset - len, len);
} // otherwise dead code, kept because Javacc's argument expressions do side effects
}
/**
* Pushes a given number of already read chars into the buffer.
* Subsequent calls to {@link #readChar()} will read those characters
* before proceeding to read the underlying char stream.
*
* <p>A lexer calls this method if it has already read some characters,
* but cannot use them to match a (longer) token. So, they will
* be used again as the prefix of the next token.
*
* @throws AssertionError If the requested amount is greater than the
* length of the mark
*/
public void backup(int amount) {
if (amount > markLen()) {
throw new IllegalArgumentException();
}
curOffset -= amount;
}
/**
* Returns the column number of the last character for the current token.
* This is only used for parse exceptions and is very inefficient.
*/
public int getEndColumn() {
return endLocation().getEndColumn();
}
/**
* Returns the line number of the last character for current token.
* This is only used for parse exceptions and is very inefficient.
*/
public int getEndLine() {
return endLocation().getEndLine();
}
private FileLocation endLocation() {
return textDoc.toLocation(TextRegion.caretAt(getEndOffset()));
}
/** Returns the start offset of the current token (in the translated source), inclusive. */
public int getStartOffset() {
return markOffset;
}
/** Returns the end offset of the current token (in the translated source), exclusive. */
public int getEndOffset() {
return curOffset;
}
/**
* Returns the token document for the tokens being built. Having it
* here is the most convenient place for the time being.
*/
public JavaccTokenDocument getTokenDocument() {
return tokenDoc;
}
}
| 5,459 | 30.2 | 119 | java |
pmd | pmd-master/pmd-core/src/main/java/net/sourceforge/pmd/lang/ast/impl/javacc/AbstractTokenManager.java | /*
* BSD-style license; for more info see http://pmd.sourceforge.net/license.html
*/
package net.sourceforge.pmd.lang.ast.impl.javacc;
import java.util.HashMap;
import java.util.Map;
import net.sourceforge.pmd.PMDConfiguration;
import net.sourceforge.pmd.lang.TokenManager;
/**
* A base class for the token managers generated by JavaCC.
*/
public abstract class AbstractTokenManager implements TokenManager<JavaccToken> {

    // Suppression comments found while lexing; presumably keyed by line
    // number — TODO confirm against the generated token managers that fill it.
    protected Map<Integer, String> suppressMap = new HashMap<>();
    // Marker string identifying suppression comments, defaults to "NOPMD".
    protected String suppressMarker = PMDConfiguration.DEFAULT_SUPPRESS_MARKER;

    /** Returns the suppression comments collected so far. */
    public Map<Integer, String> getSuppressMap() {
        return suppressMap;
    }

    /** Sets the marker string that identifies suppression comments. */
    public void setSuppressMarker(String marker) {
        suppressMarker = marker;
    }
}
| 761 | 25.275862 | 81 | java |
pmd | pmd-master/pmd-core/src/main/java/net/sourceforge/pmd/lang/ast/impl/javacc/JavaccToken.java | /*
* BSD-style license; for more info see http://pmd.sourceforge.net/license.html
*/
package net.sourceforge.pmd.lang.ast.impl.javacc;
import net.sourceforge.pmd.lang.ast.GenericToken;
import net.sourceforge.pmd.lang.document.Chars;
import net.sourceforge.pmd.lang.document.FileLocation;
import net.sourceforge.pmd.lang.document.TextRegion;
/**
* A generic token implementation for JavaCC parsers.
*
* <p>Largely has the same interface as the default generated token class.
* The main difference is that the position of the token is encoded as
* a start and end offset in the source file, instead of a (begin,end)x(line,column)
* 4-tuple. This offers two practical advantages:
* <ul>
* <li>It allows retrieving easily the underlying text of a node (just
* need to cut a substring of the file text). Other attributes like lines
* and column bounds can be derived as well - though this should not be
* done systematically because it's costlier.
* <li>It's a bit lighter. Token instances are one of the most numerous
* class in a typical PMD run and this may reduce GC pressure.
* </ul>
*/
public class JavaccToken implements GenericToken<JavaccToken> {

    /**
     * Kind for EOF tokens.
     */
    public static final int EOF = 0;
    /**
     * Kind for implicit tokens. Negative because JavaCC only picks
     * positive numbers for token kinds.
     */
    public static final int IMPLICIT_TOKEN = -1;

    /**
     * An integer that describes the kind of this token. This numbering
     * system is determined by JavaCCParser, and a table of these numbers is
     * stored in the file ...Constants.java.
     */
    public final int kind;

    private final JavaccTokenDocument document;
    /** Either a String or a Chars instance (enforced by the constructor assert). */
    private final CharSequence image;
    /** Start offset in the translated source, inclusive. */
    private final int startOffset;
    /** End offset in the translated source, exclusive. */
    private final int endOffset;

    /**
     * A reference to the next regular (non-special) token from the input
     * stream. If this is the last token from the input stream, or if the
     * token manager has not read tokens beyond this one, this field is
     * set to null. This is true only if this token is also a regular
     * token. Otherwise, see below for a description of the contents of
     * this field.
     */
    public JavaccToken next;
    /**
     * This field is used to access special tokens that occur prior to this
     * token, but after the immediately preceding regular (non-special) token.
     * If there are no such special tokens, this field is set to null.
     * When there are more than one such special token, this field refers
     * to the last of these special tokens, which in turn refers to the next
     * previous special token through its specialToken field, and so on
     * until the first special token (whose specialToken field is null).
     * The next fields of special tokens refer to other special tokens that
     * immediately follow it (without an intervening regular token). If there
     * is no such token, this field is null.
     */
    public JavaccToken specialToken;

    // common constructor, with a CharSequence parameter
    JavaccToken(int kind, CharSequence image, int startInclusive, int endExclusive, JavaccTokenDocument document) {
        assert document != null : "Null document";
        // the assert message used to read "Null image", which did not match the condition
        assert image instanceof String || image instanceof Chars : "Image must be a String or Chars: " + image;
        assert TextRegion.isValidRegion(startInclusive, endExclusive, document.getTextDocument());
        this.kind = kind;
        this.image = image;
        this.startOffset = startInclusive;
        this.endOffset = endExclusive;
        this.document = document;
    }

    /**
     * Builds a new token of the specified kind.
     *
     * @param kind Kind of token
     * @param image Image of the token (after translating escapes if any)
     * @param startInclusive Start character of the token in the text file (before translating escapes)
     * @param endExclusive End of the token in the text file (before translating escapes)
     * @param document Document owning the token
     */
    public JavaccToken(int kind, Chars image, int startInclusive, int endExclusive, JavaccTokenDocument document) {
        this(kind, (CharSequence) image, startInclusive, endExclusive, document);
    }

    /**
     * Constructor with a {@link String} image (see {@link #JavaccToken(int, Chars, int, int, JavaccTokenDocument) the other ctor}).
     */
    public JavaccToken(int kind, String image, int startInclusive, int endExclusive, JavaccTokenDocument document) {
        this(kind, (CharSequence) image, startInclusive, endExclusive, document);
    }

    /**
     * Returns the document owning this token.
     */
    public final JavaccTokenDocument getDocument() {
        return document;
    }

    @Override
    public boolean isEof() {
        return kind == EOF;
    }

    @Override
    public int getKind() {
        return kind;
    }

    @Override
    public JavaccToken getNext() {
        return next;
    }

    @Override
    public JavaccToken getPreviousComment() {
        return specialToken;
    }

    @Override
    public Chars getImageCs() {
        // wrap it: it's zero cost (images are either Chars or String) and Chars has a nice API
        return Chars.wrap(image);
    }

    @Override
    public String getImage() {
        return image.toString();
    }

    @Override
    public final TextRegion getRegion() {
        return TextRegion.fromBothOffsets(startOffset, endOffset);
    }

    int getStartOffset() {
        return startOffset;
    }

    int getEndOffset() {
        return endOffset;
    }

    @Override
    public FileLocation getReportLocation() {
        return document.getTextDocument().toLocation(getRegion());
    }

    @Override
    public boolean isImplicit() {
        return kind == IMPLICIT_TOKEN;
    }

    @Override
    public String toString() {
        return document.describeKind(kind) + ": " + getImage();
    }

    /**
     * Returns a new token with the same kind as this one, whose image
     * is replaced by the one marked on the char stream.
     *
     * @param charStream Char stream from which to start
     *
     * @return A new token
     */
    public JavaccToken replaceImage(CharStream charStream) {
        return new JavaccToken(
            this.kind,
            charStream.getTokenImageCs(),
            this.startOffset,
            charStream.getEndOffset(),
            this.document
        );
    }

    /**
     * Returns a new token with the given kind, and all other parameters
     * identical to this one. The {@link #specialToken} and {@link #next}
     * links are copied over to the new token.
     *
     * @param newKind New kind for the returned token
     *
     * @return A new token
     */
    public JavaccToken withKind(int newKind) {
        JavaccToken tok = new JavaccToken(
            newKind,
            this.image,
            this.startOffset,
            this.endOffset,
            this.document
        );
        tok.specialToken = this.specialToken;
        tok.next = this.next;
        return tok;
    }

    /**
     * Creates an implicit token, with zero length, that is linked to
     * the given token as its special predecessor.
     *
     * @param next Token before which to insert the new token
     *
     * @return A new token
     */
    public static JavaccToken implicitBefore(JavaccToken next) {
        JavaccToken implicit = newImplicit(next.getRegion().getStartOffset(), next.document);

        // insert it right before the next token
        // as a special token
        implicit.next = next;
        if (next.specialToken != null) {
            next.specialToken.next = implicit;
            implicit.specialToken = next.specialToken;
        }
        next.specialToken = implicit;

        return implicit;
    }

    /**
     * Returns a new implicit token, positioned at the given offset.
     *
     * @param offset Offset of the token
     * @param document Document owning the token
     *
     * @return A new token
     */
    public static JavaccToken newImplicit(int offset, JavaccTokenDocument document) {
        return new JavaccToken(IMPLICIT_TOKEN, "", offset, offset, document);
    }
}
| 8,140 | 30.677043 | 132 | java |
pmd | pmd-master/pmd-core/src/main/java/net/sourceforge/pmd/lang/ast/impl/javacc/AbstractJjtreeNode.java | /*
* BSD-style license; for more info see http://pmd.sourceforge.net/license.html
*/
package net.sourceforge.pmd.lang.ast.impl.javacc;
import net.sourceforge.pmd.annotation.Experimental;
import net.sourceforge.pmd.lang.ast.Node;
import net.sourceforge.pmd.lang.ast.impl.AbstractNode;
import net.sourceforge.pmd.lang.document.FileLocation;
import net.sourceforge.pmd.lang.document.TextRegion;
import net.sourceforge.pmd.util.StringUtil;
/**
* Base class for node produced by JJTree. JJTree specific functionality
* present on the API of {@link Node} and {@link AbstractNode} will be
* moved here for 7.0.0.
*
* <p>This is experimental because it's expected to change for 7.0.0 in
* unforeseeable ways. Don't use it directly, use the node interfaces.
*/
@Experimental
public abstract class AbstractJjtreeNode<B extends AbstractJjtreeNode<B, N>, N extends JjtreeNode<N>> extends AbstractNode<B, N> implements JjtreeNode<N> {
    protected final int id;

    private JavaccToken firstToken;
    private JavaccToken lastToken;
    private String image;

    /**
     * The id is an index in the constant names array generated by jjtree,
     * it must be set to some value that depends on the node type, not some
     * arbitrary "1" or "2", and not necessarily a unique value.
     */
    protected AbstractJjtreeNode(int id) {
        super();
        this.id = id;
    }

    @Override
    // @Deprecated // todo deprecate, will change tree dump tests
    public String getImage() {
        return image;
    }

    protected void setImage(String image) {
        this.image = image;
    }

    /** Returns the region spanned by the first and last tokens of this node. */
    @Override
    public final TextRegion getTextRegion() {
        return TextRegion.fromBothOffsets(getFirstToken().getStartOffset(),
                                          getLastToken().getEndOffset());
    }

    @Override
    public final int compareLocation(Node other) {
        if (other instanceof JjtreeNode<?>) {
            // compare by text region, avoids computing line/column coordinates
            return getTextRegion().compareTo(((JjtreeNode<?>) other).getTextRegion());
        }
        return super.compareLocation(other);
    }

    /**
     * This method is called after the node has been made the current node. It
     * indicates that child nodes can now be added to it.
     */
    protected void jjtOpen() {
        // to be overridden
    }

    /**
     * This method is called after all the child nodes have been added.
     */
    protected void jjtClose() {
        // to be overridden
    }

    @Override // override to make it protected
    protected void addChild(B child, int index) {
        super.addChild(child, index);
    }

    @Override
    protected void insertChild(B child, int index) {
        super.insertChild(child, index);
        fitTokensToChildren(index);
    }

    /**
     * Ensures that the first (resp. last) token of this node is before
     * (resp. after) the first (resp. last) token of the child at the
     * given index.
     *
     * @param index Index of the child that was just inserted
     */
    @SuppressWarnings("unchecked")
    protected void fitTokensToChildren(int index) {
        if (index == 0) {
            enlargeLeft((B) getChild(index));
        }
        // Fixed: this used to test `index == getNumChildren()`, which can
        // never hold for a valid child index (indices go up to numChildren - 1),
        // so the right boundary was never enlarged.
        if (index == getNumChildren() - 1) {
            enlargeRight((B) getChild(index));
        }
    }

    /** Widens this node's first token if the child starts before it. */
    private void enlargeLeft(B child) {
        JavaccToken thisFst = this.getFirstToken();
        JavaccToken childFst = child.getFirstToken();
        if (childFst.compareTo(thisFst) < 0) {
            this.setFirstToken(childFst);
        }
    }

    /** Widens this node's last token if the child ends after it. */
    private void enlargeRight(B child) {
        JavaccToken thisLast = this.getLastToken();
        JavaccToken childLast = child.getLastToken();
        if (childLast.compareTo(thisLast) > 0) {
            this.setLastToken(childLast);
        }
    }

    @Override
    public JavaccToken getFirstToken() {
        return firstToken;
    }

    @Override
    public JavaccToken getLastToken() {
        return lastToken;
    }

    // the super methods query line & column, which we want to avoid
    protected void setLastToken(JavaccToken token) {
        this.lastToken = token;
    }

    protected void setFirstToken(JavaccToken token) {
        this.firstToken = token;
    }

    /**
     * This toString implementation is only meant for debugging purposes.
     */
    @Override
    public String toString() {
        FileLocation loc = getReportLocation();
        return "!debug only! [" + getXPathNodeName() + ":" + loc.getStartPos().toDisplayStringWithColon() + "]"
            + StringUtil.elide(getText().toString(), 150, "(truncated)");
    }
}
| 4,491 | 28.359477 | 155 | java |
pmd | pmd-master/pmd-core/src/main/java/net/sourceforge/pmd/lang/ast/impl/javacc/JjtreeNode.java | /*
* BSD-style license; for more info see http://pmd.sourceforge.net/license.html
*/
package net.sourceforge.pmd.lang.ast.impl.javacc;
import net.sourceforge.pmd.lang.ast.GenericToken;
import net.sourceforge.pmd.lang.ast.TextAvailableNode;
import net.sourceforge.pmd.lang.ast.impl.GenericNode;
/**
* Base interface for nodes that are produced by a JJTree parser. Our
* JJTree implementation gives {@link TextAvailableNode} for free, access
* to tokens is also guaranteed.
*
* @param <N> Self type
*/
public interface JjtreeNode<N extends JjtreeNode<N>> extends GenericNode<N>, TextAvailableNode {

    // todo token accessors should most likely be protected in PMD 7.

    JavaccToken getFirstToken();

    JavaccToken getLastToken();

    /**
     * Returns an iterable over the tokens of this node, from the first
     * to the last one (both included).
     */
    default Iterable<JavaccToken> tokens() {
        JavaccToken first = getFirstToken();
        JavaccToken last = getLastToken();
        return GenericToken.range(first, last);
    }
}
| 957 | 25.611111 | 96 | java |
pmd | pmd-master/pmd-core/src/main/java/net/sourceforge/pmd/lang/ast/impl/antlr4/BaseAntlrErrorNode.java | /*
* BSD-style license; for more info see http://pmd.sourceforge.net/license.html
*/
package net.sourceforge.pmd.lang.ast.impl.antlr4;
import org.antlr.v4.runtime.Token;
import org.checkerframework.checker.nullness.qual.NonNull;
public abstract class BaseAntlrErrorNode<N extends AntlrNode<N>> extends BaseAntlrTerminalNode<N> {

    protected BaseAntlrErrorNode(Token symbol) {
        // the `true` flag makes the superclass wrap the token in an error adapter
        super(symbol, true);
    }

    @Override
    protected final AntlrErrorPmdAdapter<N> asAntlrNode() {
        return (AntlrErrorPmdAdapter<N>) super.asAntlrNode();
    }

    /** Returns the text of the offending token. */
    @Override
    public @NonNull String getText() {
        Token symbol = getFirstAntlrToken();
        return symbol.getText();
    }

    @Override
    public final String getXPathNodeName() {
        return "Error";
    }
}
| 762 | 22.84375 | 99 | java |
pmd | pmd-master/pmd-core/src/main/java/net/sourceforge/pmd/lang/ast/impl/antlr4/AntlrToken.java | /*
* BSD-style license; for more info see http://pmd.sourceforge.net/license.html
*/
package net.sourceforge.pmd.lang.ast.impl.antlr4;
import org.antlr.v4.runtime.Lexer;
import org.antlr.v4.runtime.Token;
import net.sourceforge.pmd.annotation.Experimental;
import net.sourceforge.pmd.lang.ast.GenericToken;
import net.sourceforge.pmd.lang.document.FileLocation;
import net.sourceforge.pmd.lang.document.TextDocument;
import net.sourceforge.pmd.lang.document.TextRegion;
/**
* Generic Antlr representation of a token.
*/
public class AntlrToken implements GenericToken<AntlrToken> {

    private final Token token;
    private final AntlrToken previousComment;
    private final TextDocument textDoc;
    AntlrToken next;

    /**
     * Constructor
     *
     * @param token           The antlr token implementation
     * @param previousComment The previous comment
     * @param textDoc         The text document
     */
    public AntlrToken(final Token token, final AntlrToken previousComment, TextDocument textDoc) {
        this.token = token;
        this.previousComment = previousComment;
        this.textDoc = textDoc;
    }

    @Override
    public AntlrToken getNext() {
        return next;
    }

    @Override
    public AntlrToken getPreviousComment() {
        return previousComment;
    }

    @Override
    public CharSequence getImageCs() {
        return token.getText();
    }

    /** Returns a text region with the coordinates of this token. */
    @Override
    public TextRegion getRegion() {
        int start = token.getStartIndex();
        int end = token.getStopIndex() + 1; // antlr's stop index is inclusive
        return TextRegion.fromBothOffsets(start, end);
    }

    @Override
    public FileLocation getReportLocation() {
        TextRegion region = getRegion();
        return textDoc.toLocation(region);
    }

    @Override
    public boolean isEof() {
        return getKind() == Token.EOF;
    }

    @Override
    public int compareTo(AntlrToken o) {
        TextRegion mine = getRegion();
        TextRegion theirs = o.getRegion();
        return mine.compareTo(theirs);
    }

    @Override
    @Experimental
    public int getKind() {
        return token.getType();
    }

    /** Returns true if this token is not on the default channel. */
    public boolean isHidden() {
        return !isDefault();
    }

    /** Returns true if this token is on the default channel. */
    public boolean isDefault() {
        return token.getChannel() == Lexer.DEFAULT_TOKEN_CHANNEL;
    }
}
| 2,223 | 23.711111 | 98 | java |
pmd | pmd-master/pmd-core/src/main/java/net/sourceforge/pmd/lang/ast/impl/antlr4/BaseAntlrNode.java | /*
* BSD-style license; for more info see http://pmd.sourceforge.net/license.html
*/
package net.sourceforge.pmd.lang.ast.impl.antlr4;
import org.antlr.v4.runtime.ParserRuleContext;
import org.antlr.v4.runtime.Token;
import org.antlr.v4.runtime.tree.ErrorNode;
import org.antlr.v4.runtime.tree.ParseTree;
import org.antlr.v4.runtime.tree.TerminalNode;
import net.sourceforge.pmd.lang.ast.impl.GenericNode;
import net.sourceforge.pmd.lang.ast.impl.antlr4.BaseAntlrNode.AntlrToPmdParseTreeAdapter;
import net.sourceforge.pmd.lang.document.TextRegion;
import net.sourceforge.pmd.util.DataMap;
import net.sourceforge.pmd.util.DataMap.DataKey;
/**
* Base class for an antlr node. This implements the PMD interfaces only,
* not the antlr ones. It wraps an antlr node (they are linked both ways).
* Antlr primarily distinguishes {@link ParserRuleContext} for inner nodes,
* {@link TerminalNode} for nodes that wrap tokens (and can have no children),
* and {@link ErrorNode}, a subtype of {@link TerminalNode}. These each have
* a base class here, which refines the type of the underlying antlr node:
* {@link BaseAntlrInnerNode}, {@link BaseAntlrTerminalNode} and {@link BaseAntlrErrorNode}.
* These must be implemented in each language module with a class that also
* implements {@code <N>}.
*
* <p>During tree construction, the antlr runtime does its thing with the
* underlying antlr nodes. The PMD nodes are just wrappers around those,
* that respect the contract of {@link GenericNode}.
*
* @param <A> Type of the underlying antlr node
* @param <N> Public interface (eg SwiftNode)
*/
public abstract class BaseAntlrNode<A extends AntlrToPmdParseTreeAdapter<N>, N extends AntlrNode<N>> implements AntlrNode<N> {

    private DataMap<DataKey<?, ?>> userMap;

    /**
     * The only node for which this is not overwritten is the root node, for
     * which by contract, this is -1.
     */
    private int indexInParent = -1;

    protected BaseAntlrNode() {
        // protected
    }

    /**
     * Recurses over the text of all terminal descendants to build the
     * text of this node (without spaces). This is extremely inefficient
     * and should not be used to write rules. The antlr impl doesn't even
     * use a single stringbuilder.
     *
     * @deprecated Some rules depend on it and have not been rewritten
     */
    @Deprecated
    public String joinTokenText() {
        return asAntlrNode().getText();
    }

    // these are an implementation detail, meant as a crutch while some
    // rules depend on it
    // Should be made protected
    public abstract Token getFirstAntlrToken();

    public abstract Token getLastAntlrToken();

    /**
     * Returns the region spanned by this node, from the start of its
     * first token to the end of its last token.
     */
    @Override
    public TextRegion getTextRegion() {
        // Fixed: this used both endpoints of the FIRST token, which made
        // the region cover only that token (and dropped its last character,
        // since antlr's stop index is inclusive but TextRegion's end offset
        // is exclusive — compare AntlrToken#getRegion and #getEndColumn).
        return TextRegion.fromBothOffsets(getFirstAntlrToken().getStartIndex(),
                                          getLastAntlrToken().getStopIndex() + 1);
    }

    void setIndexInParent(int indexInParent) {
        this.indexInParent = indexInParent;
    }

    @Override
    public N getParent() {
        AntlrToPmdParseTreeAdapter<N> parent = asAntlrNode().getParent();
        return parent == null ? null : (N) parent.getPmdNode();
    }

    @Override
    public int getBeginLine() {
        return getFirstAntlrToken().getLine(); // This goes from 1 to n
    }

    @Override
    public int getEndLine() {
        // FIXME this is not the end line if the stop token spans several lines
        return getLastAntlrToken().getLine();
    }

    @Override
    public int getBeginColumn() {
        return getFirstAntlrToken().getCharPositionInLine() + 1;
    }

    @Override
    public int getEndColumn() {
        Token tok = getLastAntlrToken();
        return tok.getCharPositionInLine() + tok.getStopIndex() - tok.getStartIndex() + 1;
    }

    @Override
    public int getIndexInParent() {
        assert getParent() == null || indexInParent >= 0 : "Index not set";
        return indexInParent;
    }

    @Override
    public DataMap<DataKey<?, ?>> getUserMap() {
        // lazily created, most nodes never carry user data
        if (userMap == null) {
            userMap = DataMap.newDataMap();
        }
        return userMap;
    }

    /** Returns the underlying antlr node this PMD node wraps. */
    protected abstract A asAntlrNode();

    protected interface AntlrToPmdParseTreeAdapter<N extends AntlrNode<N>> extends ParseTree {

        BaseAntlrNode<?, N> getPmdNode();

        @Override
        AntlrToPmdParseTreeAdapter<N> getParent();
    }
}
| 4,393 | 31.308824 | 126 | java |
pmd | pmd-master/pmd-core/src/main/java/net/sourceforge/pmd/lang/ast/impl/antlr4/AntlrNode.java | /*
* BSD-style license; for more info see http://pmd.sourceforge.net/license.html
*/
package net.sourceforge.pmd.lang.ast.impl.antlr4;
import net.sourceforge.pmd.lang.ast.impl.GenericNode;
/**
* Base interface for all Antlr-based implementation of the Node interface.
*/
public interface AntlrNode<N extends AntlrNode<N>> extends GenericNode<N> {
    // marker interface: refines GenericNode's self type for antlr-based nodes
}
| 357 | 22.866667 | 79 | java |
pmd | pmd-master/pmd-core/src/main/java/net/sourceforge/pmd/lang/ast/impl/antlr4/AntlrGeneratedParserBase.java | /*
* BSD-style license; for more info see http://pmd.sourceforge.net/license.html
*/
package net.sourceforge.pmd.lang.ast.impl.antlr4;
import org.antlr.v4.runtime.Parser;
import org.antlr.v4.runtime.ParserRuleContext;
import org.antlr.v4.runtime.RuleContext;
import org.antlr.v4.runtime.Token;
import org.antlr.v4.runtime.TokenStream;
import org.antlr.v4.runtime.tree.ErrorNode;
import org.antlr.v4.runtime.tree.TerminalNode;
import net.sourceforge.pmd.lang.ast.Node;
/**
* This is the base class for antlr generated parsers. The implementation
* of PMD's {@link net.sourceforge.pmd.lang.ast.Parser} interface is {@link AntlrBaseParser}.
*
* <p>This class must implement the two abstract methods to create terminals
* and error nodes that implement {@code <N>}. The inner nodes implement PMD
* interfaces, and manipulation methods that the {@link Parser} superclass
* uses are redirected to the underlying antlr {@link ParserRuleContext} (the
* protected overloads here).
*
* <p>This is not enough in general to make the generated parser compilable,
* so an ant script does some cleanup at the end.
*
* <p>Additionally this must have a {@link AntlrNameDictionary} static final field,
* which stores the XPath names of the generated nodes (and terminals).
*
* <p>Additional members can be added to a parser with {@code @parser::members { ... }}
* in the g4 file. That's how the implementations for {@link #createPmdTerminal(ParserRuleContext, Token)}
* and {@link #createPmdError(ParserRuleContext, Token)} can be added.
*/
public abstract class AntlrGeneratedParserBase<N extends AntlrNode<N>> extends Parser {

    public AntlrGeneratedParserBase(TokenStream input) {
        super(input);
    }

    // Redirect antlr's terminal/error node creation to the PMD node
    // factories below, then unwrap back to the antlr adapter.
    @Override
    public TerminalNode createTerminalNode(ParserRuleContext parent, Token t) {
        return createPmdTerminal(parent, t).asAntlrNode();
    }

    @Override
    public ErrorNode createErrorNode(ParserRuleContext parent, Token t) {
        return createPmdError(parent, t).asAntlrNode();
    }

    // Those two need to return a node that implements eg SwiftNode
    protected abstract BaseAntlrTerminalNode<N> createPmdTerminal(ParserRuleContext parent, Token t);

    protected abstract BaseAntlrErrorNode<N> createPmdError(ParserRuleContext parent, Token t);

    // Unwraps an antlr rule context into the PMD node it adapts.
    protected Node asPmdNode(RuleContext ctx) {
        return ((BaseAntlrNode.AntlrToPmdParseTreeAdapter<?>) ctx).getPmdNode();
    }

    // Necessary API to build the trees
    // Each overload below unwraps the PMD node and delegates to the
    // corresponding Parser method on the underlying antlr context.

    protected void enterRule(BaseAntlrInnerNode<N> ptree, int state, int alt) {
        enterRule(ptree.asAntlrNode(), state, alt);
    }

    protected void enterOuterAlt(BaseAntlrInnerNode<N> localctx, int altNum) {
        enterOuterAlt(localctx.asAntlrNode(), altNum);
    }

    protected void pushNewRecursionContext(BaseAntlrInnerNode<N> localctx, int state, int ruleIndex) {
        pushNewRecursionContext(localctx.asAntlrNode(), state, ruleIndex);
    }

    protected void enterRecursionRule(BaseAntlrInnerNode<N> localctx, int state, int ruleIndex, int precedence) {
        enterRecursionRule(localctx.asAntlrNode(), state, ruleIndex, precedence);
    }

    protected boolean sempred(BaseAntlrInnerNode<N> localctx, int ruleIndex, int predIndex) {
        return sempred(localctx.asAntlrNode(), ruleIndex, predIndex);
    }
}
| 3,324 | 36.359551 | 113 | java |
pmd | pmd-master/pmd-core/src/main/java/net/sourceforge/pmd/lang/ast/impl/antlr4/AntlrNameDictionary.java | /*
* BSD-style license; for more info see http://pmd.sourceforge.net/license.html
*/
package net.sourceforge.pmd.lang.ast.impl.antlr4;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashSet;
import java.util.Set;
import java.util.stream.Stream;
import org.antlr.v4.runtime.ParserRuleContext;
import org.antlr.v4.runtime.Token;
import org.antlr.v4.runtime.Vocabulary;
import org.apache.commons.lang3.StringUtils;
import org.checkerframework.checker.nullness.qual.NonNull;
import org.checkerframework.checker.nullness.qual.Nullable;
/**
* Stores the XPath name of antlr terminals. I found no simple way to
* give names to punctuation (we could add a lexer rule, but it may
* conflict with other tokens). So their names are hardcoded here.
*
* <p>Terminal names start with {@code "T-"} in XPath to avoid conflicts
* with other stuff.
*/
public class AntlrNameDictionary {
private final String[] terminalXPathNames;
private final String[] terminalImages;
private final String[] nonTermXpathNames;
private final Vocabulary vocabulary;
public AntlrNameDictionary(Vocabulary vocab, String[] ruleNames) {
this.vocabulary = vocab;
nonTermXpathNames = new String[ruleNames.length];
for (int i = 0; i < nonTermXpathNames.length; i++) {
nonTermXpathNames[i] = StringUtils.capitalize(ruleNames[i]);
}
Set<String> seen = new HashSet<>();
Collections.addAll(seen, ruleNames);
// terminal names
terminalXPathNames = new String[vocab.getMaxTokenType()];
terminalXPathNames[0] = "Invalid"; // See Token.INVALID_TYPE
terminalImages = new String[vocab.getMaxTokenType()];
terminalImages[0] = null;
for (int i = Token.MIN_USER_TOKEN_TYPE; i < terminalXPathNames.length; i++) {
String name = vocab.getSymbolicName(i);
String literalName = vocab.getLiteralName(i);
if (literalName != null) {
// cleanup literal name, Antlr surrounds the image with single quotes
literalName = literalName.substring(1, literalName.length() - 1);
terminalImages[i] = literalName;
}
if (name == null && literalName != null) {
name = literalName;
if (!name.matches("[a-zA-Z][\\w_-]+")) { // not alphanum
name = nonAlphaNumName(name);
} // otherwise something like "final"
}
assert name != null : "Token of kind " + i + " has no XPath name (literal " + vocab.getLiteralName(i) + ")";
String finalName = "T-" + name;
assert finalName.matches("[a-zA-Z][\\w_-]+") : "Not a valid XPath name " + finalName;
assert seen.add(finalName) : "Duplicate XPath name " + finalName;
terminalXPathNames[i] = finalName;
}
assert Stream.of(terminalXPathNames).distinct().count() == terminalXPathNames.length
: "Duplicate names in " + Arrays.toString(terminalXPathNames);
}
public Vocabulary getVocabulary() {
return vocabulary;
}
/**
* Override this to customize the XPath name of tokes with no symbolic
* name and with an image that is non-alphanumeric. Return null to give
* up. The default just gives some name to common punctuation. Remember
* that the same token may mean several things in different contexts, so
* eg using {@code "not"} as the name of {@code "!"} is too specific.
*/
protected @Nullable String nonAlphaNumName(String name) {
switch (name) {
case "!": return "bang";
case "!!": return "double-bang";
case "?": return "question";
case "??": return "double-question";
case "?:": return "elvis";
case "?.": return "question-dot";
case ":": return "colon";
case ";": return "semi";
case ",": return "comma";
case "(": return "lparen";
case ")": return "rparen";
case "[": return "lbracket";
case "]": return "rbracket";
case "{": return "lbrace";
case "}": return "rbrace";
case "_": return "underscore";
case ".": return "dot";
case "..": return "double-dot";
case "...": return "ellipsis";
case "@": return "at-symbol";
case "$": return "dollar";
case "\\": return "backslash";
case "/": return "slash";
case "//": return "double-slash";
case "`": return "backtick";
case "'": return "squote";
case "\"": return "dquote";
case "\"\"\"": return "triple-quote";
case ">": return "gt";
case ">=": return "ge";
case "<": return "lt";
case "<=": return "le";
case ">>": return "double-gt";
case "<<": return "double-lt";
case ">>>": return "triple-gt";
case "<<<": return "triple-lt";
case "=": return "eq";
case "==": return "double-eq";
case "===": return "triple-eq";
case "!=": return "not-eq";
case "&": return "amp";
case "&&": return "double-amp";
case "|": return "pipe";
case "||": return "double-pipe";
case "*": return "star";
case "**": return "double-star";
case "+": return "plus";
case "++": return "double-plus";
case "-": return "minus";
case "--": return "double-minus";
case "->": return "rarrow";
case "<-": return "larrow";
default:
return null;
}
}
/**
* Gets the xpath name of a terminal node with a given {@link Token#getType()}.
*
* @throws IllegalArgumentException If the index is invalid
*/
public @NonNull String getXPathNameOfToken(int tokenType) {
if (tokenType >= 0 && tokenType < terminalXPathNames.length) {
return terminalXPathNames[tokenType];
}
if (tokenType == Token.EOF) {
return "EOF";
}
throw new IllegalArgumentException("I don't know token type " + tokenType);
}
/**
* Returns the constant image of the given token (a shared string),
* or null if the token has none. This is a memory optimization to
* avoid creating a new string for tokens with constant images. Antlr
* does not do this by itself sadly.
*/
    public @Nullable String getConstantImageOfToken(Token token) {
        int tokenType = token.getType();
        // NOTE(review): the bound is checked against terminalXPathNames.length
        // but the lookup indexes terminalImages — presumably both arrays have
        // the same length (one slot per token type); confirm at the declarations.
        if (tokenType >= 0 && tokenType < terminalXPathNames.length) {
            return terminalImages[tokenType];
        } else if (token.getStartIndex() == token.getStopIndex()) {
            // NOTE(review): in ANTLR the stop index is inclusive, so
            // start == stop normally denotes a one-character token; verify
            // that "" is really the intended image for this case.
            return "";
        }
        // Token has no constant image; callers fall back to Token#getText().
        return null;
    }
/**
* Gets the xpath name of an inner node with a given {@link ParserRuleContext#getRuleIndex()}.
*
* @throws IndexOutOfBoundsException If the index is invalid
*/
    public @NonNull String getXPathNameOfRule(int idx) {
        return nonTermXpathNames[idx];
    }
    /** Returns the number of parser rules, i.e. the exclusive upper bound for rule indices. */
    public int getMaxRuleIndex() {
        return nonTermXpathNames.length;
    }
    /** Returns the maximum token type known to the lexer's vocabulary. */
    public int getMaxTokenType() {
        return vocabulary.getMaxTokenType();
    }
}
| 7,234 | 31.886364 | 120 | java |
pmd | pmd-master/pmd-core/src/main/java/net/sourceforge/pmd/lang/ast/impl/antlr4/BaseAntlrTerminalNode.java | /*
* BSD-style license; for more info see http://pmd.sourceforge.net/license.html
*/
package net.sourceforge.pmd.lang.ast.impl.antlr4;
import org.antlr.v4.runtime.RuleContext;
import org.antlr.v4.runtime.Token;
import org.antlr.v4.runtime.tree.ErrorNode;
import org.antlr.v4.runtime.tree.ParseTreeVisitor;
import org.antlr.v4.runtime.tree.TerminalNode;
import org.antlr.v4.runtime.tree.TerminalNodeImpl;
import org.checkerframework.checker.nullness.qual.NonNull;
import net.sourceforge.pmd.lang.ast.impl.antlr4.BaseAntlrTerminalNode.AntlrTerminalPmdAdapter;
/**
* Base class for terminal nodes (they wrap a {@link TerminalNode}).
*/
public abstract class BaseAntlrTerminalNode<N extends AntlrNode<N>>
    extends BaseAntlrNode<AntlrTerminalPmdAdapter<N>, N> {

    private final AntlrTerminalPmdAdapter<N> antlrNode;

    protected BaseAntlrTerminalNode(Token symbol) {
        this(symbol, false);
    }

    BaseAntlrTerminalNode(Token symbol, boolean isError) {
        // Error tokens get the ErrorNode adapter so antlr visitors can
        // dispatch to visitErrorNode, regular tokens the plain adapter.
        if (isError) {
            this.antlrNode = new AntlrErrorPmdAdapter<>(this, symbol);
        } else {
            this.antlrNode = new AntlrTerminalPmdAdapter<>(this, symbol);
        }
    }

    /**
     * Returns the text of the token.
     *
     * @implNote This should use {@link AntlrNameDictionary#getConstantImageOfToken(Token)},
     * or default to {@link Token#getText()}
     */
    public abstract @NonNull String getText();

    @Override
    protected AntlrTerminalPmdAdapter<N> asAntlrNode() {
        return antlrNode;
    }

    @Override
    public Token getFirstAntlrToken() {
        // A terminal wraps exactly one token.
        return antlrNode.symbol;
    }

    @Override
    public Token getLastAntlrToken() {
        return antlrNode.symbol;
    }

    @Override
    public int getNumChildren() {
        // Terminals never have children.
        return 0;
    }

    /**
     * Returns the token type ("kind") of the wrapped token, one of the
     * constants of the generated lexer.
     *
     * <p>Bug fix: this previously returned {@link Token#getTokenIndex()},
     * which is the token's position in the token stream, not its type.
     * Callers such as {@code BaseAntlrInnerNode#getToken(int, int)} compare
     * this value against generated token-type constants, so the stream
     * index made those lookups match the wrong tokens (or none at all).
     */
    protected int getTokenKind() {
        return antlrNode.symbol.getType();
    }

    @Override
    public N getChild(int index) {
        throw new IndexOutOfBoundsException("Index " + index + " for terminal node");
    }

    /** Adapter presenting this PMD node as an antlr {@link TerminalNodeImpl}. */
    protected static class AntlrTerminalPmdAdapter<N extends AntlrNode<N>> extends TerminalNodeImpl implements BaseAntlrNode.AntlrToPmdParseTreeAdapter<N> {

        private final BaseAntlrTerminalNode<N> pmdNode;

        public AntlrTerminalPmdAdapter(BaseAntlrTerminalNode<N> pmdNode, Token symbol) {
            super(symbol);
            this.pmdNode = pmdNode;
        }

        @Override
        public AntlrToPmdParseTreeAdapter<N> getParent() {
            return (AntlrToPmdParseTreeAdapter<N>) super.getParent();
        }

        @Override
        public void setParent(RuleContext parent) {
            // Every node of the parse tree must belong to the adapter hierarchy.
            assert parent instanceof BaseAntlrNode.AntlrToPmdParseTreeAdapter;
            super.setParent(parent);
        }

        @Override
        public BaseAntlrNode<?, N> getPmdNode() {
            return pmdNode;
        }
    }

    /** Adapter for error tokens, dispatching antlr visitors to {@code visitErrorNode}. */
    protected static class AntlrErrorPmdAdapter<N extends AntlrNode<N>> extends AntlrTerminalPmdAdapter<N> implements ErrorNode {

        public AntlrErrorPmdAdapter(BaseAntlrTerminalNode<N> pmdNode, Token symbol) {
            super(pmdNode, symbol);
        }

        @Override
        public <T> T accept(ParseTreeVisitor<? extends T> visitor) {
            return visitor.visitErrorNode(this);
        }
    }
}
| 3,288 | 28.106195 | 156 | java |
pmd | pmd-master/pmd-core/src/main/java/net/sourceforge/pmd/lang/ast/impl/antlr4/BaseAntlrInnerNode.java | /*
* BSD-style license; for more info see http://pmd.sourceforge.net/license.html
*/
package net.sourceforge.pmd.lang.ast.impl.antlr4;
import java.util.List;
import org.antlr.v4.runtime.ParserRuleContext;
import org.antlr.v4.runtime.RecognitionException;
import org.antlr.v4.runtime.RuleContext;
import org.antlr.v4.runtime.Token;
import org.antlr.v4.runtime.tree.ParseTree;
import org.antlr.v4.runtime.tree.ParseTreeListener;
import org.antlr.v4.runtime.tree.ParseTreeVisitor;
import org.antlr.v4.runtime.tree.RuleNode;
import org.antlr.v4.runtime.tree.TerminalNode;
import net.sourceforge.pmd.lang.ast.impl.antlr4.BaseAntlrInnerNode.PmdAsAntlrInnerNode;
/**
* Base class for the inner nodes (corresponds to {@link ParserRuleContext}).
* Use the {@code contextSuperClass} option to set this in the antlr g4 file,
* eg {@code options { contextSuperClass = SwiftInnerNode; }}.
*/
public abstract class BaseAntlrInnerNode<N extends AntlrNode<N>> extends BaseAntlrNode<PmdAsAntlrInnerNode<N>, N> {
    // Set by the generated parser when recovery occurred in this rule.
    public RecognitionException exception;
    // The antlr-side adapter; this PMD node and the adapter reference each other.
    private final PmdAsAntlrInnerNode<N> antlrNode;
    protected BaseAntlrInnerNode() {
        antlrNode = new PmdAsAntlrInnerNode<>(this);
    }
    protected BaseAntlrInnerNode(ParserRuleContext parent, int invokingStateNumber) {
        antlrNode = new PmdAsAntlrInnerNode<>(this, (PmdAsAntlrInnerNode<N>) parent, invokingStateNumber);
    }
    @Override
    @SuppressWarnings("unchecked")
    public N getChild(int index) {
        if (0 <= index && index < getNumChildren()) {
            // Children are antlr adapters; unwrap to the PMD node.
            N pmdNode = (N) antlrNode.getChild(index).getPmdNode();
            // Index is assigned in PmdAsAntlrInnerNode#addAnyChild, check consistency.
            assert pmdNode.getIndexInParent() == index;
            return pmdNode;
        }
        throw new IndexOutOfBoundsException("Index " + index + ", numChildren " + getNumChildren());
    }
    @Override
    public int getNumChildren() {
        return antlrNode.getChildCount();
    }
    @Override
    protected PmdAsAntlrInnerNode<N> asAntlrNode() {
        return antlrNode;
    }
    /** Returns the parser rule index of this node, implemented by generated subclasses. */
    protected abstract int getRuleIndex();
    @Override
    public Token getFirstAntlrToken() {
        return asAntlrNode().start;
    }
    @Override
    public Token getLastAntlrToken() {
        return asAntlrNode().stop;
    }
    /** Returns the idx-th child of the given rule type (used by generated code). */
    protected <T extends BaseAntlrInnerNode<N>> T getRuleContext(Class<T> klass, int idx) {
        return children(klass).get(idx);
    }
    /** Returns all children of the given rule type (used by generated code). */
    protected <T extends BaseAntlrInnerNode<N>> List<T> getRuleContexts(Class<T> klass) {
        return children(klass).toList();
    }
    /**
     * Returns the idx-th terminal child with the given token kind,
     * or null if there is none (used by generated code).
     */
    protected TerminalNode getToken(int kind, int idx) {
        @SuppressWarnings("rawtypes")
        BaseAntlrTerminalNode pmdWrapper =
            children(BaseAntlrTerminalNode.class)
                .filter(it -> it.getTokenKind() == kind)
                .get(idx);
        return pmdWrapper != null ? pmdWrapper.asAntlrNode() : null;
    }
    /** Returns all terminal children with the given token kind (used by generated code). */
    protected List<TerminalNode> getTokens(int kind) {
        return children(BaseAntlrTerminalNode.class)
            .filter(it -> it.getTokenKind() == kind)
            .toList(BaseAntlrTerminalNode::asAntlrNode);
    }
    // Used by generated code for labeled rule alternatives.
    protected void copyFrom(BaseAntlrInnerNode<N> other) {
        asAntlrNode().copyFrom(other.asAntlrNode());
    }
    public void enterRule(ParseTreeListener listener) {
        // default does nothing
    }
    public void exitRule(ParseTreeListener listener) {
        // default does nothing
    }
    /**
     * Adapter presenting a PMD inner node as an antlr {@link ParserRuleContext},
     * so that the generated parser can build the tree through the antlr API.
     */
    protected static class PmdAsAntlrInnerNode<N extends AntlrNode<N>> extends ParserRuleContext implements RuleNode, BaseAntlrNode.AntlrToPmdParseTreeAdapter<N> {
        private final BaseAntlrInnerNode<N> pmdNode;
        PmdAsAntlrInnerNode(BaseAntlrInnerNode<N> node) {
            this.pmdNode = node;
        }
        PmdAsAntlrInnerNode(BaseAntlrInnerNode<N> node, PmdAsAntlrInnerNode<N> parent, int invokingStateNumber) {
            super(parent, invokingStateNumber);
            this.pmdNode = node;
        }
        @Override
        public BaseAntlrInnerNode<N> getPmdNode() {
            return pmdNode;
        }
        @Override
        @SuppressWarnings("unchecked")
        public PmdAsAntlrInnerNode<N> getParent() {
            return (PmdAsAntlrInnerNode<N>) super.getParent();
        }
        @Override
        @SuppressWarnings("unchecked")
        public AntlrToPmdParseTreeAdapter<N> getChild(int i) {
            return (AntlrToPmdParseTreeAdapter<N>) super.getChild(i);
        }
        @Override
        public <T extends ParseTree> T addAnyChild(T t) {
            assert t instanceof AntlrToPmdParseTreeAdapter;
            BaseAntlrNode<?, ?> pmdNode = ((AntlrToPmdParseTreeAdapter<?>) t).getPmdNode();
            // Record the child's position before it is appended (getChildCount()
            // is the index it will occupy); getChild(int) asserts this later.
            pmdNode.setIndexInParent(getChildCount());
            return super.addAnyChild(t);
        }
        @Override
        public void setParent(RuleContext parent) {
            assert parent instanceof PmdAsAntlrInnerNode;
            super.setParent(parent);
        }
        @Override
        public <T> T accept(ParseTreeVisitor<? extends T> visitor) {
            throw new UnsupportedOperationException("Cannot visit the underlying antlr nodes");
        }
    }
}
| 5,136 | 30.906832 | 163 | java |
pmd | pmd-master/pmd-core/src/main/java/net/sourceforge/pmd/lang/ast/impl/antlr4/AntlrTokenManager.java | /*
* BSD-style license; for more info see http://pmd.sourceforge.net/license.html
*/
package net.sourceforge.pmd.lang.ast.impl.antlr4;
import org.antlr.v4.runtime.BaseErrorListener;
import org.antlr.v4.runtime.Lexer;
import org.antlr.v4.runtime.RecognitionException;
import org.antlr.v4.runtime.Recognizer;
import net.sourceforge.pmd.lang.TokenManager;
import net.sourceforge.pmd.lang.ast.TokenMgrError;
import net.sourceforge.pmd.lang.document.TextDocument;
/**
* Generic token manager implementation for all Antlr lexers.
*/
public class AntlrTokenManager implements TokenManager<AntlrToken> {

    private final Lexer lexer;
    private final TextDocument document;
    private AntlrToken lastToken;

    public AntlrTokenManager(final Lexer lexer, final TextDocument textDocument) {
        this.lexer = lexer;
        this.document = textDocument;
        resetListeners();
    }

    /**
     * Returns the next token on the default channel, skipping over any
     * hidden tokens (comments, whitespace) the lexer produces in between.
     */
    @Override
    public AntlrToken getNextToken() {
        AntlrToken token;
        do {
            token = getNextTokenFromAnyChannel();
        } while (!token.isDefault());
        return token;
    }

    /**
     * Fetches the next token from the lexer regardless of channel. The new
     * token is linked to its predecessor, and if the predecessor was hidden
     * it is recorded as the comment preceding the new token.
     */
    private AntlrToken getNextTokenFromAnyChannel() {
        AntlrToken precedingComment = null;
        if (lastToken != null && lastToken.isHidden()) {
            precedingComment = lastToken;
        }
        final AntlrToken current = new AntlrToken(lexer.nextToken(), precedingComment, document);
        if (lastToken != null) {
            lastToken.next = current;
        }
        lastToken = current;
        return current;
    }

    /** Replaces the lexer's default error listeners with our own. */
    private void resetListeners() {
        lexer.removeErrorListeners();
        lexer.addErrorListener(new ErrorHandler());
    }

    /** Turns antlr lexing errors into {@link TokenMgrError}s. */
    private final class ErrorHandler extends BaseErrorListener {

        @Override
        public void syntaxError(final Recognizer<?, ?> recognizer,
                                final Object offendingSymbol,
                                final int line,
                                final int charPositionInLine,
                                final String msg,
                                final RecognitionException ex) {
            throw new TokenMgrError(line, charPositionInLine, document.getFileId(), msg, ex);
        }
    }
}
| 2,273 | 31.028169 | 116 | java |
pmd | pmd-master/pmd-core/src/main/java/net/sourceforge/pmd/lang/ast/impl/antlr4/AntlrBaseParser.java | /*
* BSD-style license; for more info see http://pmd.sourceforge.net/license.html
*/
package net.sourceforge.pmd.lang.ast.impl.antlr4;
import org.antlr.v4.runtime.CharStream;
import org.antlr.v4.runtime.CharStreams;
import org.antlr.v4.runtime.Lexer;
import net.sourceforge.pmd.lang.ast.ParseException;
import net.sourceforge.pmd.lang.ast.Parser;
import net.sourceforge.pmd.lang.ast.RootNode;
/**
* Generic Antlr parser adapter for all Antlr parsers. This wraps a parser
 * generated by antlr, see {@link AntlrGeneratedParserBase}.
*
* @param <N> Supertype of all nodes for the language, eg SwiftNode
* @param <R> Type of the root node
*/
public abstract class AntlrBaseParser<
    N extends AntlrNode<N>,
    R extends BaseAntlrInnerNode<N> & RootNode
    > implements Parser {

    /**
     * Parses the task's source text into a PMD root node, feeding the
     * text to the language's lexer and then to the concrete parser.
     */
    @Override
    public R parse(ParserTask task) throws ParseException {
        final String sourceName = task.getTextDocument().getFileId().getAbsolutePath();
        final CharStream charStream = CharStreams.fromString(task.getSourceText(), sourceName);
        return parse(getLexer(charStream), task);
    }

    /** Runs the generated parser over the given lexer's token stream. */
    protected abstract R parse(Lexer parser, ParserTask task);

    /** Creates the generated lexer for the given character stream. */
    protected abstract Lexer getLexer(CharStream source);
}
| 1,162 | 30.432432 | 123 | java |
pmd | pmd-master/pmd-core/src/main/java/net/sourceforge/pmd/lang/ast/internal/AxisStream.java | /*
* BSD-style license; for more info see http://pmd.sourceforge.net/license.html
*/
package net.sourceforge.pmd.lang.ast.internal;
import static java.lang.Math.max;
import static java.lang.Math.min;
import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;
import java.util.Spliterator;
import java.util.Spliterators;
import java.util.function.Function;
import java.util.function.Predicate;
import org.checkerframework.checker.nullness.qual.NonNull;
import org.checkerframework.checker.nullness.qual.Nullable;
import net.sourceforge.pmd.lang.ast.Node;
import net.sourceforge.pmd.lang.ast.NodeStream;
import net.sourceforge.pmd.util.AssertionUtil;
import net.sourceforge.pmd.util.IteratorUtil;
/**
* Stream that iterates over one axis of the tree.
*/
abstract class AxisStream<T extends Node> extends IteratorBasedNStream<T> {
    /** Spec of this field depends on the subclass. */
    protected final Node node;
    /** The filter; when there is no filtering this is {@link Filtermap#NODE_IDENTITY}. */
    protected final Filtermap<Node, ? extends T> filter;
    AxisStream(@NonNull Node root, Filtermap<Node, ? extends T> filter) {
        super();
        this.node = root;
        this.filter = filter;
    }
    @Override
    @SuppressWarnings("unchecked")
    public final Iterator<T> iterator() {
        // The filter is applied lazily on top of the raw axis iterator.
        return (Iterator<T>) filter.filterMap(baseIterator());
    }
    /** Returns the unfiltered iterator over this axis. */
    protected abstract Iterator<Node> baseIterator();
    // map/filter/filterIs fuse the new stage into this stream's filter
    // instead of stacking another stream on top.
    @Override
    public <R extends Node> NodeStream<@NonNull R> map(Function<? super T, ? extends @Nullable R> mapper) {
        return copyWithFilter(filter.thenApply(mapper));
    }
    @Override
    public NodeStream<T> filter(Predicate<? super @NonNull T> predicate) {
        return copyWithFilter(filter.thenFilter(predicate));
    }
    @Override
    public <S extends Node> NodeStream<S> filterIs(Class<? extends S> r1Class) {
        return copyWithFilter(filter.thenCast(r1Class));
    }
    /*
     * Override one of these three to implement all the overloads of first/last/toList
     */
    protected <O extends Node> @Nullable O firstImpl(Filtermap<? super Node, ? extends O> filter) {
        Iterator<? extends O> iter = filter.filterMap(baseIterator());
        return iter.hasNext() ? iter.next() : null;
    }
    protected <O extends Node> @Nullable O lastImpl(Filtermap<? super Node, ? extends O> filter) {
        Iterator<? extends O> iter = filter.filterMap(baseIterator());
        return IteratorUtil.last(iter);
    }
    protected <O> List<O> toListImpl(Filtermap<? super Node, ? extends O> filter) {
        Iterator<? extends O> iter = filter.filterMap(baseIterator());
        return IteratorUtil.toList(iter);
    }
    @Override
    public @Nullable T first() {
        return firstImpl(filter);
    }
    @Override
    public <R extends Node> @Nullable R first(Class<? extends R> r1Class) {
        return firstImpl(filter.thenCast(r1Class));
    }
    @Override
    public @Nullable T first(Predicate<? super T> predicate) {
        return firstImpl(filter.thenFilter(predicate));
    }
    @Nullable
    @Override
    public T last() {
        return lastImpl(filter);
    }
    @Override
    public List<T> toList() {
        return toListImpl(this.filter);
    }
    @Override
    public <R> List<R> toList(Function<? super T, ? extends R> mapper) {
        return toListImpl(this.filter.thenApply(mapper));
    }
    @Override
    public <R extends Node> @Nullable R last(Class<? extends R> rClass) {
        return lastImpl(filter.thenCast(rClass));
    }
    @Override
    public String toString() {
        return getClass().getSimpleName() + "[" + node + "] -> " + toList();
    }
    /**
     * Returns a copy of this instance, with the given filter.
     * Implementations of this method should not compose the given filter
     * with their current filter.
     */
    protected abstract <S extends Node> NodeStream<S> copyWithFilter(Filtermap<Node, ? extends S> filterMap);
    /** Stream over a node and its ancestors, filtered. */
    static class FilteredAncestorOrSelfStream<T extends Node> extends AxisStream<T> {
        // the first node always matches the filter
        FilteredAncestorOrSelfStream(@NonNull T node, Filtermap<Node, ? extends T> target) {
            super(node, target);
        }
        @Override
        protected Iterator<Node> baseIterator() {
            return new AncestorOrSelfIterator(node);
        }
        @Override
        public NodeStream<T> drop(int n) {
            AssertionUtil.requireNonNegative("n", n);
            if (n == 0) {
                return this;
            }
            // eg for NodeStream.of(a,b,c).drop(2)
            Node nth = get(n); // get(2) == c
            return StreamImpl.ancestorsOrSelf(nth, filter); // c.ancestorsOrSelf() == [c]
        }
        @Override
        protected <S extends Node> NodeStream<S> copyWithFilter(Filtermap<Node, ? extends S> filterMap) {
            // Restore the invariant that the first node matches the filter.
            S newFirst = TraversalUtils.getFirstParentOrSelfMatching(node, filterMap);
            if (newFirst == null) {
                return StreamImpl.empty();
            } else {
                return new FilteredAncestorOrSelfStream<>(newFirst, filterMap);
            }
        }
        @Override
        public @Nullable T first() {
            // Safe by the constructor invariant: node matches the filter.
            return (T) node;
        }
        @Override
        public boolean nonEmpty() {
            return true;
        }
        @Override
        protected <O extends Node> @Nullable O firstImpl(Filtermap<? super Node, ? extends O> filter) {
            return TraversalUtils.getFirstParentOrSelfMatching(node, filter);
        }
    }
    /** Unfiltered stream over a node and its ancestors. */
    static class AncestorOrSelfStream extends FilteredAncestorOrSelfStream<Node> {
        AncestorOrSelfStream(@NonNull Node node) {
            super(node, Filtermap.NODE_IDENTITY);
        }
        @Nullable
        @Override
        public Node first() {
            return node;
        }
        @Override
        public boolean nonEmpty() {
            return true;
        }
        @Override
        public @Nullable Node last() {
            // The last ancestor is the root of the tree.
            Node last = node;
            while (last.getParent() != null) {
                last = last.getParent();
            }
            return last;
        }
    }
    /** Base class for descendant streams, carrying the walker configuration. */
    abstract static class DescendantStreamBase<T extends Node> extends AxisStream<T> implements DescendantNodeStream<T> {
        final TreeWalker walker;
        DescendantStreamBase(@NonNull Node root,
                             TreeWalker walker,
                             Filtermap<Node, ? extends T> filter) {
            super(root, filter);
            this.walker = walker;
        }
        protected abstract <S extends Node> DescendantNodeStream<S> copyWithWalker(Filtermap<Node, ? extends S> filterMap, TreeWalker walker);
        @Override
        public DescendantNodeStream<T> crossFindBoundaries(boolean cross) {
            return walker.isCrossFindBoundaries() == cross
                   ? this
                   : copyWithWalker(this.filter, walker.crossFindBoundaries(cross));
        }
        @Override
        protected <S extends Node> NodeStream<S> copyWithFilter(Filtermap<Node, ? extends S> filterMap) {
            return copyWithWalker(filterMap, walker);
        }
    }
    /** Filtered stream over the descendants of a node (excluding the node itself). */
    static class FilteredDescendantStream<T extends Node> extends DescendantStreamBase<T> {
        FilteredDescendantStream(Node node,
                                 TreeWalker walker,
                                 Filtermap<Node, ? extends T> target) {
            super(node, walker, target);
        }
        @Override
        protected Iterator<Node> baseIterator() {
            return walker.descendantIterator(node);
        }
        @Override
        protected <S extends Node> DescendantNodeStream<S> copyWithWalker(Filtermap<Node, ? extends S> filterMap, TreeWalker walker) {
            return new FilteredDescendantStream<>(node, walker, filterMap);
        }
        @Override
        protected <O extends Node> @Nullable O firstImpl(Filtermap<? super Node, ? extends O> filter) {
            // Delegate to the walker, which can stop early.
            return walker.getFirstDescendantOfType(node, filter);
        }
        @Override
        public boolean nonEmpty() {
            return walker.getFirstDescendantOfType(node, filter) != null;
        }
        @Override
        protected <O> List<O> toListImpl(Filtermap<? super Node, ? extends O> filter) {
            return walker.findDescendantsMatching(node, filter);
        }
    }
    /** Unfiltered stream over the descendants of a node. */
    static class DescendantStream extends FilteredDescendantStream<Node> {
        DescendantStream(Node node, TreeWalker walker) {
            super(node, walker, Filtermap.NODE_IDENTITY);
        }
        @Override
        public DescendantNodeStream<Node> crossFindBoundaries(boolean cross) {
            return new DescendantStream(node, walker.crossFindBoundaries(cross));
        }
        @Override
        public boolean nonEmpty() {
            // Without a filter, any child at all makes the stream non-empty.
            return node.getNumChildren() > 0;
        }
    }
    /** Filtered stream over a node and its descendants. */
    static class FilteredDescendantOrSelfStream<T extends Node> extends DescendantStreamBase<T> {
        FilteredDescendantOrSelfStream(Node node,
                                       TreeWalker walker,
                                       Filtermap<Node, ? extends T> filtermap) {
            super(node, walker, filtermap);
        }
        @Override
        public Iterator<Node> baseIterator() {
            return walker.descendantOrSelfIterator(node);
        }
        @Override
        protected <S extends Node> DescendantNodeStream<S> copyWithWalker(Filtermap<Node, ? extends S> filterMap, TreeWalker walker) {
            return new FilteredDescendantOrSelfStream<>(node, walker, filterMap);
        }
        @Override
        protected <O> List<O> toListImpl(Filtermap<? super Node, ? extends O> filter) {
            // The walker only collects descendants; add the (filtered) self first.
            List<O> result = new ArrayList<>();
            O top = filter.apply(node);
            if (top != null) {
                result.add(top);
            }
            walker.findDescendantsMatching(node, filter, result);
            return result;
        }
    }
    /** Unfiltered stream over a node and its descendants. */
    static final class DescendantOrSelfStream extends FilteredDescendantOrSelfStream<Node> {
        DescendantOrSelfStream(Node node, TreeWalker walker) {
            super(node, walker, Filtermap.NODE_IDENTITY);
        }
        @Override
        public DescendantNodeStream<Node> crossFindBoundaries(boolean cross) {
            return new DescendantOrSelfStream(node, walker.crossFindBoundaries(cross));
        }
        @Nullable
        @Override
        public Node first() {
            return node;
        }
        @Override
        public boolean nonEmpty() {
            return true;
        }
    }
    /**
     * Implements following/preceding sibling streams, and children streams.
     */
    static class FilteredChildrenStream<T extends Node> extends AxisStream<T> {
        final int low; // inclusive
        final int len;
        FilteredChildrenStream(@NonNull Node root, Filtermap<Node, ? extends T> filtermap, int low, int len) {
            super(root, filtermap);
            this.low = low;
            this.len = len;
        }
        @Override
        public <R extends Node> NodeStream<R> flatMap(Function<? super T, ? extends @Nullable NodeStream<? extends R>> mapper) {
            // all operations like #children, #followingSiblings, etc
            // operate on an eagerly evaluated stream. May be empty or
            // singleton
            return StreamImpl.fromNonNullList(toList()).flatMap(mapper);
        }
        @Override
        protected <S extends Node> NodeStream<S> copyWithFilter(Filtermap<Node, ? extends S> filterMap) {
            return new FilteredChildrenStream<>(node, filterMap, low, len);
        }
        @Override
        public Spliterator<T> spliterator() {
            return Spliterators.spliterator(iterator(), count(), Spliterator.SIZED | Spliterator.ORDERED);
        }
        @Override
        protected Iterator<Node> baseIterator() {
            return TraversalUtils.childrenIterator(node, low, low + len);
        }
        @Override
        protected <O extends Node> @Nullable O firstImpl(Filtermap<? super Node, ? extends O> filter) {
            return TraversalUtils.getFirstChildMatching(node, filter, low, len);
        }
        @Override
        protected <O extends Node> @Nullable O lastImpl(Filtermap<? super Node, ? extends O> filter) {
            return TraversalUtils.getLastChildMatching(node, filter, low, len);
        }
        @Override
        public int count() {
            return TraversalUtils.countChildrenMatching(node, filter, low, len);
        }
        @Override
        public boolean nonEmpty() {
            return first() != null;
        }
        @Override
        protected <O> List<O> toListImpl(Filtermap<? super Node, ? extends O> filter) {
            return TraversalUtils.findChildrenMatching(node, filter, low, len);
        }
        @Override
        public NodeStream<T> take(int maxSize) {
            AssertionUtil.requireNonNegative("maxSize", maxSize);
            // eager evaluation
            if (maxSize == 1) {
                return NodeStream.of(TraversalUtils.getFirstChildMatching(node, filter, low, len));
            }
            List<T> matching = TraversalUtils.findChildrenMatching(node, filter, low, len, maxSize);
            return StreamImpl.fromNonNullList(matching);
        }
        @Override
        public NodeStream<T> drop(int n) {
            AssertionUtil.requireNonNegative("n", n);
            if (n == 0) {
                return this;
            }
            // With a filter, slice arithmetic doesn't apply: evaluate eagerly.
            return StreamImpl.fromNonNullList(toList()).drop(n);
        }
        @Override
        public String toString() {
            return "FilteredSlice[" + node + ", " + low + ".." + (low + len) + "] -> " + toList();
        }
    }
    /** Implements following/preceding sibling streams. */
    static class ChildrenStream extends FilteredChildrenStream<Node> {
        ChildrenStream(@NonNull Node root, int low, int len) {
            super(root, Filtermap.NODE_IDENTITY, low, len);
        }
        @Nullable
        @Override
        public Node first() {
            return len > 0 ? node.getChild(low) : null;
        }
        @Nullable
        @Override
        public Node last() {
            return len > 0 ? node.getChild(low + len - 1) : null;
        }
        @Nullable
        @Override
        public Node get(int n) {
            AssertionUtil.requireNonNegative("n", n);
            return len > 0 && n < len ? node.getChild(low + n) : null;
        }
        // Without a filter, take/drop/dropLast are pure index arithmetic on the slice.
        @Override
        public NodeStream<Node> take(int maxSize) {
            AssertionUtil.requireNonNegative("maxSize", maxSize);
            return StreamImpl.sliceChildren(node, filter, low, min(maxSize, len));
        }
        @Override
        public NodeStream<Node> drop(int n) {
            AssertionUtil.requireNonNegative("n", n);
            if (n == 0) {
                return this;
            }
            int newLow = min(low + n, node.getNumChildren());
            int newLen = max(len - n, 0);
            return StreamImpl.sliceChildren(node, filter, newLow, newLen);
        }
        @Override
        public NodeStream<Node> dropLast(int n) {
            AssertionUtil.requireNonNegative("n", n);
            if (n == 0) {
                return this;
            }
            return take(max(len - n, 0));
        }
        @Override
        public boolean nonEmpty() {
            return len > 0;
        }
        @Override
        public int count() {
            return len;
        }
        @Override
        public String toString() {
            return "Slice[" + node + ", " + low + ".." + (low + len) + "] -> " + toList();
        }
    }
}
| 15,773 | 30.48503 | 142 | java |
pmd | pmd-master/pmd-core/src/main/java/net/sourceforge/pmd/lang/ast/internal/GreedyNStream.java | /*
* BSD-style license; for more info see http://pmd.sourceforge.net/license.html
*/
package net.sourceforge.pmd.lang.ast.internal;
import java.util.Iterator;
import java.util.List;
import java.util.Spliterator;
import java.util.Spliterators;
import java.util.function.Function;
import org.checkerframework.checker.nullness.qual.NonNull;
import org.checkerframework.checker.nullness.qual.Nullable;
import net.sourceforge.pmd.lang.ast.Node;
import net.sourceforge.pmd.lang.ast.NodeStream;
import net.sourceforge.pmd.util.AssertionUtil;
import net.sourceforge.pmd.util.CollectionUtil;
import net.sourceforge.pmd.util.IteratorUtil;
/**
* A greedy stream evaluates all axis operations, except for descendants,
* greedily.
*/
abstract class GreedyNStream<T extends Node> extends IteratorBasedNStream<T> {

    @Override
    protected <R extends Node> NodeStream<R> mapIter(Function<Iterator<T>, Iterator<R>> fun) {
        // Evaluate the mapped iterator eagerly into a list-backed stream.
        return StreamImpl.fromNonNullList(IteratorUtil.toNonNullList(fun.apply(iterator())));
    }

    /**
     * Returns the first element, or {@code null} if the stream is empty.
     *
     * <p>Bug fix: this previously returned {@code toList().get(0)}
     * unconditionally, which threw {@link IndexOutOfBoundsException} on an
     * empty stream instead of returning null like the other stream
     * implementations in this package (e.g. {@code AxisStream#first()}).
     */
    @Override
    public @Nullable T first() {
        List<T> tList = toList();
        return tList.isEmpty() ? null : tList.get(0);
    }

    @Override
    public @Nullable T get(int n) {
        AssertionUtil.requireNonNegative("n", n);
        List<T> tList = toList();
        return n < tList.size() ? tList.get(n) : null;
    }

    @Override
    public Iterator<T> iterator() {
        return toList().iterator();
    }

    @Override
    public int count() {
        return toList().size();
    }

    @Override
    public NodeStream<T> drop(int n) {
        // Validate like the other implementations (AxisStream) do.
        AssertionUtil.requireNonNegative("n", n);
        if (n == 0) {
            return this;
        }
        return StreamImpl.fromNonNullList(CollectionUtil.drop(toList(), n));
    }

    @Override
    public NodeStream<T> take(int maxSize) {
        AssertionUtil.requireNonNegative("maxSize", maxSize);
        if (maxSize >= count()) {
            return this;
        }
        return StreamImpl.fromNonNullList(CollectionUtil.take(toList(), maxSize));
    }

    /** Subclasses provide the backing list; all other operations derive from it. */
    @Override
    public abstract List<T> toList();

    @Override
    public Spliterator<T> spliterator() {
        Spliterator<T> spliter = toList().spliterator();
        return Spliterators.spliterator(iterator(), spliter.estimateSize(),
                                        spliter.characteristics() | Spliterator.NONNULL);
    }

    @Override
    public NodeStream<T> cached() {
        // Already fully evaluated: caching is a no-op.
        return this;
    }

    /** Greedy stream backed by a known, already-materialized list. */
    static class GreedyKnownNStream<T extends Node> extends GreedyNStream<T> {

        private final List<@NonNull T> coll;

        GreedyKnownNStream(List<@NonNull T> coll) {
            this.coll = coll;
        }

        @Override
        public List<T> toList() {
            return coll;
        }
    }
}
| 2,607 | 25.08 | 94 | java |
pmd | pmd-master/pmd-core/src/main/java/net/sourceforge/pmd/lang/ast/internal/Filtermap.java | /*
* BSD-style license; for more info see http://pmd.sourceforge.net/license.html
*/
package net.sourceforge.pmd.lang.ast.internal;
import java.util.Iterator;
import java.util.Objects;
import java.util.function.Function;
import java.util.function.Predicate;
import org.checkerframework.checker.nullness.qual.NonNull;
import org.checkerframework.checker.nullness.qual.Nullable;
import net.sourceforge.pmd.lang.ast.Node;
import net.sourceforge.pmd.util.IteratorUtil;
/**
* Combined filter/map predicate. Cannot accept null values.
*
* @param <I> Input type, contravariant
* @param <O> Output type, covariant
*/
@FunctionalInterface
interface Filtermap<I, O> extends Function<@NonNull I, @Nullable O>, Predicate<@NonNull I> {

    /** Identity filtermap for nodes: lets every node through unchanged. */
    Filtermap<Node, Node> NODE_IDENTITY = identityFilter();

    /**
     * Returns null if the filter rejects the value, otherwise returns
     * the transformed value. MUST return null for a null parameter.
     * (The previous doc inverted this: it said "accepts" where it meant
     * "rejects", contradicting {@link #test(Object)} below.)
     */
    @Override
    @Nullable O apply(@Nullable I i);

    /** A value is accepted iff it maps to a non-null result. */
    @Override
    default boolean test(@Nullable I i) {
        return apply(i) != null;
    }

    /** Filter an iterator, dropping elements that map to null. */
    default Iterator<O> filterMap(Iterator<? extends I> iter) {
        return IteratorUtil.mapNotNull(iter, this);
    }

    /** Compose a new Filtermap, coalescing null values. */
    default <R> Filtermap<I, R> thenApply(Function<@NonNull ? super O, @Nullable ? extends R> then) {
        Objects.requireNonNull(then);
        return i -> {
            if (i == null) {
                return null;
            }
            O o = this.apply(i);
            return o == null ? null : then.apply(o);
        };
    }

    /** Narrows the output to the given type, rejecting other values. */
    default <R> Filtermap<I, R> thenCast(Class<? extends R> rClass) {
        return thenApply(isInstance(rClass));
    }

    /** Composes an additional predicate on the output of this filtermap. */
    // Renamed the parameter from "rClass" (a copy-paste from thenCast):
    // it is a predicate, not a class.
    default Filtermap<I, O> thenFilter(Predicate<? super O> predicate) {
        return thenApply(filter(predicate));
    }

    /** Returns the identity filtermap, which accepts every non-null value as-is. */
    static <I> Filtermap<I, I> identityFilter() {
        return new Filtermap<I, I>() {
            @Override
            public I apply(@Nullable I i) {
                return i;
            }

            @Override
            @SuppressWarnings("unchecked")
            public <R> Filtermap<I, R> thenApply(Function<@NonNull ? super I, @Nullable ? extends R> then) {
                // Composing with the identity is just the other function.
                return then instanceof Filtermap ? (Filtermap<I, R>) then : Filtermap.super.thenApply(then);
            }

            @Override
            @SuppressWarnings("unchecked")
            public Iterator<I> filterMap(Iterator<? extends I> iter) {
                return (Iterator<I>) iter;
            }

            @Override
            public String toString() {
                return "IdentityFilter";
            }
        };
    }

    /** Adapts a plain predicate into a filtermap that passes matching values through. */
    static <I extends O, O> Filtermap<I, O> filter(Predicate<? super @NonNull I> pred) {
        return i -> i != null && pred.test(i) ? i : null;
    }

    /** Returns a filtermap that casts values to the given class, rejecting others. */
    @SuppressWarnings("unchecked") // safe: when oClass == Node.class, O is Node
    static <I, O> Filtermap<I, O> isInstance(Class<? extends O> oClass) {
        if (oClass == Node.class) {
            return (Filtermap<I, O>) NODE_IDENTITY;
        }
        return new Filtermap<I, O>() {
            @Override
            public @Nullable O apply(@Nullable I i) {
                return oClass.isInstance(i) ? (O) i : null;
            }

            @Override
            public String toString() {
                return "IsInstance[" + oClass + "]";
            }
        };
    }
}
| 3,441 | 26.536 | 108 | java |
pmd | pmd-master/pmd-core/src/main/java/net/sourceforge/pmd/lang/ast/internal/TreeWalker.java | /*
* BSD-style license; for more info see http://pmd.sourceforge.net/license.html
*/
package net.sourceforge.pmd.lang.ast.internal;
import java.util.ArrayDeque;
import java.util.ArrayList;
import java.util.Deque;
import java.util.Iterator;
import java.util.List;
import org.checkerframework.checker.nullness.qual.NonNull;
import net.sourceforge.pmd.lang.ast.Node;
import net.sourceforge.pmd.lang.ast.NodeStream.DescendantNodeStream;
/**
* Object performing tree traversals. Configuration options can be
* extended later on.
*
* @see DescendantNodeStream
*/
final class TreeWalker {
    // Only two configurations exist, so they are shared singletons.
    private static final TreeWalker CROSS = new TreeWalker(true);
    private static final TreeWalker DONT_CROSS = new TreeWalker(false);
    /**
     * Default traversal config used by methods like {@link Node#descendants()}
     */
    static final TreeWalker DEFAULT = DONT_CROSS;
    // Whether traversal descends into subtrees whose root is a find boundary.
    private final boolean crossFindBoundaries;
    private TreeWalker(boolean crossFindBoundaries) {
        this.crossFindBoundaries = crossFindBoundaries;
    }
    public boolean isCrossFindBoundaries() {
        return crossFindBoundaries;
    }
    /**
     * Returns a new walker with the given behaviour for find boundaries.
     */
    TreeWalker crossFindBoundaries(boolean cross) {
        return cross ? CROSS : DONT_CROSS;
    }
    /**
     * Configure the given node stream to use this walker.
     */
    <T extends Node> DescendantNodeStream<T> apply(DescendantNodeStream<T> stream) {
        return stream.crossFindBoundaries(crossFindBoundaries);
    }
    /**
     * Collects all descendants of {@code node} (excluding the node itself)
     * that the filtermap accepts, in depth-first preorder, into {@code results}.
     */
    <T> void findDescendantsMatching(final Node node,
                                     final Filtermap<? super Node, ? extends T> filtermap,
                                     final List<T> results) {
        for (int i = 0; i < node.getNumChildren(); i++) {
            final Node child = node.getChild(i);
            final T mapped = filtermap.apply(child);
            if (mapped != null) {
                results.add(mapped);
            }
            // A matching child is still recursed into: matches may nest.
            if (isCrossFindBoundaries() || !child.isFindBoundary()) {
                this.findDescendantsMatching(child, filtermap, results);
            }
        }
    }
    /**
     * Returns the first descendant of {@code node} (in depth-first preorder,
     * excluding the node itself) accepted by the filtermap, or null if none.
     */
    <T extends Node> T getFirstDescendantOfType(final Node node, final Filtermap<? super Node, ? extends T> filtermap) {
        final int n = node.getNumChildren();
        for (int i = 0; i < n; i++) {
            Node child = node.getChild(i);
            final T t = filtermap.apply(child);
            if (t != null) {
                return t;
            } else if (isCrossFindBoundaries() || !child.isFindBoundary()) {
                final T n2 = this.getFirstDescendantOfType(child, filtermap);
                if (n2 != null) {
                    return n2;
                }
            }
        }
        return null;
    }
    /** Like {@link #findDescendantsMatching(Node, Filtermap, List)}, returning a fresh list. */
    <T> List<T> findDescendantsMatching(final Node node, final Filtermap<? super Node, ? extends T> filtermap) {
        List<T> results = new ArrayList<>();
        findDescendantsMatching(node, filtermap, results);
        return results;
    }
    /** Iterator over {@code top} and its descendants, in depth-first preorder. */
    Iterator<Node> descendantOrSelfIterator(Node top) {
        return new DescendantOrSelfIterator(top, this);
    }
    /** Iterator over the descendants of {@code top}, excluding {@code top} itself. */
    Iterator<Node> descendantIterator(Node top) {
        DescendantOrSelfIterator iter = new DescendantOrSelfIterator(top, this);
        iter.next(); // skip self
        return iter;
    }
    /** Iterates over a node and its descendants. */
    private static class DescendantOrSelfIterator implements Iterator<@NonNull Node> {
        // Work stack: the head is the next node to return (preorder DFS).
        private final Deque<Node> queue = new ArrayDeque<>();
        private final TreeWalker config;
        // True until the first node (the top) has been returned; the top's
        // children are always enqueued even if the top is a find boundary.
        private boolean isFirst;
        /** Always {@link #hasNext()} after exiting the constructor. */
        DescendantOrSelfIterator(Node top, TreeWalker walker) {
            this.config = walker;
            this.isFirst = true;
            queue.addFirst(top);
        }
        @Override
        public boolean hasNext() {
            return !queue.isEmpty();
        }
        @Override
        public @NonNull Node next() {
            Node node = queue.removeFirst();
            enqueueChildren(node);
            isFirst = false;
            return node;
        }
        private void enqueueChildren(Node n) {
            // on the first node, we must cross find boundaries anyway
            if (config.isCrossFindBoundaries() || !n.isFindBoundary() || isFirst) {
                // Push in reverse so the leftmost child is visited first.
                for (int i = n.getNumChildren() - 1; i >= 0; i--) {
                    queue.addFirst(n.getChild(i));
                }
            }
        }
    }
}
| 4,588 | 29.593333 | 120 | java |
pmd | pmd-master/pmd-core/src/main/java/net/sourceforge/pmd/lang/ast/internal/SingletonNodeStream.java | /*
* BSD-style license; for more info see http://pmd.sourceforge.net/license.html
*/
package net.sourceforge.pmd.lang.ast.internal;
import java.util.Collections;
import java.util.Iterator;
import java.util.List;
import java.util.function.Consumer;
import java.util.function.Function;
import java.util.function.Predicate;
import java.util.stream.Stream;
import org.checkerframework.checker.nullness.qual.NonNull;
import org.checkerframework.checker.nullness.qual.Nullable;
import net.sourceforge.pmd.lang.ast.Node;
import net.sourceforge.pmd.lang.ast.NodeStream;
import net.sourceforge.pmd.lang.ast.NodeStream.DescendantNodeStream;
import net.sourceforge.pmd.util.AssertionUtil;
import net.sourceforge.pmd.util.IteratorUtil;
/**
* Optimised node stream implementation for a single element. Streams
* returned by eg {@link #descendants()} have optimised implementations
* for several common operations which most of the time don't need to
* iterate a stream directly. Their performance is equivalent to pre
* 7.0.0 traversal operations defined on the {@link Node} interface.
* When they don't have an optimised implementation, they fall back on
* stream processing.
*
* <p>This ensures that short pipelines like {@code node.descendants().first()}
* are as efficient as the pre 7.0.0 methods.
*/
final class SingletonNodeStream<T extends Node> extends IteratorBasedNStream<T> implements DescendantNodeStream<T> {
private final T node;
SingletonNodeStream(@NonNull T node) {
assert node != null : "null node!";
this.node = node;
}
@Override
public Stream<T> toStream() {
return Stream.of(node);
}
@Override
public List<T> toList() {
return Collections.singletonList(node);
}
@Override
public <R> List<R> toList(Function<? super T, ? extends R> mapper) {
return Collections.singletonList(mapper.apply(node));
}
@Override
public int count() {
return 1;
}
@Override
public T first() {
return node;
}
@Override
public T last() {
return node;
}
@Override
public boolean nonEmpty() {
return true;
}
@Override
public Iterator<T> iterator() {
return IteratorUtil.singletonIterator(node);
}
@Override
public void forEach(Consumer<? super T> action) {
action.accept(node);
}
@Override
public NodeStream<T> filter(Predicate<? super T> predicate) {
return predicate.test(node) ? this : NodeStream.empty();
}
@Override
public NodeStream<T> drop(int n) {
AssertionUtil.requireNonNegative("n", n);
return n == 0 ? this : NodeStream.empty();
}
@Override
public NodeStream<T> take(int maxSize) {
AssertionUtil.requireNonNegative("maxSize", maxSize);
return maxSize >= 1 ? this : NodeStream.empty();
}
@Override
public NodeStream<T> cached() {
return this;
}
@Override
public NodeStream<T> distinct() {
return this;
}
@Override
public NodeStream<T> takeWhile(Predicate<? super T> predicate) {
return filter(predicate);
}
@Override
public <R extends Node> NodeStream<@NonNull R> map(Function<? super T, ? extends @Nullable R> mapper) {
return NodeStream.of(mapper.apply(node));
}
@Override
@SuppressWarnings("unchecked")
public <R extends Node> NodeStream<R> flatMap(Function<? super T, ? extends NodeStream<? extends R>> mapper) {
return (NodeStream<R>) mapper.apply(node);
}
@Override
public boolean any(Predicate<? super T> predicate) {
return predicate.test(node);
}
@Override
public boolean all(Predicate<? super T> predicate) {
return predicate.test(node);
}
@Override
public boolean none(Predicate<? super T> predicate) {
return !predicate.test(node);
}
/*
tree navigation
*/
@Override
public NodeStream<Node> children() {
return StreamImpl.children(node);
}
@Override
public <R extends Node> NodeStream<R> children(Class<? extends R> rClass) {
return StreamImpl.children(node, rClass);
}
@Override
public <R extends Node> NodeStream<R> firstChild(Class<? extends R> rClass) {
return NodeStream.of(node.firstChild(rClass));
}
@Override
public NodeStream<Node> parents() {
return NodeStream.of(node.getParent());
}
@Override
public NodeStream<Node> ancestors() {
return StreamImpl.ancestors(node);
}
@Override
public <R extends Node> NodeStream<R> ancestors(Class<? extends R> rClass) {
return StreamImpl.ancestors(node, rClass);
}
@Override
public NodeStream<Node> ancestorsOrSelf() {
return StreamImpl.ancestorsOrSelf(node);
}
@Override
public DescendantNodeStream<Node> descendants() {
return StreamImpl.descendants(node);
}
@Override
public <R extends Node> DescendantNodeStream<R> descendants(Class<? extends R> rClass) {
return StreamImpl.descendants(node, rClass);
}
@Override
public DescendantNodeStream<Node> descendantsOrSelf() {
return StreamImpl.descendantsOrSelf(node);
}
@Override
public NodeStream<Node> followingSiblings() {
return StreamImpl.followingSiblings(node);
}
@Override
public NodeStream<Node> precedingSiblings() {
return StreamImpl.precedingSiblings(node);
}
@Override
public DescendantNodeStream<T> crossFindBoundaries(boolean cross) {
return this; // doesn't mean anything
}
}
| 5,670 | 25.013761 | 116 | java |
pmd | pmd-master/pmd-core/src/main/java/net/sourceforge/pmd/lang/ast/internal/StreamImpl.java | /*
* BSD-style license; for more info see http://pmd.sourceforge.net/license.html
*/
package net.sourceforge.pmd.lang.ast.internal;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.Iterator;
import java.util.List;
import java.util.Spliterator;
import java.util.Spliterators;
import java.util.function.Consumer;
import java.util.function.Function;
import org.checkerframework.checker.nullness.qual.NonNull;
import org.checkerframework.checker.nullness.qual.Nullable;
import net.sourceforge.pmd.lang.ast.Node;
import net.sourceforge.pmd.lang.ast.NodeStream;
import net.sourceforge.pmd.lang.ast.NodeStream.DescendantNodeStream;
import net.sourceforge.pmd.lang.ast.impl.AbstractNode;
import net.sourceforge.pmd.lang.ast.impl.GenericNode;
import net.sourceforge.pmd.lang.ast.internal.AxisStream.AncestorOrSelfStream;
import net.sourceforge.pmd.lang.ast.internal.AxisStream.ChildrenStream;
import net.sourceforge.pmd.lang.ast.internal.AxisStream.DescendantOrSelfStream;
import net.sourceforge.pmd.lang.ast.internal.AxisStream.DescendantStream;
import net.sourceforge.pmd.lang.ast.internal.AxisStream.FilteredAncestorOrSelfStream;
import net.sourceforge.pmd.lang.ast.internal.AxisStream.FilteredChildrenStream;
import net.sourceforge.pmd.lang.ast.internal.AxisStream.FilteredDescendantStream;
import net.sourceforge.pmd.lang.ast.internal.GreedyNStream.GreedyKnownNStream;
import net.sourceforge.pmd.util.IteratorUtil;
public final class StreamImpl {

    /** Shared empty stream; safe to share because it is stateless. */
    @SuppressWarnings({"rawtypes", "PMD.UseDiamondOperator"})
    private static final DescendantNodeStream EMPTY = new EmptyNodeStream();

    private StreamImpl() {
        // utility class
    }

    /** Returns a stream containing exactly the given node. */
    public static <T extends Node> DescendantNodeStream<T> singleton(@NonNull T node) {
        return new SingletonNodeStream<>(node);
    }

    /**
     * Builds a stream from the non-null elements of the iterable.
     * Collections are special-cased to pick the optimal empty/singleton
     * implementations without copying.
     */
    public static <T extends Node> NodeStream<T> fromIterable(Iterable<? extends @Nullable T> iterable) {
        if (iterable instanceof Collection) {
            @SuppressWarnings("unchecked") // read-only access, elements are Ts
            Collection<? extends @Nullable T> coll = (Collection<T>) iterable;
            if (coll.isEmpty()) {
                return empty();
            } else if (coll.size() == 1) {
                return NodeStream.of(coll.iterator().next());
            }
        }
        return fromNonNullList(IteratorUtil.toNonNullList(iterable.iterator()));
    }

    /** Concatenates the given streams lazily, skipping null streams. */
    public static <T extends Node> NodeStream<T> union(Iterable<? extends @Nullable NodeStream<? extends T>> streams) {
        return new IteratorBasedNStream<T>() {
            @Override
            public Iterator<T> iterator() {
                return IteratorUtil.flatMap(streams.iterator(), NodeStream::iterator);
            }
        };
    }

    /** Returns the shared empty stream. */
    @SuppressWarnings("unchecked")
    public static <T extends Node> DescendantNodeStream<T> empty() {
        return EMPTY;
    }

    /** Returns the children of the node that are instances of {@code target}. */
    public static <R extends Node> NodeStream<R> children(@NonNull Node node, Class<? extends R> target) {
        return sliceChildren(node, Filtermap.isInstance(target), 0, node.getNumChildren());
    }

    /** Returns all children of the node. */
    public static NodeStream<Node> children(@NonNull Node node) {
        return sliceChildren(node, Filtermap.NODE_IDENTITY, 0, node.getNumChildren());
    }

    /**
     * The optimized implementation of {@link NodeStream#children()} for
     * {@link AbstractNode}. It is important that it returns always the
     * same node stream type and makes no effort to pick an empty or singleton
     * stream if possible. That allows the JVM to devirtualize calls.
     */
    @SuppressWarnings({ "unchecked", "rawtypes" })
    public static <N extends GenericNode<N>> NodeStream<N> childrenArray(GenericNode<N> parent,
                                                                         Node @NonNull [] array) {
        return (NodeStream) new ChildrenStream(parent, 0, parent.getNumChildren()) {
            @Override
            public void forEach(Consumer<? super @NonNull Node> action) {
                // Looping on the array directly is about twice faster than
                // the default implementation.
                for (Node child : array) {
                    action.accept(child);
                }
            }

            @Override
            protected Iterator<Node> baseIterator() {
                return Arrays.asList(array).iterator();
            }
        };
    }

    /** Returns the descendants of the node, depth-first, document order. */
    public static DescendantNodeStream<Node> descendants(@NonNull Node node) {
        return node.getNumChildren() == 0 ? empty() : new DescendantStream(node, TreeWalker.DEFAULT);
    }

    /** Returns the descendants of the node that are instances of {@code rClass}. */
    public static <R extends Node> DescendantNodeStream<R> descendants(@NonNull Node node, Class<? extends R> rClass) {
        return node.getNumChildren() == 0 ? empty()
                                          : new FilteredDescendantStream<>(node, TreeWalker.DEFAULT, Filtermap.isInstance(rClass));
    }

    /** Returns the node followed by its descendants. */
    public static DescendantNodeStream<Node> descendantsOrSelf(@NonNull Node node) {
        return node.getNumChildren() == 0 ? singleton(node) : new DescendantOrSelfStream(node, TreeWalker.DEFAULT);
    }

    /** Returns the siblings that follow the given node, in document order. */
    public static NodeStream<Node> followingSiblings(@NonNull Node node) {
        Node parent = node.getParent();
        if (parent == null || parent.getNumChildren() == 1) {
            return NodeStream.empty();
        }
        // hoisted: previously getIndexInParent() was queried twice
        final int idx = node.getIndexInParent();
        return sliceChildren(parent, Filtermap.NODE_IDENTITY,
                             idx + 1,
                             parent.getNumChildren() - idx - 1
        );
    }

    /** Returns the siblings that precede the given node, in document order. */
    public static NodeStream<Node> precedingSiblings(@NonNull Node node) {
        Node parent = node.getParent();
        if (parent == null || parent.getNumChildren() == 1) {
            return NodeStream.empty();
        }
        return sliceChildren(parent, Filtermap.NODE_IDENTITY, 0, node.getIndexInParent());
    }

    /**
     * Returns a stream over the children of {@code parent} with indices in
     * [from, from+length), filtered/mapped by the filtermap. Picks the
     * cheapest implementation (empty, singleton, unfiltered slice).
     */
    static <T extends Node> NodeStream<T> sliceChildren(Node parent, Filtermap<Node, ? extends T> filtermap, int from, int length) {
        // these assertions are just for tests
        assert parent != null;
        assert from >= 0 && from <= parent.getNumChildren() : "from should be a valid index";
        assert length >= 0 : "length should not be negative";
        assert from + length >= 0 && from + length <= parent.getNumChildren() : "from+length should be a valid index";

        if (length == 0) {
            return empty();
        } else if (filtermap == Filtermap.NODE_IDENTITY) { // NOPMD CompareObjectsWithEquals
            @SuppressWarnings("unchecked")
            NodeStream<T> res = length == 1 ? (NodeStream<T>) singleton(parent.getChild(from))
                                            : (NodeStream<T>) new ChildrenStream(parent, from, length);
            return res;
        } else {
            if (length == 1) {
                // eager evaluation, empty or singleton
                return NodeStream.of(filtermap.apply(parent.getChild(from)));
            } else {
                return new FilteredChildrenStream<>(parent, filtermap, from, length);
            }
        }
    }

    /** Returns the node followed by its ancestors, innermost first. */
    public static NodeStream<Node> ancestorsOrSelf(@Nullable Node node) {
        return ancestorsOrSelf(node, Filtermap.NODE_IDENTITY);
    }

    /** Returns the node and its ancestors, filtered/mapped by {@code target}. */
    static <T extends Node> NodeStream<T> ancestorsOrSelf(@Nullable Node node, Filtermap<Node, ? extends T> target) {
        if (node == null) {
            return empty();
        }
        if (target == Filtermap.NODE_IDENTITY) { // NOPMD CompareObjectsWithEquals
            return (NodeStream<T>) new AncestorOrSelfStream(node);
        }
        // start the filtered stream at the first match, so it is never empty
        T first = TraversalUtils.getFirstParentOrSelfMatching(node, target);
        if (first == null) {
            return empty();
        }
        return new FilteredAncestorOrSelfStream<>(first, target);
    }

    /** Returns the ancestors of the node, innermost first. */
    public static NodeStream<Node> ancestors(@NonNull Node node) {
        return ancestorsOrSelf(node.getParent());
    }

    /** Returns the ancestors of the node, filtered/mapped by {@code target}. */
    static <R extends Node> NodeStream<R> ancestors(@NonNull Node node, Filtermap<Node, ? extends R> target) {
        return ancestorsOrSelf(node.getParent(), target);
    }

    /** Returns the ancestors of the node that are instances of {@code target}. */
    public static <R extends Node> NodeStream<R> ancestors(@NonNull Node node, Class<? extends R> target) {
        return ancestorsOrSelf(node.getParent(), Filtermap.isInstance(target));
    }

    /** Wraps a list that is known to contain no null elements. */
    static <T extends Node> NodeStream<T> fromNonNullList(List<@NonNull T> coll) {
        if (coll.isEmpty()) {
            return empty();
        } else if (coll.size() == 1) {
            return singleton(coll.get(0));
        }
        return new GreedyKnownNStream<>(coll);
    }

    /** Stateless empty stream, shared through {@link #EMPTY}. */
    private static final class EmptyNodeStream<N extends Node> extends IteratorBasedNStream<N> implements DescendantNodeStream<N> {

        @Override
        protected <R extends Node> NodeStream<R> mapIter(Function<Iterator<N>, Iterator<R>> fun) {
            return StreamImpl.empty();
        }

        @Override
        protected @NonNull <R extends Node> DescendantNodeStream<R> flatMapDescendants(Function<N, DescendantNodeStream<? extends R>> mapper) {
            return StreamImpl.empty();
        }

        @Override
        public DescendantNodeStream<N> crossFindBoundaries(boolean cross) {
            return this;
        }

        @Override
        public Iterator<N> iterator() {
            return Collections.emptyIterator();
        }

        @Override
        public List<N> toList() {
            return Collections.emptyList();
        }

        @Override
        public <R> List<R> toList(Function<? super N, ? extends R> mapper) {
            return Collections.emptyList();
        }

        @Override
        public Spliterator<N> spliterator() {
            return Spliterators.emptySpliterator();
        }

        @Override
        public NodeStream<N> cached() {
            return this;
        }

        @Override
        public String toString() {
            return "EmptyStream";
        }
    }
}
| 9,889 | 36.89272 | 143 | java |
pmd | pmd-master/pmd-core/src/main/java/net/sourceforge/pmd/lang/ast/internal/AncestorOrSelfIterator.java | /*
* BSD-style license; for more info see http://pmd.sourceforge.net/license.html
*/
package net.sourceforge.pmd.lang.ast.internal;
import java.util.Iterator;
import java.util.NoSuchElementException;
import org.checkerframework.checker.nullness.qual.NonNull;
import net.sourceforge.pmd.lang.ast.Node;
/** Iterates over a node and its ancestors. */
/** Iterates over a node and its ancestors, innermost first. */
class AncestorOrSelfIterator implements Iterator<@NonNull Node> {

    /** The node to return from the next call to {@link #next()}, null when exhausted. */
    private Node current;

    AncestorOrSelfIterator(@NonNull Node top) {
        current = top;
    }

    @Override
    public boolean hasNext() {
        return current != null;
    }

    @Override
    public Node next() {
        final Node result = current;
        if (result == null) {
            throw new NoSuchElementException();
        }
        current = result.getParent(); // null at the root, which ends the iteration
        return result;
    }
}
| 805 | 19.15 | 79 | java |
pmd | pmd-master/pmd-core/src/main/java/net/sourceforge/pmd/lang/ast/internal/TraversalUtils.java | /*
* BSD-style license; for more info see http://pmd.sourceforge.net/license.html
*/
package net.sourceforge.pmd.lang.ast.internal;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Iterator;
import java.util.List;
import java.util.NoSuchElementException;

import org.checkerframework.checker.nullness.qual.NonNull;

import net.sourceforge.pmd.lang.ast.Node;
final class TraversalUtils {

    /*
       Note that the methods of this class must not use node streams
       to iterate on children, because node streams are implemented
       using these methods.
     */

    private TraversalUtils() {
        // utility class
    }

    /**
     * Returns the first node among {@code node} and its ancestors
     * that the filter accepts, or null if none matches.
     */
    static <T extends Node> T getFirstParentOrSelfMatching(final Node node, final Filtermap<? super Node, ? extends T> filter) {
        Node n = node;
        while (n != null) {
            T t = filter.apply(n);
            if (t != null) {
                return t;
            }
            n = n.getParent();
        }
        return null;
    }

    /**
     * Returns the first child of {@code node} with index in [from, from+len)
     * that the filter accepts, or null if none matches.
     */
    static <T extends Node> T getFirstChildMatching(final Node node, final Filtermap<? super Node, ? extends T> filter, int from, int len) {
        for (int i = from, last = from + len; i < last; i++) {
            Node c = node.getChild(i);
            T t = filter.apply(c);
            if (t != null) {
                return t;
            }
        }
        return null;
    }

    /**
     * Returns the last child of {@code node} with index in [from, from+len)
     * that the filter accepts, or null if none matches.
     */
    static <T extends Node> T getLastChildMatching(final Node node, final Filtermap<? super Node, ? extends T> filter, int from, int len) {
        for (int i = from + len - 1; i >= from; i--) {
            Node c = node.getChild(i);
            T t = filter.apply(c);
            if (t != null) {
                return t;
            }
        }
        return null;
    }

    /** Collects the children with index in [from, from+len) accepted by the filter. */
    static <T> List<T> findChildrenMatching(final Node node, final Filtermap<? super Node, ? extends T> filter, int from, int len) {
        return findChildrenMatching(node, filter, from, len, Integer.MAX_VALUE);
    }

    /**
     * Collects at most {@code maxSize} children with index in [from, from+len)
     * accepted by the filter, in document order.
     */
    static <T> List<T> findChildrenMatching(final Node node, final Filtermap<? super Node, ? extends T> filter, int from, int len, int maxSize) {
        if (maxSize == 0) {
            return Collections.emptyList();
        }
        List<T> list = new ArrayList<>();
        for (int i = from, last = from + len; i < last; i++) {
            Node c = node.getChild(i);
            T t = filter.apply(c);
            if (t != null) {
                list.add(t);
                if (list.size() >= maxSize) {
                    return list;
                }
            }
        }
        return list;
    }

    /** Counts the children with index in [from, from+len) accepted by the filter. */
    static <T extends Node> int countChildrenMatching(final Node node, final Filtermap<Node, T> filter, int from, int len) {
        int sum = 0;
        for (int i = from, last = from + len; i < last; i++) {
            Node c = node.getChild(i);
            T t = filter.apply(c);
            if (t != null) {
                sum++;
            }
        }
        return sum;
    }

    /**
     * Returns an iterator over the children of {@code parent} with index
     * in [from, to).
     *
     * <p>Fix: {@code next()} now throws {@link NoSuchElementException} when
     * the iterator is exhausted, as required by the {@link Iterator}
     * contract, instead of forwarding an out-of-range index to
     * {@code getChild}.
     */
    static Iterator<Node> childrenIterator(Node parent, final int from, final int to) {
        assert parent != null : "parent should not be null";
        assert from >= 0 && from <= parent.getNumChildren() : "'from' should be a valid index";
        assert to >= 0 && to <= parent.getNumChildren() : "'to' should be a valid index";
        assert from <= to : "'from' should be lower than 'to'";

        if (to == from) {
            return Collections.emptyIterator();
        }

        return new Iterator<Node>() {
            private int i = from;

            @Override
            public boolean hasNext() {
                return i < to;
            }

            @Override
            public @NonNull Node next() {
                if (i >= to) {
                    throw new NoSuchElementException();
                }
                return parent.getChild(i++);
            }
        };
    }
}
| 3,758 | 29.072 | 145 | java |
pmd | pmd-master/pmd-core/src/main/java/net/sourceforge/pmd/lang/ast/internal/IteratorBasedNStream.java | /*
* BSD-style license; for more info see http://pmd.sourceforge.net/license.html
*/
package net.sourceforge.pmd.lang.ast.internal;
import java.util.Collections;
import java.util.Iterator;
import java.util.List;
import java.util.Objects;
import java.util.Spliterator;
import java.util.Spliterators;
import java.util.function.BiConsumer;
import java.util.function.Consumer;
import java.util.function.Function;
import java.util.function.Predicate;
import java.util.stream.Collector;
import java.util.stream.Collectors;
import java.util.stream.Stream;
import java.util.stream.StreamSupport;
import org.checkerframework.checker.nullness.qual.NonNull;
import org.checkerframework.checker.nullness.qual.Nullable;
import net.sourceforge.pmd.lang.ast.Node;
import net.sourceforge.pmd.lang.ast.NodeStream;
import net.sourceforge.pmd.util.AssertionUtil;
import net.sourceforge.pmd.util.CollectionUtil;
import net.sourceforge.pmd.util.IteratorUtil;
/**
* Implementations are based on the iterator rather than the stream.
* Benchmarking shows that stream overhead is significant, and doesn't
* decrease when the pipeline grows longer.
*/
abstract class IteratorBasedNStream<T extends Node> implements NodeStream<T> {
    /** The single abstract operation; every pipeline stage is derived from it. */
    @Override
    public abstract Iterator<T> iterator();
    @Override
    public Spliterator<T> spliterator() {
        // node streams are ordered, but their size is unknown in general
        return Spliterators.spliteratorUnknownSize(iterator(), Spliterator.ORDERED);
    }
    @Override
    public Stream<T> toStream() {
        return StreamSupport.stream(spliterator(), false);
    }
    @Override
    public <R extends Node> NodeStream<R> flatMap(Function<? super T, ? extends @Nullable NodeStream<? extends R>> mapper) {
        // NOTE: the intermediate local with its complete type is needed so
        // that this compiles with ecj (the Eclipse compiler)
        // see https://bugs.eclipse.org/bugs/show_bug.cgi?id=561482
        Function<? super T, Iterator<? extends R>> mapped = mapper.andThen(IteratorBasedNStream::safeMap);
        return mapIter(iter -> IteratorUtil.flatMap(iter, mapped));
    }
    // tolerates a null result from a flatMap mapper, treating it as empty
    private static <R extends Node> @NonNull Iterator<? extends R> safeMap(@Nullable NodeStream<? extends R> ns) {
        return ns == null ? Collections.emptyIterator() : ns.iterator();
    }
    @Override
    public <R extends Node> NodeStream<@NonNull R> map(Function<? super T, ? extends @Nullable R> mapper) {
        // null results of the mapper are dropped from the stream
        return mapIter(iter -> IteratorUtil.mapNotNull(iter, mapper));
    }
    @Override
    public NodeStream<T> filter(Predicate<? super @NonNull T> predicate) {
        return mapIter(it -> IteratorUtil.mapNotNull(it, Filtermap.filter(predicate)));
    }
    @Override
    public <R extends Node> NodeStream<R> filterIs(Class<? extends R> rClass) {
        return mapIter(it -> IteratorUtil.mapNotNull(it, Filtermap.isInstance(rClass)));
    }
    @Override
    public DescendantNodeStream<Node> descendants() {
        return flatMapDescendants(Node::descendants);
    }
    @Override
    public DescendantNodeStream<Node> descendantsOrSelf() {
        return flatMapDescendants(Node::descendantsOrSelf);
    }
    @Override
    public <R extends Node> DescendantNodeStream<R> descendants(Class<? extends R> rClass) {
        return flatMapDescendants(node -> node.descendants(rClass));
    }
    /** Hook for subclasses to optimise descendant traversal (see EmptyNodeStream). */
    @NonNull
    protected <R extends Node> DescendantNodeStream<R> flatMapDescendants(Function<T, DescendantNodeStream<? extends R>> mapper) {
        return new DescendantMapping<>(this, mapper);
    }
    @Override
    public void forEach(Consumer<? super T> action) {
        iterator().forEachRemaining(action);
    }
    @Override
    public @Nullable T get(int n) {
        if (n == 0) {
            // first() may be overridden with a cheaper implementation
            return first();
        }
        return IteratorUtil.getNth(iterator(), n);
    }
    @Override
    public NodeStream<T> drop(int n) {
        AssertionUtil.requireNonNegative("n", n);
        return n == 0 ? this : mapIter(iter -> IteratorUtil.drop(iter, n));
    }
    @Override
    public NodeStream<T> take(int maxSize) {
        AssertionUtil.requireNonNegative("maxSize", maxSize);
        return maxSize == 0 ? NodeStream.empty() : mapIter(iter -> IteratorUtil.take(iter, maxSize));
    }
    @Override
    public NodeStream<T> dropLast(int n) {
        AssertionUtil.requireNonNegative("n", n);
        return n == 0 ? this : mapIter(iter -> IteratorUtil.dropLast(iter, n));
    }
    @Override
    public NodeStream<T> takeWhile(Predicate<? super T> predicate) {
        return mapIter(iter -> IteratorUtil.takeWhile(iter, predicate));
    }
    @Override
    public final <R, A> R collect(Collector<? super T, A, R> collector) {
        // drives the collector manually, avoiding java.util.stream overhead
        A container = collector.supplier().get();
        BiConsumer<A, ? super T> accumulator = collector.accumulator();
        forEach(u -> accumulator.accept(container, u));
        return CollectionUtil.finish(collector, container);
    }
    @Override
    public NodeStream<T> distinct() {
        return mapIter(IteratorUtil::distinct);
    }
    @Override
    public NodeStream<T> peek(Consumer<? super T> action) {
        return mapIter(iter -> IteratorUtil.peek(iter, action));
    }
    @Override
    public NodeStream<T> append(NodeStream<? extends T> right) {
        return mapIter(iter -> IteratorUtil.concat(iter, right.iterator()));
    }
    @Override
    public NodeStream<T> prepend(NodeStream<? extends T> right) {
        return mapIter(iter -> IteratorUtil.concat(right.iterator(), iter));
    }
    @Override
    public boolean any(Predicate<? super T> predicate) {
        return IteratorUtil.anyMatch(iterator(), predicate);
    }
    @Override
    public boolean none(Predicate<? super T> predicate) {
        return IteratorUtil.noneMatch(iterator(), predicate);
    }
    @Override
    public boolean all(Predicate<? super T> predicate) {
        return IteratorUtil.allMatch(iterator(), predicate);
    }
    @Override
    public int count() {
        return IteratorUtil.count(iterator());
    }
    @Override
    public boolean nonEmpty() {
        return iterator().hasNext();
    }
    @Override
    public @Nullable T first() {
        Iterator<T> iter = iterator();
        return iter.hasNext() ? iter.next() : null;
    }
    @Override
    public @Nullable T last() {
        return IteratorUtil.last(iterator());
    }
    @Override
    public List<T> toList() {
        return IteratorUtil.toList(iterator());
    }
    @Override
    public <R extends Node> @Nullable R first(Class<? extends R> r1Class) {
        for (T t : this) {
            if (r1Class.isInstance(t)) {
                return r1Class.cast(t);
            }
        }
        return null;
    }
    @Override
    public @Nullable T first(Predicate<? super T> predicate) {
        for (T t : this) {
            if (predicate.test(t)) {
                return t;
            }
        }
        return null;
    }
    @Override
    public NodeStream<T> cached() {
        // evaluates the pipeline eagerly into a list-backed stream
        return StreamImpl.fromNonNullList(toList());
    }
    /** Central pipeline builder: wraps this stream's iterator with a transformation. */
    protected <R extends Node> NodeStream<R> mapIter(Function<Iterator<T>, Iterator<R>> fun) {
        return new IteratorMapping<>(fun);
    }
    @Override
    public String toString() {
        return getClass().getSimpleName() + " ["
            + toStream().map(Objects::toString).collect(Collectors.joining(", "))
            + "]";
    }
    /** Lazy pipeline stage: applies {@link #fun} to the upstream iterator on each iteration. */
    private final class IteratorMapping<S extends Node> extends IteratorBasedNStream<S> {
        private final Function<Iterator<T>, Iterator<S>> fun;
        private IteratorMapping(Function<Iterator<T>, Iterator<S>> fun) {
            this.fun = fun;
        }
        @Override
        public Iterator<S> iterator() {
            return fun.apply(IteratorBasedNStream.this.iterator());
        }
    }
    /** Descendant stage that remembers its {@link TreeWalker} so the find-boundary flag can be reconfigured. */
    private static class DescendantMapping<T extends Node, S extends Node> extends IteratorBasedNStream<S> implements DescendantNodeStream<S> {
        private final Function<? super T, ? extends DescendantNodeStream<? extends S>> fun;
        private final TreeWalker walker;
        private final IteratorBasedNStream<T> upstream;
        private DescendantMapping(IteratorBasedNStream<T> upstream, Function<? super T, ? extends DescendantNodeStream<? extends S>> fun, TreeWalker walker) {
            this.fun = fun;
            this.walker = walker;
            this.upstream = upstream;
        }
        DescendantMapping(IteratorBasedNStream<T> upstream, Function<? super T, ? extends DescendantNodeStream<? extends S>> fun) {
            this(upstream, fun, TreeWalker.DEFAULT);
        }
        @Override
        public Iterator<S> iterator() {
            return IteratorUtil.flatMap(
                upstream.iterator(),
                t -> {
                    DescendantNodeStream<? extends S> app = fun.apply(t);
                    // configure each sub-stream with this stage's walker
                    return walker.apply(app).iterator();
                });
        }
        @Override
        public DescendantNodeStream<S> crossFindBoundaries(boolean cross) {
            // avoid allocating when the configuration doesn't change
            return walker.isCrossFindBoundaries() == cross
                   ? this
                   : new DescendantMapping<>(upstream, fun, walker.crossFindBoundaries(cross));
        }
    }
}
| 9,125 | 29.935593 | 158 | java |
pmd | pmd-master/pmd-core/src/main/java/net/sourceforge/pmd/lang/metrics/package-info.java | /*
* BSD-style license; for more info see http://pmd.sourceforge.net/license.html
*/
/**
 * Language-independent framework to represent code metrics. To find the built-in
 * metrics for a language, look for the language-specific
 * utility class containing {@link net.sourceforge.pmd.lang.metrics.Metric}
 * constants, e.g. in Java, {@code JavaMetrics}.
*
* <p>See {@link net.sourceforge.pmd.lang.metrics.Metric} and {@link net.sourceforge.pmd.lang.metrics.MetricsUtil}
* for usage documentation. In some language modules, XPath rules may
* use metrics through an XPath function, e.g. <a href="pmd_userdocs_extending_writing_xpath_rules.html#pmd-java-metric">pmd-java:metric</a>
* function.
*/
package net.sourceforge.pmd.lang.metrics;
| 739 | 42.529412 | 140 | java |
pmd | pmd-master/pmd-core/src/main/java/net/sourceforge/pmd/lang/metrics/Metric.java | /**
* BSD-style license; for more info see http://pmd.sourceforge.net/license.html
*/
package net.sourceforge.pmd.lang.metrics;
import static net.sourceforge.pmd.util.CollectionUtil.listOf;
import java.util.List;
import java.util.function.BiFunction;
import java.util.function.Function;
import org.checkerframework.checker.nullness.qual.NonNull;
import org.checkerframework.checker.nullness.qual.Nullable;
import net.sourceforge.pmd.lang.ast.Node;
import net.sourceforge.pmd.util.AssertionUtil;
import net.sourceforge.pmd.util.DataMap.DataKey;
/**
* A named computation that can be carried out on some nodes. Example
* include complexity metrics, like cyclomatic complexity.
*
* <p>Use with {@link MetricsUtil}, for example, in the Java module:
* <pre>{@code
* if (JavaMetrics.CYCLO.supports(node)) {
* int cyclo = MetricsUtil.computeMetric(JavaMetrics.CYCLO, node);
* ...
* }
* }</pre>
*
* <p>Note that the {@code supports} check is necessary (metrics cannot
* necessarily be computed on any node of the type they support).
*
* <p>Metrics support a concept of {@linkplain MetricOption options},
* which can be passed to {@link Metric#compute(Metric, Node, MetricOptions) compute}
* or {@link MetricsUtil#computeMetric(Metric, Node, MetricOptions)}.
*
* <p>Metric instances are stateless by contract.
*
* <p>To implement your own metrics, use the factory method {@link #of(BiFunction, Function, String, String...) of}.
* Be aware though, that you cannot register a custom metric into a
* {@link LanguageMetricsProvider}, which means your metric will not be
* available from XPath.
*
* @param <N> Type of nodes the metric can be computed on
* @param <R> Result type of the metric
*
* @author Clément Fournier
* @since 6.0.0
*/
public interface Metric<N extends Node, R extends Number> extends DataKey<Metric<N, R>, R> {
    /**
     * The full name of the metric. This is the preferred name for displaying.
     * Avoid using abbreviations.
     */
    String displayName();
    /**
     * List of name aliases by which the metric is recognisable. This
     * list includes the {@link #displayName()} of the metric. These are
     * typically an acronym for the display name, or some such mnemonic.
     */
    List<String> nameAliases();
    /**
     * Checks if the metric can be computed on the node.
     *
     * @param node The node to check
     *
     * @return True if the metric can be computed
     *
     * @throws NullPointerException If the parameter is null
     */
    default boolean supports(Node node) {
        // a node is supported exactly when the downcast succeeds
        return castIfSupported(node) != null;
    }
    /**
     * Casts the node to the more specific type {@code <N>} if this metric
     * can be computed on it. Returns null if the node is not supported.
     *
     * @param node An arbitrary node
     *
     * @return The same node, if it is supported
     *
     * @throws NullPointerException If the parameter is null
     */
    @Nullable N castIfSupported(@NonNull Node node);
    /**
     * Computes the value of the metric for the given node. Behavior if
     * the node is unsupported ({@link #castIfSupported(Node)}) is undefined:
     * the method may throw, return null, or return a garbage value. For that
     * reason the node should be tested beforehand.
     *
     * @param node The node
     * @param options The options of the metric
     *
     * @return The value of the metric, or null if it could not be computed.
     *
     * @throws NullPointerException if either parameter is null
     */
    R computeFor(N node, MetricOptions options);
    /**
     * Factory method for a metric. The returned instance does not override
     * equals/hashcode.
     *
     * @param compute Implementation for {@link #computeFor(Node, MetricOptions)} (a pure function).
     * @param cast Implementation for {@link #castIfSupported(Node)} (a pure function).
     * @param fullName The full name of the metric
     * @param aliases Aliases for the name
     * @param <R> Return type of the metric
     * @param <T> Type of node the metric can be computed on
     *
     * @return The metric key
     *
     * @throws NullPointerException If either parameter is null
     */
    static <T extends Node, R extends Number> Metric<T, R> of(BiFunction<? super T, MetricOptions, ? extends R> compute,
                                                              Function<Node, ? extends @Nullable T> cast,
                                                              @NonNull String fullName,
                                                              String... aliases) {
        AssertionUtil.requireParamNotNull("compute", compute);
        AssertionUtil.requireParamNotNull("cast", cast);
        AssertionUtil.requireParamNotNull("fullName", fullName);
        AssertionUtil.requireParamNotNull("aliases", aliases);
        // the alias list always starts with the display name
        List<String> allNames = listOf(fullName, aliases);
        // anonymous class: identity semantics, as documented above
        return new Metric<T, R>() {
            @Override
            public String displayName() {
                return fullName;
            }
            @Override
            public List<String> nameAliases() {
                return allNames;
            }
            @Override
            public @Nullable T castIfSupported(@NonNull Node node) {
                return cast.apply(node);
            }
            @Override
            public R computeFor(T node, MetricOptions options) {
                return compute.apply(node, options);
            }
        };
    }
    /**
     * Compute a metric on an arbitrary node, if possible. This is useful
     * in situations where {@code N} is unknown. The result is not cached
     * on the node.
     *
     * @param <N> Type of nodes the metric supports
     * @param <R> Return type
     * @param metric Metric
     * @param node Node
     * @param options Options for the metric
     *
     * @return Null if the node is unsupported, otherwise the result of the metric.
     *
     * @throws NullPointerException if any of the parameters is null
     */
    static <N extends Node, R extends Number> @Nullable R compute(Metric<N, R> metric, Node node, MetricOptions options) {
        // returns null for unsupported nodes instead of throwing
        N n = metric.castIfSupported(node);
        if (n != null) {
            return metric.computeFor(n, options);
        }
        return null;
    }
}
| 6,364 | 33.592391 | 122 | java |
pmd | pmd-master/pmd-core/src/main/java/net/sourceforge/pmd/lang/metrics/ParameterizedMetricKey.java | /**
* BSD-style license; for more info see http://pmd.sourceforge.net/license.html
*/
package net.sourceforge.pmd.lang.metrics;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import net.sourceforge.pmd.lang.ast.Node;
import net.sourceforge.pmd.util.DataMap.DataKey;
/**
 * Represents a key parameterized with its options. Used to index memoization maps.
 *
 * <p>Instances are pooled so that logically equal keys are represented
 * by the same canonical instance, which allows them to be used as
 * {@link DataKey}s (data maps use reference identity).
 *
 * @param <N> Type of node on which the memoized metric can be computed
 * @param <R> Result type of the metric
 *
 * @author Clément Fournier
 * @since 5.8.0
 */
final class ParameterizedMetricKey<N extends Node, R extends Number> implements DataKey<ParameterizedMetricKey<N, R>, R> {

    /** Pool of canonical instances, keyed by themselves. */
    private static final ConcurrentMap<ParameterizedMetricKey<?, ?>, ParameterizedMetricKey<?, ?>> POOL = new ConcurrentHashMap<>();

    /** The metric key. */
    public final Metric<N, R> metric;
    /** The options of the metric. */
    public final MetricOptions options;

    /** Used internally by the pooler. */
    private ParameterizedMetricKey(Metric<N, R> metric, MetricOptions options) {
        this.metric = metric;
        this.options = options;
    }

    @Override
    public String toString() {
        return "ParameterizedMetricKey{key=" + metric.displayName() + ", options=" + options + '}';
    }

    @Override
    public boolean equals(Object o) {
        return o instanceof ParameterizedMetricKey
            && ((ParameterizedMetricKey<?, ?>) o).metric.equals(metric)
            && ((ParameterizedMetricKey<?, ?>) o).options.equals(options);
    }

    @Override
    public int hashCode() {
        return 31 * metric.hashCode() + options.hashCode();
    }

    /**
     * Builds a parameterized metric key.
     *
     * @param key     The key
     * @param options The options
     * @param <N>     The type of node of the metric key
     * @param <R>     The result type of the metric key
     *
     * @return The canonical pooled instance corresponding to the parameters
     */
    @SuppressWarnings({"unchecked", "PMD.SingletonClassReturningNewInstance"})
    public static <N extends Node, R extends Number> ParameterizedMetricKey<N, R> getInstance(Metric<N, R> key, MetricOptions options) {
        // Sharing instances allows using DataMap, which uses reference identity.
        // computeIfAbsent does the lookup-or-insert in a single atomic map
        // operation, instead of the previous putIfAbsent + get pair.
        return (ParameterizedMetricKey<N, R>) POOL.computeIfAbsent(new ParameterizedMetricKey<>(key, options), k -> k);
    }
}
| 2,494 | 30.987179 | 136 | java |
pmd | pmd-master/pmd-core/src/main/java/net/sourceforge/pmd/lang/metrics/MetricOption.java | /**
* BSD-style license; for more info see http://pmd.sourceforge.net/license.html
*/
package net.sourceforge.pmd.lang.metrics;
/**
 * Option to pass to a metric. Options modify the behaviour of a metric.
 * You must bundle them into a {@link MetricOptions} to pass them all to a metric.
 *
 * <p>Options must be suitable for use in sets (implement equals/hashcode,
 * or be singletons). Enum types are natural implementations, since
 * {@link Enum#name()} already satisfies the {@link #name()} contract.
 *
 * @author Clément Fournier
 * @since 6.0.0
 */
public interface MetricOption {
    /**
     * Returns the name of the option constant (for enum implementations,
     * this is {@link Enum#name()}).
     *
     * @return The name of the option constant.
     */
    String name();
    /**
     * Returns the name of the option as it should be used in properties,
     * which may differ from {@link #name()}.
     *
     * @return The name of the option.
     */
    String valueName();
}
| 778 | 21.911765 | 82 | java |
pmd | pmd-master/pmd-core/src/main/java/net/sourceforge/pmd/lang/metrics/LanguageMetricsProvider.java | /**
* BSD-style license; for more info see http://pmd.sourceforge.net/license.html
*/
package net.sourceforge.pmd.lang.metrics;
import java.util.HashMap;
import java.util.Map;
import java.util.Set;
import org.checkerframework.checker.nullness.qual.Nullable;
import net.sourceforge.pmd.lang.LanguageVersionHandler;
import net.sourceforge.pmd.lang.ast.Node;
/**
 * Language-specific provider for metrics. Knows about all the metrics
 * defined for a language. Can be used e.g. to build GUI applications
 * like the designer, in a language independent way. Accessible through
 * {@link LanguageVersionHandler#getLanguageMetricsProvider()}.
 *
 * @author Clément Fournier
 * @since 6.11.0
 */
public interface LanguageMetricsProvider {

    /** Returns the set of all metrics supported by the language. */
    Set<Metric<?, ?>> getMetrics();

    /**
     * Fetch a metric using its name, comparing case-insensitively
     * against all name aliases of the known metrics.
     *
     * @return The first matching metric, or null if none matches
     */
    default @Nullable Metric<?, ?> getMetricWithName(String nameIgnoringCase) {
        return getMetrics().stream()
                           .filter(m -> m.nameAliases().stream()
                                         .anyMatch(alias -> alias.equalsIgnoreCase(nameIgnoringCase)))
                           .findFirst()
                           .orElse(null);
    }

    /**
     * Computes all metrics available on the given node.
     * The returned results may contain Double.NaN as a value.
     *
     * @param node Node to inspect
     *
     * @return A map of metric key to their result, possibly empty, but with no null value
     */
    default Map<Metric<?, ?>, Number> computeAllMetricsFor(Node node) {
        Map<Metric<?, ?>, Number> results = new HashMap<>();
        getMetrics().forEach(metric -> {
            // Metric.compute returns null for unsupported nodes: skip those.
            Number value = Metric.compute(metric, node, MetricOptions.emptyOptions());
            if (value != null) {
                results.put(metric, value);
            }
        });
        return results;
    }
}
| 1,950 | 29.484375 | 97 | java |
pmd | pmd-master/pmd-core/src/main/java/net/sourceforge/pmd/lang/metrics/MetricsUtil.java | /*
* BSD-style license; for more info see http://pmd.sourceforge.net/license.html
*/
package net.sourceforge.pmd.lang.metrics;
import java.util.DoubleSummaryStatistics;
import java.util.Objects;
import java.util.stream.Collectors;
import java.util.stream.StreamSupport;
import net.sourceforge.pmd.lang.ast.Node;
/**
 * Utilities to use {@link Metric} instances.
 */
public final class MetricsUtil {
    // Shared precondition failure messages (package-private).
    static final String NULL_KEY_MESSAGE = "The metric key must not be null";
    static final String NULL_OPTIONS_MESSAGE = "The metric options must not be null";
    static final String NULL_NODE_MESSAGE = "The node must not be null";
    private MetricsUtil() {
        // util class
    }
    /**
     * Returns true if the given node is supported by all the given metrics.
     *
     * @param node    Node to test
     * @param metrics Metrics to test against
     */
    public static boolean supportsAll(Node node, Metric<?, ?>... metrics) {
        for (Metric<?, ?> metric : metrics) {
            if (!metric.supports(node)) {
                return false;
            }
        }
        return true;
    }
    /**
     * Computes statistics for the results of a metric over a sequence of nodes,
     * using default (empty) options. Nodes not supported by the metric are
     * silently ignored.
     *
     * @param key The metric to compute
     * @param ops List of nodes for which to compute the metric
     *
     * @return Statistics for the value of the metric over all the supported nodes
     */
    public static <O extends Node> DoubleSummaryStatistics computeStatistics(Metric<? super O, ?> key, Iterable<? extends O> ops) {
        return computeStatistics(key, ops, MetricOptions.emptyOptions());
    }
    /**
     * Computes statistics for the results of a metric over a sequence of nodes.
     * Nodes not supported by the metric are silently ignored (contrary to
     * {@link #computeMetric(Metric, Node, MetricOptions)}, which throws).
     *
     * @param key     The metric to compute
     * @param ops     List of nodes for which to compute the metric
     * @param options The options of the metric
     *
     * @return Statistics for the value of the metric over all the supported nodes
     *
     * @throws NullPointerException If any parameter is null
     */
    public static <O extends Node> DoubleSummaryStatistics computeStatistics(Metric<? super O, ?> key,
                                                                            Iterable<? extends O> ops,
                                                                            MetricOptions options) {
        Objects.requireNonNull(key, NULL_KEY_MESSAGE);
        Objects.requireNonNull(options, NULL_OPTIONS_MESSAGE);
        Objects.requireNonNull(ops, NULL_NODE_MESSAGE);
        // unsupported nodes are filtered out rather than causing an exception
        return StreamSupport.stream(ops.spliterator(), false)
                            .filter(key::supports)
                            .collect(Collectors.summarizingDouble(op -> computeMetric(key, op, options).doubleValue()));
    }
    /**
     * Computes a metric identified by its code on a node, with the default options.
     *
     * <p>Note: this delegates to {@link #computeMetric(Metric, Node, MetricOptions)},
     * which throws if the metric does not support the node (it does not
     * return {@code Double.NaN}).
     *
     * @param key  The key identifying the metric to be computed
     * @param node The node on which to compute the metric
     *
     * @return The value of the metric
     *
     * @throws IllegalArgumentException If the metric does not support the given node
     */
    public static <N extends Node, R extends Number> R computeMetric(Metric<? super N, R> key, N node) {
        return computeMetric(key, node, MetricOptions.emptyOptions());
    }
    /**
     * Computes a metric identified by its code on a node, possibly
     * selecting a variant with the {@code options} parameter.
     *
     * <p>Note that contrary to the previous behaviour, this method
     * throws an exception if the metric does not support the node.
     *
     * @param key     The key identifying the metric to be computed
     * @param node    The node on which to compute the metric
     * @param options The options of the metric
     *
     * @return The value of the metric
     *
     * @throws IllegalArgumentException If the metric does not support the given node
     */
    public static <N extends Node, R extends Number> R computeMetric(Metric<? super N, R> key, N node, MetricOptions options) {
        return computeMetric(key, node, options, false);
    }
    /**
     * Computes a metric identified by its code on a node, possibly
     * selecting a variant with the {@code options} parameter. Results
     * are memoized on the node's user data map, keyed by metric + options.
     *
     * <p>Note that contrary to the previous behaviour, this method
     * throws an exception if the metric does not support the node.
     *
     * @param key            The key identifying the metric to be computed
     * @param node           The node on which to compute the metric
     * @param options        The options of the metric
     * @param forceRecompute Force recomputation of the result (bypasses the
     *                       cached value, but still stores the new result)
     *
     * @return The value of the metric
     *
     * @throws IllegalArgumentException If the metric does not support the given node
     */
    public static <N extends Node, R extends Number> R computeMetric(Metric<? super N, R> key, N node, MetricOptions options, boolean forceRecompute) {
        Objects.requireNonNull(key, NULL_KEY_MESSAGE);
        Objects.requireNonNull(options, NULL_OPTIONS_MESSAGE);
        Objects.requireNonNull(node, NULL_NODE_MESSAGE);
        if (!key.supports(node)) {
            throw new IllegalArgumentException(key + " cannot be computed on " + node);
        }
        // the parameterized key is pooled, so it can index the node's DataMap
        ParameterizedMetricKey<? super N, R> paramKey = ParameterizedMetricKey.getInstance(key, options);
        R prev = node.getUserMap().get(paramKey);
        if (!forceRecompute && prev != null) {
            return prev;
        }
        R val = key.computeFor(node, options);
        node.getUserMap().set(paramKey, val);
        return val;
    }
}
| 5,366 | 37.06383 | 151 | java |
pmd | pmd-master/pmd-core/src/main/java/net/sourceforge/pmd/lang/metrics/MetricOptions.java | /**
* BSD-style license; for more info see http://pmd.sourceforge.net/license.html
*/
package net.sourceforge.pmd.lang.metrics;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
/**
 * Bundles a set of options to pass to a metric. Metrics may use these options as they see fit.
 *
 * <p>Instances are interned in a process-wide pool, so that equal bundles
 * may be represented by the same instance.
 *
 * @author Clément Fournier
 * @since 6.0.0
 */
public final class MetricOptions {

    // Process-wide cache of canonical instances. A concurrent map makes
    // interning safe if bundles are built from several threads (the
    // previous HashMap had an unsynchronized check-then-put).
    private static final ConcurrentMap<MetricOptions, MetricOptions> POOL = new ConcurrentHashMap<>();
    private static final MetricOptions EMPTY_OPTIONS;

    private final Set<MetricOption> options;

    static {
        EMPTY_OPTIONS = new MetricOptions();
        POOL.put(EMPTY_OPTIONS, EMPTY_OPTIONS);
    }

    /** Constructor for the canonical empty bundle. */
    private MetricOptions() {
        options = Collections.emptySet();
    }

    /**
     * Wraps the given set without copying it. Callers must not mutate
     * the set after this call.
     */
    private MetricOptions(Set<? extends MetricOption> opts) {
        switch (opts.size()) {
        case 0:
            options = Collections.emptySet();
            break;
        case 1:
            options = Collections.<MetricOption>singleton(opts.iterator().next());
            break;
        default:
            options = Collections.unmodifiableSet(opts);
            break;
        }
    }

    @Override
    public boolean equals(Object o) {
        if (this == o) {
            return true;
        }
        if (o == null || getClass() != o.getClass()) {
            return false;
        }
        MetricOptions other = (MetricOptions) o;
        return options.equals(other.options);
    }

    @Override
    public int hashCode() {
        return options.hashCode();
    }

    /**
     * Returns an immutable set of options. Metrics may use these options as they see fit.
     *
     * @return The set of options of this version
     */
    public Set<MetricOption> getOptions() {
        return options;
    }

    /**
     * Returns true if this bundle contains the given option.
     *
     * @param option Option to look for
     */
    public boolean contains(MetricOption option) {
        return options.contains(option);
    }

    @Override
    public String toString() {
        return "MetricOptions{"
            + "options=" + options
            + '}';
    }

    /**
     * Returns an empty options bundle.
     *
     * @return An empty options bundle
     */
    public static MetricOptions emptyOptions() {
        return EMPTY_OPTIONS;
    }

    /**
     * Gets an options bundle from a collection of options. Null elements
     * are ignored.
     *
     * @param options The options to build the bundle from
     *
     * @return An options bundle
     */
    public static MetricOptions ofOptions(Collection<? extends MetricOption> options) {
        MetricOptionsBuilder builder = new MetricOptionsBuilder();
        builder.addAll(options);
        return builder.build();
    }

    /**
     * Gets an options bundle from options. Null elements are ignored.
     *
     * @param option  Mandatory first argument
     * @param options Rest of the options
     *
     * @return An options bundle
     */
    public static MetricOptions ofOptions(MetricOption option, MetricOption... options) {
        MetricOptionsBuilder builder = new MetricOptionsBuilder();
        builder.add(option);
        for (MetricOption opt : options) {
            builder.add(opt);
        }
        return builder.build();
    }

    /** Accumulates options (ignoring nulls) and interns the resulting bundle. */
    private static final class MetricOptionsBuilder {
        private final Set<MetricOption> opts = new HashSet<>();

        /** Adds an option, ignoring null. */
        void add(MetricOption option) {
            if (option != null) {
                opts.add(option);
            }
        }

        /** Adds all options of the collection, ignoring null elements. */
        void addAll(Collection<? extends MetricOption> options) {
            if (options != null) {
                this.opts.addAll(options);
                opts.remove(null);
            }
        }

        /** Returns the canonical pooled instance for the accumulated options. */
        MetricOptions build() {
            if (opts.isEmpty()) {
                return emptyOptions();
            }
            // computeIfAbsent does the lookup-or-insert atomically and
            // returns the canonical instance in a single map operation.
            return POOL.computeIfAbsent(new MetricOptions(opts), k -> k);
        }
    }
}
| 4,156 | 21.349462 | 95 | java |
pmd | pmd-master/pmd-core/src/main/java/net/sourceforge/pmd/lang/document/RootTextDocument.java | /*
* BSD-style license; for more info see http://pmd.sourceforge.net/license.html
*/
package net.sourceforge.pmd.lang.document;
import java.io.IOException;
import java.util.Objects;
import net.sourceforge.pmd.internal.util.BaseCloseable;
import net.sourceforge.pmd.lang.LanguageVersion;
/**
 * A text document directly backed by a {@link TextFile}. In the future
 * some other implementations of the interface may be eg views on part
 * of another document.
 */
final class RootTextDocument extends BaseCloseable implements TextDocument {
    private final TextFile backend;
    // to support CPD with the same api, we could probably just store
    // a soft reference to the contents, and build the positioner eagerly.
    private final TextFileContent content;
    private final LanguageVersion langVersion;
    private final FileId fileId;
    /**
     * Creates a document for the given file, reading its contents eagerly.
     *
     * @throws IOException If reading the contents fails
     * @throws NullPointerException If the backend reports a null language
     *                              version or a null file id
     */
    RootTextDocument(TextFile backend) throws IOException {
        this.backend = backend;
        this.content = backend.readContents();
        this.langVersion = backend.getLanguageVersion();
        this.fileId = backend.getFileId();
        Objects.requireNonNull(langVersion, "Null language version for file " + backend);
        Objects.requireNonNull(fileId, "Null path id for file " + backend);
    }
    @Override
    public LanguageVersion getLanguageVersion() {
        return langVersion;
    }
    @Override
    public FileId getFileId() {
        return fileId;
    }
    /** Closes the backing file (called at most once, see {@link BaseCloseable}). */
    @Override
    protected void doClose() throws IOException {
        backend.close();
    }
    @Override
    public Chars getText() {
        return content.getNormalizedText();
    }
    /** Converts an offset-based region into line/column coordinates. */
    @Override
    public FileLocation toLocation(TextRegion region) {
        checkInRange(region, this.getLength());
        SourceCodePositioner positioner = content.getPositioner();
        // We use longs to return both numbers at the same time
        // This limits us to 2 billion lines or columns, which is FINE
        TextPos2d bpos = positioner.lineColFromOffset(region.getStartOffset(), true);
        // for an empty region the end position coincides with the start
        TextPos2d epos = region.isEmpty() ? bpos
                                          : positioner.lineColFromOffset(region.getEndOffset(), false);
        return new FileLocation(
            fileId,
            bpos.getLine(),
            bpos.getColumn(),
            epos.getLine(),
            epos.getColumn(),
            region
        );
    }
    @Override
    public TextPos2d lineColumnAtOffset(int offset, boolean inclusive) {
        return content.getPositioner().lineColFromOffset(offset, inclusive);
    }
    /** Returns the region spanning the given lines (1-based, both inclusive). */
    @Override
    public TextRegion createLineRange(int startLineInclusive, int endLineInclusive) {
        SourceCodePositioner positioner = content.getPositioner();
        if (!positioner.isValidLine(startLineInclusive)
            || !positioner.isValidLine(endLineInclusive)
            || startLineInclusive > endLineInclusive) {
            throw invalidLineRange(startLineInclusive, endLineInclusive, positioner.getLastLine());
        }
        int first = positioner.offsetFromLineColumn(startLineInclusive, 1);
        int last = positioner.offsetOfEndOfLine(endLineInclusive);
        return TextRegion.fromBothOffsets(first, last);
    }
    /** Checks that the region does not extend past the given document length. */
    static void checkInRange(TextRegion region, int length) {
        if (region.getEndOffset() > length) {
            throw regionOutOfBounds(region.getStartOffset(), region.getEndOffset(), length);
        }
    }
    @Override
    public long getCheckSum() {
        return content.getCheckSum();
    }
    @Override
    public Chars sliceOriginalText(TextRegion region) {
        return getText().subSequence(region.getStartOffset(), region.getEndOffset());
    }
    // Message templates for the IndexOutOfBoundsException factories below.
    private static final String NOT_IN_RANGE = "Region [start=%d, end=%d[ is not in range of this document (length %d)";
    private static final String INVALID_LINE_RANGE = "Line range %d..%d is not in range of this document (%d lines) (line numbers are 1-based)";
    private static final String INVALID_OFFSET = "Offset %d is not in range of this document (length %d) (offsets are 0-based)";
    static IndexOutOfBoundsException invalidLineRange(int start, int end, int numLines) {
        return new IndexOutOfBoundsException(String.format(INVALID_LINE_RANGE, start, end, numLines));
    }
    static IndexOutOfBoundsException regionOutOfBounds(int start, int end, int maxLen) {
        return new IndexOutOfBoundsException(String.format(NOT_IN_RANGE, start, end, maxLen));
    }
    static IndexOutOfBoundsException invalidOffset(int offset, int maxLen) {
        return new IndexOutOfBoundsException(String.format(INVALID_OFFSET, offset, maxLen));
    }
}
| 4,663 | 33.548148 | 144 | java |
pmd | pmd-master/pmd-core/src/main/java/net/sourceforge/pmd/lang/document/package-info.java | /*
* BSD-style license; for more info see http://pmd.sourceforge.net/license.html
*/
/**
* Contains types to model text files and handle operations on text.
* Parser implementations build upon this framework. This package is
* built around the type {@link net.sourceforge.pmd.lang.document.TextFile},
* which represents a source file and allows reading and writing. The
* class {@link net.sourceforge.pmd.lang.document.TextDocument} models
* an in-memory snapshot of the state of a TextFile, and exposes information
* like line/offset mapping.
*
* @see net.sourceforge.pmd.lang.document.TextFile
* @see net.sourceforge.pmd.lang.document.TextDocument
* @see net.sourceforge.pmd.reporting.Reportable
*/
@Experimental
package net.sourceforge.pmd.lang.document;
import net.sourceforge.pmd.annotation.Experimental;
| 826 | 36.590909 | 79 | java |
pmd | pmd-master/pmd-core/src/main/java/net/sourceforge/pmd/lang/document/FragmentedDocBuilder.java | /*
* BSD-style license; for more info see http://pmd.sourceforge.net/license.html
*/
package net.sourceforge.pmd.lang.document;
import net.sourceforge.pmd.lang.document.FragmentedTextDocument.Fragment;
/**
 * Builds a {@link FragmentedTextDocument} by recording "deltas", ie
 * regions of the original document that are overwritten with some
 * replacement text. Fragments are chained together in document order.
 */
public final class FragmentedDocBuilder {
    // Text of the original document.
    private final Chars mainBuf;
    private final TextDocument original;
    // First/last fragment of the chain built so far (both null until the
    // first delta is recorded).
    private Fragment lastFragment;
    private Fragment firstFragment;
    // Offset in the original text up to which fragments have been built.
    private int curOffInInput;
    public FragmentedDocBuilder(TextDocument original) {
        this.mainBuf = original.getText();
        this.original = original;
    }
    /** Returns the location of the given offset in the original document. */
    public FileLocation toLocation(int indexInInput) {
        return original.toLocation(TextRegion.caretAt(indexInInput));
    }
    /**
     * Add a new fragment. Deltas must be recorded in document order.
     *
     * @param startInInput Start (inclusive) of the overwritten text in the source
     * @param endInInput   End (exclusive) ...
     * @param translation  Characters with which the range startInInput..endInInput are overwritten.
     *                     This may be empty.
     */
    public void recordDelta(int startInInput, int endInInput, Chars translation) {
        assert curOffInInput <= startInInput : "Already moved past " + curOffInInput + ", cannot add delta at " + startInInput;
        assert startInInput <= endInInput : "Offsets must be ordered";
        assert translation != null : "Translation cannot be null";
        int inLength = endInInput - startInInput;
        if (firstFragment == null) {
            assert lastFragment == null;
            // First delta: the unchanged leading text becomes the first fragment.
            firstFragment = new Fragment(null, startInInput, mainBuf.slice(0, startInInput));
            lastFragment = new Fragment(firstFragment, inLength, translation);
            curOffInInput = endInInput;
            return;
        }
        Fragment last = lastFragment;
        int prevLen = startInInput - curOffInInput;
        if (prevLen != 0) {
            // Unchanged text between the previous delta and this one.
            last = new Fragment(last, prevLen, mainBuf.slice(curOffInInput, prevLen));
        }
        last = new Fragment(last, inLength, translation);
        this.lastFragment = last;
        this.curOffInInput = endInInput;
    }
    /**
     * Returns the translated document. If no deltas were recorded,
     * returns the original document itself.
     */
    public TextDocument build() {
        if (firstFragment == null) {
            // No deltas in whole document, there's a single fragment
            // This is the case for > 97% of Java files (source: OpenJDK)
            return original;
        } else {
            if (curOffInInput < mainBuf.length()) {
                // there's some text left between the last fragment and the end of the doc
                int remLen = mainBuf.length() - curOffInInput;
                Chars remainder = mainBuf.slice(curOffInInput, remLen);
                lastFragment = new Fragment(lastFragment, remLen, remainder);
            }
            return new FragmentedTextDocument(original, firstFragment, lastFragment);
        }
    }
}
| 2,843 | 35.461538 | 127 | java |
pmd | pmd-master/pmd-core/src/main/java/net/sourceforge/pmd/lang/document/ReaderTextFile.java | /*
* BSD-style license; for more info see http://pmd.sourceforge.net/license.html
*/
package net.sourceforge.pmd.lang.document;
import java.io.IOException;
import java.io.Reader;
import org.checkerframework.checker.nullness.qual.NonNull;
import net.sourceforge.pmd.lang.LanguageVersion;
import net.sourceforge.pmd.util.AssertionUtil;
/**
 * Read-only view on a string.
 */
class ReaderTextFile implements TextFile {
    private final FileId fileId;
    private final LanguageVersion languageVersion;
    private final Reader reader;
    /**
     * Creates a text file backed by the given reader. All parameters
     * must be non-null.
     */
    ReaderTextFile(Reader reader, @NonNull FileId fileId, LanguageVersion languageVersion) {
        AssertionUtil.requireParamNotNull("reader", reader);
        AssertionUtil.requireParamNotNull("path id", fileId);
        AssertionUtil.requireParamNotNull("language version", languageVersion);
        this.reader = reader;
        this.languageVersion = languageVersion;
        this.fileId = fileId;
    }
    @Override
    public FileId getFileId() {
        return fileId;
    }
    @Override
    public @NonNull LanguageVersion getLanguageVersion() {
        return languageVersion;
    }
    /** Reads the contents from the reader, which is closed afterwards. */
    @Override
    public TextFileContent readContents() throws IOException {
        return TextFileContent.fromReader(reader); // this closes the reader
    }
    @Override
    public void close() throws IOException {
        reader.close();
    }
    @Override
    public String toString() {
        return "ReaderTextFile[" + fileId.getAbsolutePath() + "]";
    }
}
| 1,514 | 24.25 | 92 | java |
pmd | pmd-master/pmd-core/src/main/java/net/sourceforge/pmd/lang/document/FileLocation.java | /*
* BSD-style license; for more info see http://pmd.sourceforge.net/license.html
*/
package net.sourceforge.pmd.lang.document;
import java.util.Comparator;
import java.util.Objects;
import org.checkerframework.checker.nullness.qual.Nullable;
import net.sourceforge.pmd.RuleViolation;
import net.sourceforge.pmd.lang.ast.GenericToken;
import net.sourceforge.pmd.lang.ast.Node;
import net.sourceforge.pmd.reporting.Reportable;
import net.sourceforge.pmd.util.AssertionUtil;
/**
 * Represents the coordinates of a text region, used for reporting. This provides access
 * to the line and column positions, as well as the text file. Instances
 * can be obtained from a {@link TextRegion} with {@link TextDocument#toLocation(TextRegion) TextDocument::toLocation}.
 *
 * <p>This should replace the text coordinates methods in {@link Node},
 * {@link GenericToken}, and {@link RuleViolation} at least (see {@link Reportable}).
 *
 * TODO the end line/end column are barely used, mostly ignored even by
 * renderers. Maybe these could be optional, or replaced by just a length
 * in case a renderer wants to cut out a piece of the file.
 */
public final class FileLocation {

    /** Compares locations by start position, then by end position (ignores the file). */
    public static final Comparator<FileLocation> COORDS_COMPARATOR =
        Comparator.comparing(FileLocation::getStartPos)
                  .thenComparing(FileLocation::getEndPos);

    /** Compares locations by file id first, then by coordinates. */
    public static final Comparator<FileLocation> COMPARATOR =
        Comparator.comparing(FileLocation::getFileId).thenComparing(COORDS_COMPARATOR);

    private final int beginLine;
    private final int endLine;
    private final int beginColumn;
    private final int endColumn;
    // Renamed from "fileName": the field holds a FileId, consistently with getFileId().
    private final FileId fileId;
    private final @Nullable TextRegion region;

    FileLocation(FileId fileId, int beginLine, int beginColumn, int endLine, int endColumn) {
        this(fileId, beginLine, beginColumn, endLine, endColumn, null);
    }

    /**
     * @throws NullPointerException     If the file id is null
     * @throws IllegalArgumentException If any line/column is strictly less than 1,
     *                                  or lines/columns are not correctly ordered
     */
    FileLocation(FileId fileId, int beginLine, int beginColumn, int endLine, int endColumn, @Nullable TextRegion region) {
        this.fileId = Objects.requireNonNull(fileId);
        this.beginLine = AssertionUtil.requireOver1("Begin line", beginLine);
        this.endLine = AssertionUtil.requireOver1("End line", endLine);
        this.beginColumn = AssertionUtil.requireOver1("Begin column", beginColumn);
        this.endColumn = AssertionUtil.requireOver1("End column", endColumn);
        this.region = region;
        requireLinesCorrectlyOrdered();
    }

    /** Checks that the start position does not come after the end position. */
    private void requireLinesCorrectlyOrdered() {
        if (beginLine > endLine) {
            throw AssertionUtil.mustBe("endLine", endLine, ">= beginLine (= " + beginLine + ")");
        } else if (beginLine == endLine && beginColumn > endColumn) {
            throw AssertionUtil.mustBe("endColumn", endColumn, ">= beginColumn (= " + beginColumn + ")");
        }
    }

    /**
     * File name of this position.
     */
    public FileId getFileId() {
        return fileId;
    }

    /** Inclusive, 1-based line number. */
    public int getStartLine() {
        return beginLine;
    }

    /** Inclusive, 1-based line number. */
    public int getEndLine() {
        return endLine;
    }

    /** Inclusive, 1-based column number. */
    public int getStartColumn() {
        return beginColumn;
    }

    /** <b>Exclusive</b>, 1-based column number. */
    public int getEndColumn() {
        return endColumn;
    }

    /**
     * Returns the start position.
     */
    public TextPos2d getStartPos() {
        return TextPos2d.pos2d(beginLine, beginColumn);
    }

    /**
     * Returns the end position.
     */
    public TextPos2d getEndPos() {
        return TextPos2d.pos2d(endLine, endColumn);
    }

    /**
     * Turns this location into a 2D text range.
     */
    public TextRange2d toRange2d() {
        return TextRange2d.range2d(beginLine, beginColumn, endLine, endColumn);
    }

    /** Returns the region in the file, or null if this was not available. */
    public @Nullable TextRegion getRegionInFile() {
        return region;
    }

    /**
     * Formats the start position as e.g. {@code "line 1, column 2"}.
     */
    public String startPosToString() {
        return getStartPos().toDisplayStringInEnglish();
    }

    /**
     * Formats the start position as e.g. {@code "/path/to/file:1:2"}.
     */
    public String startPosToStringWithFile() {
        return getFileId().getOriginalPath() + ":" + getStartPos().toDisplayStringWithColon();
    }

    /**
     * Creates a new location for a range of text.
     *
     * @throws NullPointerException     If the file id is null
     * @throws IllegalArgumentException If any of the line/col parameters are strictly less than 1
     * @throws IllegalArgumentException If the line and column are not correctly ordered
     */
    public static FileLocation range(FileId fileId, TextRange2d range2d) {
        TextPos2d start = range2d.getStartPos();
        TextPos2d end = range2d.getEndPos();
        return new FileLocation(fileId,
                                start.getLine(),
                                start.getColumn(),
                                end.getLine(),
                                end.getColumn());
    }

    /**
     * Returns a new location that starts and ends at the same position.
     *
     * @param fileId File id
     * @param line   Line number
     * @param column Column number
     *
     * @return A new location
     *
     * @throws NullPointerException     If the file id is null
     * @throws IllegalArgumentException See {@link #range(FileId, TextRange2d)}
     */
    public static FileLocation caret(FileId fileId, int line, int column) {
        return new FileLocation(fileId, line, column, line, column);
    }

    @Override
    public String toString() {
        return "!debug only! " + startPosToStringWithFile();
    }
}
| 5,896 | 32.129213 | 124 | java |
pmd | pmd-master/pmd-core/src/main/java/net/sourceforge/pmd/lang/document/CpdCompat.java | /*
* BSD-style license; for more info see http://pmd.sourceforge.net/license.html
*/
package net.sourceforge.pmd.lang.document;
import net.sourceforge.pmd.cpd.SourceCode;
import net.sourceforge.pmd.lang.LanguageVersion;
import net.sourceforge.pmd.lang.PlainTextLanguage;
/**
 * Compatibility APIs, to be removed before PMD 7 is out.
 */
@Deprecated
public final class CpdCompat {
    private CpdCompat() {
        // utility class
    }
    /** Returns the default version of the plain text language. */
    @Deprecated
    public static LanguageVersion dummyVersion() {
        return PlainTextLanguage.getInstance().getDefaultVersion();
    }
    /**
     * Bridges {@link SourceCode} with {@link TextFile}. This allows
     * javacc tokenizers to work on text documents.
     *
     * @deprecated This is only a transitional API for the PMD 7 branch
     */
    @Deprecated
    public static TextFile cpdCompat(SourceCode sourceCode) {
        return TextFile.forCharSeq(
            sourceCode.getCodeBuffer(),
            FileId.fromPathLikeString(sourceCode.getFileName()),
            dummyVersion()
        );
    }
}
| 1,067 | 25.04878 | 79 | java |
pmd | pmd-master/pmd-core/src/main/java/net/sourceforge/pmd/lang/document/TextFile.java | /*
* BSD-style license; for more info see http://pmd.sourceforge.net/license.html
*/
package net.sourceforge.pmd.lang.document;
import java.io.BufferedReader;
import java.io.Closeable;
import java.io.File;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.io.Reader;
import java.nio.charset.Charset;
import java.nio.file.Path;
import org.checkerframework.checker.nullness.qual.NonNull;
import net.sourceforge.pmd.PMDConfiguration;
import net.sourceforge.pmd.annotation.DeprecatedUntil700;
import net.sourceforge.pmd.cpd.SourceCode;
import net.sourceforge.pmd.internal.util.BaseCloseable;
import net.sourceforge.pmd.internal.util.IOUtil;
import net.sourceforge.pmd.lang.LanguageVersion;
import net.sourceforge.pmd.lang.document.TextFileBuilder.ForCharSeq;
import net.sourceforge.pmd.lang.document.TextFileBuilder.ForNio;
import net.sourceforge.pmd.lang.document.TextFileBuilder.ForReader;
import net.sourceforge.pmd.util.datasource.DataSource;
/**
* Represents some location containing character data. Despite the name,
* it's not necessarily backed by a file in the file-system: it may be
* eg an in-memory buffer, or a zip entry, ie it's an abstraction. Text
* files are the input which PMD and CPD process.
*
* <p>Text files must provide read access, and may provide write access.
* This interface only provides block IO operations, while {@link TextDocument} adds logic
* about incremental edition (eg replacing a single region of text).
*
* <p>This interface is meant to replace {@link DataSource} and {@link SourceCode.CodeLoader}.
* "DataSource" is not an appropriate name for a file which can be written
* to, also, the "data" it provides is text, not bytes.
*/
public interface TextFile extends Closeable {
    /**
     * Returns the language version which should be used to process this
     * file. This is a property of the file, which allows sources for
     * different language versions to be processed in the same
     * PMD run. It also makes it so, that the file extension is not interpreted
     * to find out the language version after the initial file collection
     * phase.
     *
     * @return A language version
     */
    @NonNull
    LanguageVersion getLanguageVersion();
    /**
     * Returns an identifier for this file. This should not
     * be interpreted as a {@link File}, it may not be a file on this
     * filesystem. Two distinct text files should have distinct path IDs,
     * and from one analysis to the next, the path ID of logically identical
     * files should be the same.
     *
     * @return A stable identifier for this file
     */
    FileId getFileId();
    /**
     * Returns true if this file cannot be written to. In that case,
     * {@link #writeContents(TextFileContent)} will throw an exception.
     * In the general case, nothing prevents this method's result from
     * changing from one invocation to another.
     */
    default boolean isReadOnly() {
        // default implementations of writeContents throw, so read-only is the safe default
        return true;
    }
    /**
     * Writes the given content to the underlying character store.
     *
     * @param content Content to write, with lines separated by the given line separator
     *
     * @throws IOException If this instance is closed
     * @throws IOException If an error occurs
     * @throws ReadOnlyFileException If this text source is read-only
     */
    default void writeContents(TextFileContent content) throws IOException {
        throw new ReadOnlyFileException(this);
    }
    /**
     * Reads the contents of the underlying character source.
     *
     * @return The most up-to-date content
     *
     * @throws IOException If this instance is closed
     * @throws IOException If reading causes an IOException
     */
    TextFileContent readContents() throws IOException;
    /**
     * Release resources associated with this text file. Is a noop if
     * it is called several times.
     *
     * @throws IOException If an IO exception occurs
     */
    @Override
    void close() throws IOException;
    /**
     * Text file equality is implementation-defined. The only constraint
     * is that equal text files should have equal path IDs (and the usual
     * properties mandated by {@link Object#equals(Object)}).
     */
    // currently:
    // - Path-based TextFiles compare their path for equality, where the path is not normalized.
    // - Reader- and String-based TextFiles use identity semantics.
    @Override
    boolean equals(Object o);
    // factory methods
    /**
     * Returns an instance of this interface reading and writing to a file.
     * See {@link #builderForPath(Path, Charset, LanguageVersion) builderForPath}
     * for more info.
     *
     * @param path Path to the file
     * @param charset Encoding to use
     * @param languageVersion Language version to use
     *
     * @return A text file backed by the given path
     *
     * @throws NullPointerException If any parameter is null
     */
    static TextFile forPath(Path path, Charset charset, LanguageVersion languageVersion) {
        return builderForPath(path, charset, languageVersion)
            .build();
    }
    /**
     * Returns a builder for a textfile that reads and write to the file.
     * The returned instance may be read-only. If the file is not a regular
     * file (eg, a directory), or does not exist, then {@link TextFile#readContents()}
     * will throw.
     *
     * <p>The display name is by default the given path (without normalization),
     * while the path id is the absolute path.
     *
     * @param path Path to the file
     * @param charset Encoding to use
     * @param languageVersion Language version to use
     *
     * @return A builder for a path-backed text file
     *
     * @throws NullPointerException If any parameter is null
     */
    static TextFileBuilder builderForPath(Path path, Charset charset, LanguageVersion languageVersion) {
        return new ForNio(languageVersion, path, charset);
    }
    /**
     * Returns a read-only TextFile reading from a string.
     * Note that this will normalize the text, in such a way that {@link TextFile#readContents()}
     * may not produce exactly the same char sequence.
     *
     * @param charseq Text of the file
     * @param fileId File name to use as path id
     * @param languageVersion Language version
     *
     * @return A read-only text file backed by the given string
     *
     * @throws NullPointerException If any parameter is null
     */
    static TextFile forCharSeq(CharSequence charseq, FileId fileId, LanguageVersion languageVersion) {
        return builderForCharSeq(charseq, fileId, languageVersion)
            .build();
    }
    /**
     * Returns a read-only TextFile reading from a string.
     * Note that this will normalize the text, in such a way that {@link TextFile#readContents()}
     * may not produce exactly the same char sequence.
     *
     * @param charseq Text of the file
     * @param fileId File name to use as path id
     * @param languageVersion Language version
     *
     * @return A builder for a string-backed text file
     *
     * @throws NullPointerException If any parameter is null
     */
    static TextFileBuilder builderForCharSeq(CharSequence charseq, FileId fileId, LanguageVersion languageVersion) {
        return new ForCharSeq(charseq, fileId, languageVersion);
    }
    /**
     * Returns a read-only instance of this interface reading from a reader.
     * The reader is first read when {@link TextFile#readContents()} is first
     * called, and is closed when that method exits. Note that this may
     * only be called once, afterwards, {@link TextFile#readContents()} will
     * throw an {@link IOException}.
     *
     * @param reader Text of the file
     * @param fileId File name to use as path id
     * @param languageVersion Language version
     *
     * @return A read-only text file backed by the given reader
     *
     * @throws NullPointerException If any parameter is null
     */
    static TextFile forReader(Reader reader, FileId fileId, LanguageVersion languageVersion) {
        return builderForReader(reader, fileId, languageVersion)
            .build();
    }
    /**
     * Returns a read-only builder reading from a reader.
     * The reader is first read when {@link TextFile#readContents()} is first
     * called, and is closed when that method exits. Note that this may
     * only be called once, afterwards, {@link TextFile#readContents()} will
     * throw an {@link IOException}.
     *
     * @param reader Text of the file
     * @param fileId File name to use as path id
     * @param languageVersion Language version
     *
     * @return A builder for a reader-backed text file
     *
     * @throws NullPointerException If any parameter is null
     */
    static TextFileBuilder builderForReader(Reader reader, FileId fileId, LanguageVersion languageVersion) {
        return new ForReader(languageVersion, reader, fileId);
    }
    /**
     * Wraps the given {@link DataSource} (provided for compatibility).
     * Note that data sources are only usable once (even {@link DataSource#forString(String, String)}),
     * so calling {@link TextFile#readContents()} twice will throw the second time.
     *
     * @deprecated This is deprecated until PMD 7 is out, after which
     *     {@link DataSource} will be removed.
     */
    @Deprecated
    @DeprecatedUntil700
    static TextFile dataSourceCompat(DataSource ds, PMDConfiguration config) {
        String pathId = ds.getNiceFileName(false, null);
        FileId fileId2 = FileId.fromPathLikeString(pathId);
        // the language version is resolved eagerly, from the config, using the file name
        LanguageVersion languageVersion = config.getLanguageVersionOfFile(pathId);
        if (languageVersion == null) {
            throw new NullPointerException("no language version detected for " + pathId);
        }
        // Local adapter class: BaseCloseable gives idempotent close() semantics.
        class DataSourceTextFile extends BaseCloseable implements TextFile {
            @Override
            public @NonNull LanguageVersion getLanguageVersion() {
                return languageVersion;
            }
            @Override
            public FileId getFileId() {
                return fileId2;
            }
            @Override
            public TextFileContent readContents() throws IOException {
                ensureOpen();
                // NOTE: the underlying DataSource stream is one-shot, so a second
                // call to readContents() will fail (see the deprecation javadoc).
                try (InputStream is = ds.getInputStream();
                     Reader reader = new BufferedReader(new InputStreamReader(is, config.getSourceEncoding()))) {
                    String contents = IOUtil.readToString(reader);
                    return TextFileContent.fromCharSeq(contents);
                }
            }
            @Override
            protected void doClose() throws IOException {
                ds.close();
            }
        }
        return new DataSourceTextFile();
    }
}
| 10,558 | 36.845878 | 116 | java |
pmd | pmd-master/pmd-core/src/main/java/net/sourceforge/pmd/lang/document/ReadOnlyFileException.java | /*
* BSD-style license; for more info see http://pmd.sourceforge.net/license.html
*/
package net.sourceforge.pmd.lang.document;
/**
 * Thrown when an attempt to write through a {@link TextFile}
 * fails because the file is read-only.
 *
 * @see TextFile#isReadOnly()
 * @see TextFile#writeContents(TextFileContent)
 */
public class ReadOnlyFileException extends UnsupportedOperationException {
    // Exceptions are Serializable: declare an explicit serialVersionUID so the
    // serialized form does not silently change with recompilation.
    private static final long serialVersionUID = 1L;
    /**
     * Builds the exception, with a message naming the offending file.
     *
     * @param textFile The read-only file that was written to
     */
    public ReadOnlyFileException(TextFile textFile) {
        super("Read only: " + textFile.getFileId().getAbsolutePath());
    }
}
| 452 | 24.166667 | 79 | java |
pmd | pmd-master/pmd-core/src/main/java/net/sourceforge/pmd/lang/document/TextFileContent.java | /*
* BSD-style license; for more info see http://pmd.sourceforge.net/license.html
*/
package net.sourceforge.pmd.lang.document;
import java.io.BufferedInputStream;
import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.io.Reader;
import java.nio.ByteBuffer;
import java.nio.CharBuffer;
import java.nio.charset.Charset;
import java.nio.charset.StandardCharsets;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import java.util.zip.Adler32;
import java.util.zip.CheckedInputStream;
import java.util.zip.Checksum;
import org.checkerframework.checker.nullness.qual.NonNull;
import org.checkerframework.checker.nullness.qual.Nullable;
import net.sourceforge.pmd.internal.util.IOUtil;
/**
* Contents of a text file.
*/
public final class TextFileContent {
    // the three line terminators we handle.
    private static final String CRLF = "\r\n";
    private static final String LF = "\n";
    private static final String CR = "\r";
    /**
     * The normalized line ending used to replace platform-specific
     * line endings in the {@linkplain #getNormalizedText() normalized text}.
     */
    public static final String NORMALIZED_LINE_TERM = LF;
    /** The normalized line ending as a char. */
    public static final char NORMALIZED_LINE_TERM_CHAR = '\n';
    private static final int DEFAULT_BUFSIZE = 8192;
    private static final Pattern NEWLINE_PATTERN = Pattern.compile("\r\n?|\n");
    private static final String FALLBACK_LINESEP = System.lineSeparator();
    /** Normalized text (LF line endings, no BOM). */
    private final Chars cdata;
    /** Original line terminator, used to write the file back. */
    private final String lineTerminator;
    /** Checksum of the unnormalized input. */
    private final long checkSum;
    private final SourceCodePositioner positioner;
    private TextFileContent(Chars normalizedText, String lineTerminator, long checkSum, SourceCodePositioner positioner) {
        this.cdata = normalizedText;
        this.lineTerminator = lineTerminator;
        this.checkSum = checkSum;
        this.positioner = positioner;
    }
    /**
     * The text of the file, with the following normalizations:
     * <ul>
     * <li>Line endings are normalized to {@value NORMALIZED_LINE_TERM}.
     * For this purpose, a line ending is either {@code \r}, {@code \r\n}
     * or {@code \n} (CR, CRLF or LF), not the full range of unicode line
     * endings. This is consistent with {@link BufferedReader#readLine()},
     * and the JLS, for example.
     * <li>An initial byte-order mark is removed, if any.
     * </ul>
     */
    public Chars getNormalizedText() {
        return cdata;
    }
    /**
     * Returns the original line terminator found in the file. This is
     * the terminator that should be used to write the file back to disk.
     * If the original file either has mixed line terminators, or has no
     * line terminator at all, the line terminator defaults to the
     * platform-specific one ({@link System#lineSeparator()}).
     */
    public String getLineTerminator() {
        return lineTerminator;
    }
    /**
     * Returns a checksum for the contents of the file. The checksum is
     * computed on the unnormalized bytes, so may be affected by a change
     * line terminators. This is why two {@link TextFileContent}s with the
     * same normalized content may have different checksums.
     */
    public long getCheckSum() {
        return checkSum;
    }
    SourceCodePositioner getPositioner() {
        return positioner;
    }
    /**
     * Normalize the line endings of the text to {@value NORMALIZED_LINE_TERM},
     * returns a {@link TextFileContent} containing the original line ending.
     * If the text does not contain any line terminators, or if it contains a
     * mix of different terminators, falls back to the platform-specific line
     * separator.
     *
     * @param text Text content of a file
     *
     * @return A text file content
     */
    public static @NonNull TextFileContent fromCharSeq(CharSequence text) {
        return normalizeCharSeq(text, FALLBACK_LINESEP);
    }
    /**
     * Read the reader fully and produce a {@link TextFileContent}. This
     * closes the reader. This takes care of buffering.
     *
     * @param reader A reader
     *
     * @return A text file content
     *
     * @throws IOException If an IO exception occurs
     */
    public static TextFileContent fromReader(Reader reader) throws IOException {
        try (Reader r = reader) {
            return normalizingRead(r, DEFAULT_BUFSIZE, FALLBACK_LINESEP, newChecksum(), true);
        }
    }
    /**
     * Reads the contents of the input stream into a TextFileContent.
     * This closes the input stream. This takes care of buffering.
     *
     * @param inputStream Input stream
     * @param sourceEncoding Encoding to use to read from the data source
     */
    public static TextFileContent fromInputStream(InputStream inputStream, Charset sourceEncoding) throws IOException {
        return fromInputStream(inputStream, sourceEncoding, FALLBACK_LINESEP);
    }
    // test only
    static TextFileContent fromInputStream(InputStream inputStream, Charset sourceEncoding, String fallbackLineSep) throws IOException {
        Checksum checksum = newChecksum();
        try (CheckedInputStream checkedIs = new CheckedInputStream(new BufferedInputStream(inputStream), checksum);
             // no need to buffer this reader as we already use our own char buffer
             Reader reader = new InputStreamReader(checkedIs, sourceEncoding)) {
            // the CheckedInputStream updates the checksum on raw bytes, so we
            // don't need to update it manually (updateChecksum=false)
            return normalizingRead(reader, DEFAULT_BUFSIZE, fallbackLineSep, checksum, false);
        }
    }
    // test only
    static @NonNull TextFileContent normalizeCharSeq(CharSequence text, String fallBackLineSep) {
        long checksum = getCheckSum(text); // the checksum is computed on the original file
        if (text.length() > 0 && text.charAt(0) == IOUtil.UTF_BOM) {
            text = text.subSequence(1, text.length()); // skip the BOM
        }
        Matcher matcher = NEWLINE_PATTERN.matcher(text);
        boolean needsNormalization = false;
        String lineTerminator = null;
        while (matcher.find()) {
            lineTerminator = detectLineTerm(lineTerminator, matcher.group(), fallBackLineSep);
            if (!NORMALIZED_LINE_TERM.equals(lineTerminator)) {
                needsNormalization = true;
                if (lineTerminator.equals(fallBackLineSep)) {
                    // otherwise a mixed delimiter may follow, and we must
                    // detect it to fallback on the system separator
                    break;
                }
            }
        }
        if (lineTerminator == null) {
            // no line sep, default to platform sep
            lineTerminator = fallBackLineSep;
            needsNormalization = false;
        }
        if (needsNormalization) {
            text = NEWLINE_PATTERN.matcher(text).replaceAll(NORMALIZED_LINE_TERM);
        }
        return new TextFileContent(Chars.wrap(text), lineTerminator, checksum, SourceCodePositioner.create(text));
    }
    // test only
    // the bufsize and fallbackLineSep parameters are here just for testability
    static TextFileContent normalizingRead(Reader input, int bufSize, String fallbackLineSep) throws IOException {
        return normalizingRead(input, bufSize, fallbackLineSep, newChecksum(), true);
    }
    static TextFileContent normalizingRead(Reader input, int bufSize, String fallbackLineSep, Checksum checksum, boolean updateChecksum) throws IOException {
        char[] cbuf = new char[bufSize];
        StringBuilder result = new StringBuilder(bufSize);
        String detectedLineTerm = null;
        boolean afterCr = false;
        SourceCodePositioner.Builder positionerBuilder = new SourceCodePositioner.Builder();
        int bufOffset = 0;
        int nextCharToCopy = 0;
        int n = input.read(cbuf);
        if (n > 0 && cbuf[0] == IOUtil.UTF_BOM) {
            nextCharToCopy = 1;
        }
        while (n != IOUtil.EOF) {
            if (updateChecksum) {
                // if we use a checked input stream we dont need to update the checksum manually
                // note that this checksum operates on non-normalized characters
                // CharBuffer.wrap takes (array, offset, LENGTH): the window must be
                // the n - nextCharToCopy chars actually read past the BOM. Using n
                // as the length would read one char too many, and would throw
                // IndexOutOfBoundsException when a BOM is skipped and the first
                // read fills the whole buffer.
                // NOTE(review): this makes the checksum of a BOM'd file exclude the
                // BOM, unlike normalizeCharSeq which checksums it — confirm intent.
                updateChecksum(checksum, CharBuffer.wrap(cbuf, nextCharToCopy, n - nextCharToCopy));
            }
            int offsetDiff = 0;
            for (int i = nextCharToCopy; i < n; i++) {
                char c = cbuf[i];
                if (afterCr || c == NORMALIZED_LINE_TERM_CHAR) {
                    final String newLineTerm;
                    final int newLineOffset;
                    if (afterCr && c != NORMALIZED_LINE_TERM_CHAR) {
                        // we saw a \r last iteration, but didn't copy it
                        // it's not followed by an \n
                        newLineTerm = CR;
                        newLineOffset = bufOffset + i + offsetDiff;
                        if (i > 0) {
                            cbuf[i - 1] = NORMALIZED_LINE_TERM_CHAR; // replace the \r with a \n
                        } else {
                            // The CR was trailing a buffer, so it's not in the current buffer and wasn't copied.
                            // Append a newline.
                            result.append(NORMALIZED_LINE_TERM);
                        }
                    } else {
                        if (afterCr) {
                            newLineTerm = CRLF;
                            if (i > 0) {
                                cbuf[i - 1] = NORMALIZED_LINE_TERM_CHAR; // replace the \r with a \n
                                // copy up to and including the \r, which was replaced
                                result.append(cbuf, nextCharToCopy, i - nextCharToCopy);
                                nextCharToCopy = i + 1; // set the next char to copy to after the \n
                            }
                            // Since we're replacing a 2-char delimiter with a single char,
                            // the offset of the line needs to be adjusted.
                            offsetDiff--;
                        } else {
                            // just \n
                            newLineTerm = LF;
                        }
                        newLineOffset = bufOffset + i + offsetDiff + 1;
                    }
                    positionerBuilder.addLineEndAtOffset(newLineOffset);
                    detectedLineTerm = detectLineTerm(detectedLineTerm, newLineTerm, fallbackLineSep);
                }
                afterCr = c == '\r';
            } // end for
            if (nextCharToCopy != n) {
                int numRemaining = n - nextCharToCopy;
                if (afterCr) {
                    numRemaining--; // don't copy the \r, it could still be followed by \n on the next round
                }
                result.append(cbuf, nextCharToCopy, numRemaining);
            }
            nextCharToCopy = 0;
            bufOffset += n + offsetDiff;
            n = input.read(cbuf);
        } // end while
        if (afterCr) { // we're at EOF, so it's not followed by \n
            result.append(NORMALIZED_LINE_TERM);
            positionerBuilder.addLineEndAtOffset(bufOffset);
            detectedLineTerm = detectLineTerm(detectedLineTerm, CR, fallbackLineSep);
        }
        if (detectedLineTerm == null) {
            // no line terminator in text
            detectedLineTerm = fallbackLineSep;
        }
        return new TextFileContent(Chars.wrap(result), detectedLineTerm, checksum.getValue(), positionerBuilder.build(bufOffset));
    }
    private static String detectLineTerm(@Nullable String curLineTerm, String newLineTerm, String fallback) {
        if (curLineTerm == null) {
            return newLineTerm;
        }
        if (curLineTerm.equals(newLineTerm)) {
            return curLineTerm;
        } else {
            // todo maybe we should report a warning
            return fallback; // mixed line terminators, fallback to system default
        }
    }
    private static long getCheckSum(CharSequence cs) {
        Checksum checksum = newChecksum();
        updateChecksum(checksum, CharBuffer.wrap(cs));
        return checksum.getValue();
    }
    private static void updateChecksum(Checksum checksum, CharBuffer chars) {
        ByteBuffer bytes = StandardCharsets.UTF_8.encode(chars);
        // note: this is only needed on Java 8. On Java 9, Checksum#update(ByteBuffer) has been added.
        assert bytes.hasArray() : "Encoder should produce a heap buffer";
        checksum.update(bytes.array(), bytes.arrayOffset(), bytes.remaining());
    }
    private static Checksum newChecksum() {
        return new Adler32();
    }
}
| 12,750 | 38.354938 | 157 | java |
pmd | pmd-master/pmd-core/src/main/java/net/sourceforge/pmd/lang/document/Chars.java | /*
* BSD-style license; for more info see http://pmd.sourceforge.net/license.html
*/
package net.sourceforge.pmd.lang.document;
import java.io.BufferedReader;
import java.io.IOException;
import java.io.Reader;
import java.io.Writer;
import java.nio.ByteBuffer;
import java.nio.CharBuffer;
import java.nio.charset.Charset;
import java.util.Iterator;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import java.util.stream.Stream;
import java.util.stream.StreamSupport;
import org.apache.commons.lang3.StringUtils;
import org.checkerframework.checker.nullness.qual.NonNull;
import net.sourceforge.pmd.util.IteratorUtil.AbstractIterator;
/**
* View on a string which doesn't copy the array for subsequence operations.
* This view is immutable. Since it uses a string internally it benefits from
* Java 9's compacting feature, it can also be efficiently created from
* a StringBuilder. When confronted with an instance of this interface, please
* don't create substrings unnecessarily. Both {@link #subSequence(int, int)}
* and {@link #slice(int, int)} can cut out a subsequence without copying
* the underlying byte array. The {@link Pattern} API also works perfectly
* on arbitrary {@link CharSequence}s, not just on strings. Lastly some
 * methods provided here give mediated access to the underlying string,
* which for many use cases is much more optimal than using this CharSequence
* directly, eg {@link #appendChars(StringBuilder)}, {@link #writeFully(Writer)}.
*
* @see Chars#wrap(CharSequence) Chars::wrap, the factory method
*/
public final class Chars implements CharSequence {
public static final Chars EMPTY = wrap("");
/**
* Special sentinel used by {@link #lines()}.
*/
private static final int NOT_TRIED = -2;
/**
* See {@link StringUtils#INDEX_NOT_FOUND}.
*/
private static final int NOT_FOUND = -1;
private final String str;
private final int start;
private final int len;
    // Private: use wrap() / slice() to create instances.
    // Invariant (asserted): [start, start+len) is a valid range of str.
    private Chars(String str, int start, int len) {
        validateRangeWithAssert(start, len, str.length());
        this.str = str;
        this.start = start;
        this.len = len;
    }
private int idx(int off) {
return this.start + off;
}
    /** Whether this slice is the empty string. */
    @SuppressWarnings("PMD.MissingOverride") // with Java 15, isEmpty() has been added to java.lang.CharSequence (#4291)
    public boolean isEmpty() {
        return len == 0;
    }
/**
* Wraps the given char sequence into a {@link Chars}. This may
* call {@link CharSequence#toString()}. If the sequence is already
* a {@link Chars}, returns it. This is the main factory method for
* this class. You can eg pass a StringBuilder if you want.
*/
public static Chars wrap(CharSequence chars) {
if (chars instanceof Chars) {
return (Chars) chars;
}
return new Chars(chars.toString(), 0, chars.length());
}
    /**
     * Write all characters of this buffer into the given writer.
     *
     * @param writer A writer
     *
     * @throws NullPointerException If the writer is null
     */
    public void writeFully(@NonNull Writer writer) throws IOException {
        // delegates to the ranged overload over the whole view
        write(writer, 0, length());
    }
    /**
     * Write a range of characters to the given writer.
     *
     * @param writer A writer
     * @param start  Start offset in this CharSequence (relative to this view,
     *               not to the backing string)
     * @param count  Number of characters
     *
     * @throws IOException               If the writer throws
     * @throws IndexOutOfBoundsException See {@link Writer#write(int)}
     */
    public void write(@NonNull Writer writer, int start, int count) throws IOException {
        writer.write(str, idx(start), count);
    }
/**
* Copies 'count' characters from index 'srcBegin' into the given array,
* starting at 'dstBegin'.
*
* @param srcBegin Start offset in this CharSequence
* @param cbuf Character array
* @param count Number of characters to copy
* @param dstBegin Start index in the array
*
* @throws NullPointerException If the array is null (may)
* @throws IndexOutOfBoundsException See {@link String#getChars(int, int, char[], int)}
*/
public void getChars(int srcBegin, char @NonNull [] cbuf, int dstBegin, int count) {
validateRange(srcBegin, count, length());
int start = idx(srcBegin);
str.getChars(start, start + count, cbuf, dstBegin);
}
/**
* Appends the character range identified by start and end offset into
* the string builder. This is much more efficient than calling
* {@link StringBuilder#append(CharSequence)} with this as the
* parameter, especially on Java 9+.
*
* @param start Start index (inclusive)
* @param end End index (exclusive)
*
* @throws IndexOutOfBoundsException See {@link StringBuilder#append(CharSequence, int, int)}
*/
public void appendChars(StringBuilder sb, int start, int end) {
if (end == 0) {
return;
}
sb.append(str, idx(start), idx(end));
}
/**
* Append this character sequence on the given stringbuilder.
* This is much more efficient than calling {@link StringBuilder#append(CharSequence)}
* with this as the parameter, especially on Java 9+.
*
* @param sb String builder
*/
public void appendChars(StringBuilder sb) {
sb.append(str, start, start + len);
}
    /**
     * Returns the characters of this charsequence encoded with the
     * given charset.
     */
    public ByteBuffer getBytes(Charset charset) {
        // CharBuffer.wrap(CharSequence, start, end) takes start/end indices (not a length)
        return charset.encode(CharBuffer.wrap(str, start, start + len));
    }
    /**
     * See {@link String#indexOf(String, int)}.
     *
     * <p>NOTE(review): behavior diverges from {@link String#indexOf(String, int)}
     * in two edge cases: an empty search string yields {@code 0} (String returns
     * the clamped {@code fromIndex}), and a negative {@code fromIndex} yields
     * {@code -1} (String clamps it to 0) — confirm these are intended.
     */
    public int indexOf(String searched, int fromIndex) {
        // max index in the string at which the search string may start
        final int max = start + len - searched.length();
        if (fromIndex < 0 || max < start + fromIndex) {
            return NOT_FOUND;
        } else if (searched.isEmpty()) {
            return 0;
        }
        // scan for occurrences of the first char, then check the full match;
        // matches found past 'max' would overflow this slice and are rejected
        final char fst = searched.charAt(0);
        int strpos = str.indexOf(fst, idx(fromIndex));
        while (strpos != NOT_FOUND && strpos <= max) {
            if (str.startsWith(searched, strpos)) {
                return strpos - start;
            }
            strpos = str.indexOf(fst, strpos + 1);
        }
        return NOT_FOUND;
    }
/**
* See {@link String#indexOf(int, int)}.
*/
public int indexOf(int ch, int fromIndex) {
if (fromIndex < 0 || fromIndex >= len) {
return NOT_FOUND;
}
// we want to avoid searching too far in the string
// so we don't use String#indexOf, as it would be looking
// in the rest of the file too, which in the worst case is
// horrible
int max = start + len;
for (int i = start + fromIndex; i < max; i++) {
char c = str.charAt(i);
if (c == ch) {
return i - start;
}
}
return NOT_FOUND;
}
/**
* See {@link String#lastIndexOf(int, int)}.
*/
public int lastIndexOf(int ch, int fromIndex) {
if (fromIndex < 0 || fromIndex >= len) {
return NOT_FOUND;
}
// we want to avoid searching too far in the string
// so we don't use String#indexOf, as it would be looking
// in the rest of the file too, which in the worst case is
// horrible
for (int i = start + fromIndex; i >= start; i--) {
char c = str.charAt(i);
if (c == ch) {
return i - start;
}
}
return NOT_FOUND;
}
/**
* See {@link String#startsWith(String, int)}.
*/
public boolean startsWith(String prefix, int fromIndex) {
if (fromIndex < 0 || fromIndex + prefix.length() > len) {
return false;
}
return str.startsWith(prefix, idx(fromIndex));
}
    /**
     * See {@link String#startsWith(String)}.
     */
    public boolean startsWith(String prefix) {
        return startsWith(prefix, 0);
    }
public boolean startsWith(char prefix, int fromIndex) {
if (fromIndex < 0 || fromIndex + 1 > len) {
return false;
}
return str.charAt(idx(fromIndex)) == prefix;
}
    /**
     * See {@link String#endsWith(String)}.
     */
    public boolean endsWith(String suffix) {
        // a suffix is a prefix located at (length - suffix.length)
        return startsWith(suffix, length() - suffix.length());
    }
/**
* Returns a subsequence which does not start with control characters ({@code <= 32}).
* This is consistent with {@link String#trim()}.
*/
public Chars trimStart() {
int i = start;
int maxIdx = start + len;
while (i < maxIdx && str.charAt(i) <= 32) {
i++;
}
i -= start;
return slice(i, len - i);
}
/**
* Returns a subsequence which does not end with control characters ({@code <= 32}).
* This is consistent with {@link String#trim()}.
*/
public Chars trimEnd() {
int i = start + len;
while (i > start && str.charAt(i - 1) <= 32) {
i--;
}
return slice(0, i - start);
}
    /**
     * Like {@link String#trim()}.
     */
    public Chars trim() {
        return trimStart().trimEnd();
    }
    /**
     * Remove trailing and leading blank lines. The resulting string
     * does not end with a line terminator.
     */
    public Chars trimBlankLines() {
        // offset of the first non-whitespace char (or length() if all blank)
        int offsetOfFirstNonBlankChar = length();
        for (int i = 0; i < length(); i++) {
            if (!Character.isWhitespace(charAt(i))) {
                offsetOfFirstNonBlankChar = i;
                break;
            }
        }
        // offset of the last non-whitespace char (stays 0 if none found after the first)
        int offsetOfLastNonBlankChar = 0;
        for (int i = length() - 1; i > offsetOfFirstNonBlankChar; i--) {
            if (!Character.isWhitespace(charAt(i))) {
                offsetOfLastNonBlankChar = i;
                break;
            }
        }
        // look backwards before the first non-blank char
        int cutFromInclusive = lastIndexOf('\n', offsetOfFirstNonBlankChar);
        // If firstNonBlankLineStart == -1, ie we're on the first line,
        // we want to start at zero: then we add 1 to get 0
        // If firstNonBlankLineStart >= 0, then it's the index of the
        // \n, we want to cut right after that, so we add 1.
        cutFromInclusive += 1;
        // look forwards after the last non-blank char
        int cutUntilExclusive = indexOf('\n', offsetOfLastNonBlankChar);
        if (cutUntilExclusive == StringUtils.INDEX_NOT_FOUND) {
            cutUntilExclusive = length();
        }
        return subSequence(cutFromInclusive, cutUntilExclusive);
    }
/**
* Remove the suffix if it is present, otherwise returns this.
*/
public Chars removeSuffix(String charSeq) {
int trimmedLen = length() - charSeq.length();
if (startsWith(charSeq, trimmedLen)) {
return slice(0, trimmedLen);
}
return this;
}
/**
* Remove the prefix if it is present, otherwise returns this.
*/
public Chars removePrefix(String charSeq) {
if (startsWith(charSeq)) {
return subSequence(charSeq.length(), length());
}
return this;
}
    /**
     * Returns true if this char sequence is logically equal to the
     * parameter. This means they're equal character-by-character. This
     * is more general than {@link #equals(Object)}, which will only answer
     * true if the parameter is a {@link Chars}.
     *
     * @param cs         Another char sequence
     * @param ignoreCase Whether to ignore case
     *
     * @return True if both sequences are logically equal
     */
    public boolean contentEquals(CharSequence cs, boolean ignoreCase) {
        if (cs instanceof Chars) {
            // compare region-to-region without materializing either slice
            Chars chars2 = (Chars) cs;
            return len == chars2.len && str.regionMatches(ignoreCase, start, chars2.str, chars2.start, len);
        } else {
            // other CharSequences must be converted to String for regionMatches
            return length() == cs.length() && str.regionMatches(ignoreCase, start, cs.toString(), 0, len);
        }
    }
    /**
     * Like {@link #contentEquals(CharSequence, boolean)}, considering
     * case distinctions.
     *
     * @param cs A char sequence
     *
     * @return True if both sequences are logically equal, considering case
     */
    public boolean contentEquals(CharSequence cs) {
        return contentEquals(cs, false);
    }
    /** Length of this view (may be shorter than the backing string). */
    @Override
    public int length() {
        return len;
    }
@Override
public char charAt(int index) {
if (index < 0 || index >= len) {
throw new StringIndexOutOfBoundsException(index);
}
return str.charAt(idx(index));
}
    /** {@inheritDoc} Does not copy the backing array. */
    @Override
    public Chars subSequence(int start, int end) {
        return slice(start, end - start);
    }
/**
* Returns the subsequence that starts at the given offset and ends
* at the end of this string. Similar to {@link String#substring(int)}.
*/
public Chars subSequence(int start) {
return slice(start, len - start);
}
    /**
     * Slice a region of text.
     *
     * @param region A region
     *
     * @return A Chars instance
     *
     * @throws IndexOutOfBoundsException If the region is not a valid range
     */
    public Chars slice(TextRegion region) {
        return slice(region.getStartOffset(), region.getLength());
    }
/**
* Like {@link #subSequence(int, int)} but with offset + length instead
* of start + end.
*
* @param off Start of the slice ({@code 0 <= off < this.length()})
* @param len Length of the slice ({@code 0 <= len <= this.length() - off})
*
* @return A Chars instance
*
* @throws IndexOutOfBoundsException If the parameters are not a valid range
*/
public Chars slice(int off, int len) {
validateRange(off, len, this.len);
if (len == 0) {
return EMPTY;
} else if (off == 0 && len == this.len) {
return this;
}
return new Chars(str, idx(off), len);
}
/**
* Returns the substring between the given offsets.
* given length.
*
* <p>Note: Unlike slice or subSequence, this method will create a
* new String which involves copying the backing char array. Don't
* use it unnecessarily.
*
* @param start Start offset ({@code 0 <= start < this.length()})
* @param end End offset ({@code start <= end <= this.length()})
*
* @return A substring
*
* @throws IndexOutOfBoundsException If the parameters are not a valid range
* @see String#substring(int, int)
*/
public String substring(int start, int end) {
validateRange(start, end - start, this.len);
return str.substring(idx(start), idx(end));
}
    // Assertion-only variant used in the constructor: zero cost when -ea is off.
    private static void validateRangeWithAssert(int off, int len, int bound) {
        assert len >= 0 && off >= 0 && off + len <= bound : invalidRange(off, len, bound);
    }
private static void validateRange(int off, int len, int bound) {
if (len < 0 || off < 0 || off + len > bound) {
throw new IndexOutOfBoundsException(invalidRange(off, len, bound));
}
}
private static String invalidRange(int off, int len, int bound) {
return "Invalid range [" + off + ", " + (off + len) + "[ (length " + len + ") in string of length " + bound;
}
    @Override
    public @NonNull String toString() {
        // this already avoids the copy if start == 0 && len == str.length()
        return str.substring(start, start + len);
    }
    // Equal iff the other object is a Chars with the same content (see contentEquals).
    @Override
    public boolean equals(Object o) {
        return this == o || o instanceof Chars && contentEquals((Chars) o);
    }
@Override
public int hashCode() {
if (isFullString()) {
return str.hashCode(); // hashcode is cached on strings
}
int h = 0;
for (int i = start, end = start + len; i < end; i++) {
h = h * 31 + str.charAt(i);
}
return h;
}
    // test only
    // Whether this view spans the whole backing string.
    boolean isFullString() {
        return start == 0 && len == str.length();
    }
    /**
     * Returns an iterable over the lines of this char sequence. The lines
     * are yielded without line separators. Like {@link BufferedReader#readLine()},
     * a line delimiter is {@code CR}, {@code LF} or {@code CR+LF}.
     */
    public Iterable<Chars> lines() {
        return () -> new Iterator<Chars>() {
            final int max = len;
            int pos = 0;
            // If those are NOT_TRIED, then we should scan ahead to find them
            // If the scan fails then they'll stay -1 forever and won't be tried again.
            // This is important to scan in documents where we know there are no
            // CR characters, as in our normalized TextFileContent.
            int nextCr = NOT_TRIED;
            int nextLf = NOT_TRIED;
            @Override
            public boolean hasNext() {
                return pos < max;
            }
            @Override
            public Chars next() {
                final int curPos = pos;
                // refresh lookahead positions only if they were invalidated
                if (nextCr == NOT_TRIED) {
                    nextCr = indexOf('\r', curPos);
                }
                if (nextLf == NOT_TRIED) {
                    nextLf = indexOf('\n', curPos);
                }
                final int cr = nextCr;
                final int lf = nextLf;
                if (cr != NOT_FOUND && lf != NOT_FOUND) {
                    // found both CR and LF
                    int min = Math.min(cr, lf);
                    if (lf == cr + 1) {
                        // CRLF
                        pos = lf + 1;
                        nextCr = NOT_TRIED;
                        nextLf = NOT_TRIED;
                    } else {
                        pos = min + 1;
                        resetLookahead(cr, min);
                    }
                    return subSequence(curPos, min);
                } else if (cr == NOT_FOUND && lf == NOT_FOUND) {
                    // no following line terminator, cut until the end
                    pos = max;
                    return subSequence(curPos, max);
                } else {
                    // lf or cr (exactly one is != -1 and max returns that one)
                    int idx = Math.max(cr, lf);
                    resetLookahead(cr, idx);
                    pos = idx + 1;
                    return subSequence(curPos, idx);
                }
            }
            // Invalidate only the lookahead that was just consumed; the other
            // one still points at a terminator further ahead and can be reused.
            private void resetLookahead(int cr, int idx) {
                if (idx == cr) {
                    nextCr = NOT_TRIED;
                } else {
                    nextLf = NOT_TRIED;
                }
            }
        };
    }
    /**
     * Returns a stream of lines yielded by {@link #lines()}.
     */
    public Stream<Chars> lineStream() {
        return StreamSupport.stream(lines().spliterator(), false);
    }
/**
* Returns a new stringbuilder containing the whole contents of this
* char sequence.
*/
public StringBuilder toStringBuilder() {
StringBuilder sb = new StringBuilder(length());
appendChars(sb);
return sb;
}
    /**
     * Split this slice into subslices, like {@link String#split(String)},
     * except it's iterated lazily.
     */
    public Iterable<Chars> splits(Pattern regex) {
        return () -> new AbstractIterator<Chars>() {
            final Matcher matcher = regex.matcher(Chars.this);
            // Start of the next subslice to yield, ie the end of the previous match.
            int lastPos = 0;
            // Advances the matcher by one match. Returns true only for a
            // zero-length match at offset 0 (with input remaining): that
            // match is not yielded as a slice, and the caller should call
            // this method a second time to continue the search.
            private boolean shouldRetry() {
                if (matcher.find()) {
                    if (matcher.start() == 0 && matcher.end() == 0 && lastPos != len) {
                        return true; // zero length match at the start, we should retry once
                    }
                    // yield the text between the previous match and this one
                    setNext(subSequence(lastPos, matcher.start()));
                    lastPos = matcher.end();
                } else if (lastPos != len) {
                    // no more matches: yield the tail of the sequence
                    setNext(subSequence(lastPos, len));
                } else {
                    done();
                }
                return false;
            }
            @Override
            protected void computeNext() {
                if (matcher.hitEnd()) {
                    // the previous match attempt already hit the end of input
                    done();
                } else if (shouldRetry()) {
                    shouldRetry();
                }
            }
        };
    }
    /**
     * Returns a new reader for the whole contents of this char sequence.
     * The reader supports {@link Reader#mark(int)}. The caller should
     * close it: closing drops the reader's reference to this sequence.
     */
    public Reader newReader() {
        return new CharsReader(this);
    }
private static final class CharsReader extends Reader {
private Chars chars;
private int pos;
private final int max;
private int mark = -1;
private CharsReader(Chars chars) {
this.chars = chars;
this.pos = chars.start;
this.max = chars.start + chars.len;
}
@Override
public int read(char @NonNull [] cbuf, int off, int len) throws IOException {
if (len < 0 || off < 0 || off + len > cbuf.length) {
throw new IndexOutOfBoundsException();
}
ensureOpen();
if (pos >= max) {
return NOT_FOUND;
}
int toRead = Integer.min(max - pos, len);
chars.str.getChars(pos, pos + toRead, cbuf, off);
pos += toRead;
return toRead;
}
@Override
public int read() throws IOException {
ensureOpen();
return pos >= max ? NOT_FOUND : chars.str.charAt(pos++);
}
@Override
public long skip(long n) throws IOException {
ensureOpen();
int oldPos = pos;
pos = Math.min(max, pos + (int) n);
return pos - oldPos;
}
private void ensureOpen() throws IOException {
if (chars == null) {
throw new IOException("Closed");
}
}
@Override
public void close() {
chars = null;
}
@Override
public void mark(int readAheadLimit) {
mark = pos;
}
@Override
public void reset() throws IOException {
ensureOpen();
if (mark == -1) {
throw new IOException("Reader was not marked");
}
pos = mark;
}
@Override
public boolean markSupported() {
return true;
}
}
}
| 22,748 | 30.639777 | 120 | java |
pmd | pmd-master/pmd-core/src/main/java/net/sourceforge/pmd/lang/document/TextDocument.java | /*
* BSD-style license; for more info see http://pmd.sourceforge.net/license.html
*/
package net.sourceforge.pmd.lang.document;
import java.io.Closeable;
import java.io.IOException;
import java.io.Reader;
import org.checkerframework.checker.nullness.qual.NonNull;
import net.sourceforge.pmd.annotation.DeprecatedUntil700;
import net.sourceforge.pmd.cpd.SourceCode;
import net.sourceforge.pmd.lang.LanguageVersion;
import net.sourceforge.pmd.util.datasource.DataSource;
/**
* Represents a textual document, providing methods to edit it incrementally
* and address regions of text. A text document delegates IO operations
* to a {@link TextFile}. It reflects some in-memory snapshot of the file,
* though the file may still be edited externally.
*
* <p>TextDocument is meant to replace CPD's {@link SourceCode} and PMD's
* {@link DataSource}, though the abstraction level of {@link DataSource}
* is the {@link TextFile}.
*
* <p>Note that the backing {@link TextFile} is purposefully not accessible
* from a text document. Exposing it here could lead to files being written
* to from within rules, while we want to eventually build an API that allows
* file edition based on AST manipulation.
*
* <h3>Coordinates in TextDocument</h3>
*
* This interface is an abstraction over a piece of text, which might not
* correspond to the backing source file. This allows the document to
* be a view on a piece of a larger document (eg, a Javadoc comment, or
* a string in which a language is injected). Another use case is to perform
* escape translation, while preserving the line breaks of the original source.
*
* <p>This complicates addressing within a text document. To explain it,
* consider that there is always *one* text document that corresponds to
* the backing text file, which we call the <i>root</i> text document.
* Logical documents built on top of it are called <i>views</i>.
*
* Text documents use <i>offsets</i> and {@link TextRegion} to address their
* contents. These are always relative to the {@linkplain #getText() text} of
* the document. Line and column information are provided by {@link FileLocation}
* (see {@link #toLocation(TextRegion)}), and are always absolute (ie,
* represent actual source lines in the file).
*
* <p>For instance, say you have the following file (and root text document):
* <pre>{@code
* l1
* l2 (* comment *)
* l3
* }</pre>
* and you create a view for just the section {@code (* comment *)}.
* Then, that view's offset 0 (start of the document) will map
* to the {@code (} character, while the root document's offset 0 maps
* to the start of {@code l1}. When calling {@code toLocation(caretAt(0))},
* the view will however return {@code line 2, column 4}, ie, a line/column
* that can be found when inspecting the file.
*
* <p>To reduce the potential for mistakes, views do not provide access
* to their underlying text document. That way, nodes only have access
* to a single document, and their offsets can be assumed to be in the
* coordinate system of that document.
*
* <p>This interface does not provide a way to obtain line/column
* coordinates that are relative to a view's coordinate system. This
* would complicate the construction of views significantly.
*/
public interface TextDocument extends Closeable {
    // todo logical sub-documents, to support embedded languages
    //  ideally, just slice the text, and share the positioner
    //  a problem with document slices becomes reference counting for the close routine
    // todo text edition (there are some reverted commits in the branch
    //  with part of this, including a lot of tests)
    /**
     * Returns the language version that should be used to parse this file.
     */
    LanguageVersion getLanguageVersion();
    /**
     * Returns {@link TextFile#getFileId()} for the text file backing this document.
     */
    FileId getFileId();
    /**
     * Returns the current text of this document. Note that this doesn't take
     * external modifications to the {@link TextFile} into account.
     *
     * <p>Line endings are normalized to {@link TextFileContent#NORMALIZED_LINE_TERM}.
     *
     * @see TextFileContent#getNormalizedText()
     */
    Chars getText();
    /**
     * Returns a slice of the original text. Note that this is not the
     * same as {@code getText().subsequence}, as if this document has
     * translated escapes, the returned char slice will contain the
     * untranslated escapes, whereas {@link #getText()} would return
     * the translated characters.
     *
     * @param region A region, in the coordinate system of this document
     *
     * @return The slice of the original text that corresponds to the region
     *
     * @throws IndexOutOfBoundsException If the region is not a valid range
     */
    Chars sliceOriginalText(TextRegion region);
    /**
     * Returns a slice of the source text. This is always equal to
     * {@code getText().slice(region)}, as the text is the translated text.
     *
     * @param region A region, in the coordinate system of this document
     *
     * @return The slice of the original text that corresponds to the region
     *
     * @throws IndexOutOfBoundsException If the region is not a valid range
     */
    default Chars sliceTranslatedText(TextRegion region) {
        return getText().slice(region);
    }
    /**
     * Returns a checksum for the contents of the file.
     *
     * @see TextFileContent#getCheckSum()
     */
    long getCheckSum();
    /**
     * Returns a reader over the {@linkplain #getText() text} of this document.
     * The caller is responsible for closing it.
     */
    default Reader newReader() {
        return getText().newReader();
    }
    /**
     * Returns the length in characters of the {@linkplain #getText() text}.
     */
    default int getLength() {
        return getText().length();
    }
    /**
     * Returns a text region that corresponds to the entire document,
     * in the coordinate system of this document.
     */
    default TextRegion getEntireRegion() {
        return TextRegion.fromOffsetLength(0, getLength());
    }
    /**
     * Returns a region that spans the text of all the given lines.
     * This is intended to provide a replacement for {@link SourceCode#getSlice(int, int)}.
     *
     * <p>Note that, as line numbers may only be obtained from {@link #toLocation(TextRegion)},
     * and hence are line numbers of the original source, both parameters
     * must be line numbers of the source text and not the translated text
     * that this represents.
     *
     * @param startLineInclusive Inclusive start line number (1-based)
     * @param endLineInclusive Inclusive end line number (1-based)
     *
     * @throws IndexOutOfBoundsException If the arguments do not identify
     *                                   a valid region in the source document
     */
    TextRegion createLineRange(int startLineInclusive, int endLineInclusive);
    /**
     * Turn a text region into a {@link FileLocation}. This computes
     * the line/column information for both start and end offset of
     * the region.
     *
     * @param region A region, in the coordinate system of this document
     *
     * @return A new file position, with absolute coordinates
     *
     * @throws IndexOutOfBoundsException If the argument is not a valid region in this document
     */
    FileLocation toLocation(TextRegion region);
    /**
     * Returns the line and column at the given offset (inclusive).
     * Note that the line/column cannot be converted back. They are
     * absolute in the coordinate system of the original document.
     *
     * @param offset A source offset (0-based), can range in {@code [0, length]}.
     *
     * @throws IndexOutOfBoundsException if the offset is out of bounds
     */
    default TextPos2d lineColumnAtOffset(int offset) {
        return lineColumnAtOffset(offset, true);
    }
    /**
     * Returns the line and column at the given offset.
     * Both the input offset and the output range are in the coordinates
     * of this document.
     *
     * @param offset A source offset (0-based), can range in {@code [0, length]}.
     * @param inclusive If the offset falls right after a line terminator,
     *                  two behaviours are possible. If the parameter is true,
     *                  choose the position at the start of the next line,
     *                  otherwise choose the position at the end of the line.
     *
     * @return A position, in the coordinate system of the root document
     *
     * @throws IndexOutOfBoundsException if the offset is out of bounds
     */
    TextPos2d lineColumnAtOffset(int offset, boolean inclusive);
    /**
     * Closing a document closes the underlying {@link TextFile}.
     * New editors cannot be produced after that, and the document otherwise
     * remains in its current state.
     *
     * @throws IOException If {@link TextFile#close()} throws
     * @throws IllegalStateException If an editor is currently open. In this case
     *                               the editor is rendered ineffective before the
     *                               exception is thrown. This indicates a programming
     *                               mistake.
     */
    @Override
    void close() throws IOException;
    /**
     * Create a new text document for the given text file. The document's
     * coordinate system is the same as the original text file.
     *
     * @param textFile A text file
     *
     * @return A new text document
     *
     * @throws IOException If the file cannot be read ({@link TextFile#readContents()})
     * @throws NullPointerException If the parameter is null
     */
    static TextDocument create(TextFile textFile) throws IOException {
        return new RootTextDocument(textFile);
    }
    /**
     * Returns a read-only document for the given text, with an
     * {@linkplain FileId#UNKNOWN unknown} file id.
     *
     * @see TextFile#forCharSeq(CharSequence, FileId, LanguageVersion)
     */
    static TextDocument readOnlyString(final CharSequence source, LanguageVersion lv) {
        return readOnlyString(source, FileId.UNKNOWN, lv);
    }
    /**
     * Returns a read-only document for the given text. This works as
     * if by calling {@link TextDocument#create(TextFile)} on a textfile
     * produced by {@link TextFile#forCharSeq(CharSequence, FileId, LanguageVersion) forCharSeq},
     * but doesn't throw {@link IOException}, as such text files will
     * not throw.
     *
     * @see TextFile#forCharSeq(CharSequence, FileId, LanguageVersion)
     */
    @SuppressWarnings("PMD.CloseResource")
    static TextDocument readOnlyString(@NonNull CharSequence source, @NonNull FileId filename, @NonNull LanguageVersion lv) {
        TextFile textFile = TextFile.forCharSeq(source, filename, lv);
        try {
            return create(textFile);
        } catch (IOException e) {
            throw new AssertionError("String text file should never throw IOException", e);
        }
    }
    /**
     * Variant of {@link #readOnlyString(CharSequence, FileId, LanguageVersion)}
     * taking a path-like string instead of a {@link FileId}.
     *
     * @deprecated Use {@link #readOnlyString(CharSequence, FileId, LanguageVersion)}
     */
    @Deprecated
    @DeprecatedUntil700
    // note: this method is for backwards compatibility only - currently used by pmd-designer
    static TextDocument readOnlyString(@NonNull CharSequence source, @NonNull String filename, @NonNull LanguageVersion lv) {
        return readOnlyString(source, FileId.fromPathLikeString(filename), lv);
    }
}
| 11,379 | 37.972603 | 125 | java |
pmd | pmd-master/pmd-core/src/main/java/net/sourceforge/pmd/lang/document/TextRegion.java | /*
* BSD-style license; for more info see http://pmd.sourceforge.net/license.html
*/
package net.sourceforge.pmd.lang.document;
import java.util.Comparator;
import org.checkerframework.checker.nullness.qual.NonNull;
import org.checkerframework.checker.nullness.qual.Nullable;
/**
* A contiguous range of text in a {@link TextDocument}. Empty regions may
* be thought of like caret positions in an IDE. An empty region at offset
* {@code n} does not contain the character at offset {@code n} in the
* document, but if it were a caret, typing a character {@code c} would
* make {@code c} the character at offset {@code n} in the document.
*
* <p>Line and column information may be added by {@link TextDocument#toLocation(TextRegion)}.
*
* <p>Regions are not bound to a specific document, keeping a reference
* to them does not prevent the document from being garbage-collected.
*
* <p>Regions are represented as a simple offset+length tuple. In a document,
* valid start offsets range from 0 to {@link TextDocument#getLength()} (inclusive).
* The sum {@code startOffset + length} must range from {@code startOffset}
* to {@link TextRegion#getLength()} (inclusive).
*
* <p>Those rules make the region starting at {@link TextDocument#getLength()}
* with length 0 a valid region (the caret position at the end of the document).
*
* <p>For example, for a document of length 1 ({@code "c"}), there
* are only three valid regions:
* <pre>{@code
* [[c : caret position at offset 0 (empty region)
* [c[ : range containing the character
* c[[ : caret position at offset 1 (empty region)
* }</pre>
*/
public final class TextRegion implements Comparable<TextRegion> {

    private final int startOffset;
    private final int length;

    private TextRegion(int startOffset, int length) {
        this.startOffset = startOffset;
        this.length = length;
        assert startOffset >= 0 && length >= 0 : "Invalid region " + this;
    }

    /** 0-based, inclusive index. */
    public int getStartOffset() {
        return startOffset;
    }

    /** 0-based, exclusive index. */
    public int getEndOffset() {
        return startOffset + length;
    }

    /**
     * Returns the number of characters spanned by this region, that is,
     * the difference between end offset and start offset. Every character
     * counts for exactly one, including {@code '\t'}; the sequence
     * {@code "\r\n"} counts for two.
     */
    public int getLength() {
        return length;
    }

    /**
     * Returns true if this region spans zero characters. Such a region
     * can be viewed as a caret position, and eg used for text insertion.
     */
    public boolean isEmpty() {
        return length == 0;
    }

    /**
     * Returns true if the character at the given offset lies within
     * this region. Note that an empty region contains no character at
     * all, not even the one at its start offset.
     *
     * @param offset Offset of a character
     */
    public boolean contains(int offset) {
        return offset >= getStartOffset() && offset < getEndOffset();
    }

    /**
     * Returns true if the other region lies entirely within this one.
     * Every region contains itself.
     *
     * @param other Other region
     */
    public boolean contains(TextRegion other) {
        return other.getStartOffset() >= this.getStartOffset()
            && other.getEndOffset() <= this.getEndOffset();
    }

    /**
     * Returns true if this region and the other one have at least one
     * character in common. This relation is symmetric.
     *
     * @param other Other region
     */
    public boolean overlaps(TextRegion other) {
        TextRegion common = intersect(this, other);
        return common != null && !common.isEmpty();
    }

    /**
     * Returns a region with the same end offset, whose start is moved
     * 'delta' characters to the left. A negative delta moves the start
     * to the right instead (the end stays fixed either way).
     *
     * @throws AssertionError If the parameter cannot produce a valid region
     */
    public TextRegion growLeft(int delta) {
        assert delta + length >= 0 : "Left delta " + delta + " would produce a negative length region " + parThis();
        assert startOffset - delta >= 0 : "Left delta " + delta + " would produce a region that starts before zero " + parThis();
        return new TextRegion(startOffset - delta, delta + length);
    }

    /**
     * Returns a region with the same start offset, whose end is moved
     * 'delta' characters to the right. A negative delta moves the end
     * to the left instead (the start stays fixed either way).
     *
     * @throws AssertionError If the delta is negative and less than the length of this region
     */
    public TextRegion growRight(int delta) {
        assert delta + length >= 0 : "Right delta " + delta + " would produce a negative length region " + parThis();
        return new TextRegion(startOffset, delta + length);
    }

    /**
     * Computes the intersection of the two regions: the largest region
     * that both arguments contain. It may have length zero. If the
     * regions are completely disjoint there is no intersection, and
     * null is returned.
     *
     * @return The intersection, if it exists
     */
    public static @Nullable TextRegion intersect(TextRegion r1, TextRegion r2) {
        int maxStart = Math.max(r1.getStartOffset(), r2.getStartOffset());
        int minEnd = Math.min(r1.getEndOffset(), r2.getEndOffset());
        if (maxStart > minEnd) {
            return null;
        }
        return fromBothOffsets(maxStart, minEnd);
    }

    /**
     * Computes the union of the two regions: the smallest region that
     * contains both arguments.
     *
     * @return The union of both regions
     */
    public static TextRegion union(TextRegion r1, TextRegion r2) {
        if (r1.equals(r2)) {
            return r1;
        }
        int minStart = Math.min(r1.getStartOffset(), r2.getStartOffset());
        int maxEnd = Math.max(r1.getEndOffset(), r2.getEndOffset());
        return fromBothOffsets(minStart, maxEnd);
    }

    /**
     * Builds a new region from offset and length.
     *
     * @throws AssertionError If either parameter is negative
     */
    public static TextRegion fromOffsetLength(int startOffset, int length) {
        return new TextRegion(startOffset, length);
    }

    /**
     * Builds a new region from start and end offset.
     *
     * @param startOffset Start offset
     * @param endOffset End offset
     *
     * @throws AssertionError If either offset is negative, or the two
     *                        offsets are not ordered
     */
    public static TextRegion fromBothOffsets(int startOffset, int endOffset) {
        return new TextRegion(startOffset, endOffset - startOffset);
    }

    /**
     * Builds an empty region located at the given offset, which may be
     * viewed as a caret position.
     *
     * @param startOffset Offset for start and end of the position.
     *
     * @throws AssertionError If the offset is negative
     */
    public static TextRegion caretAt(int startOffset) {
        return new TextRegion(startOffset, 0);
    }

    /**
     * Checks that the parameters identify a valid region in the given
     * document. Provided as a debugging aid, this is a noop unless
     * assertions are enabled.
     */
    public static boolean isValidRegion(int startOffset, int endOffset, TextDocument doc) {
        assert startOffset >= 0 : "Negative start offset: " + startOffset;
        assert endOffset >= 0 : "Negative end offset: " + endOffset;
        assert startOffset <= endOffset : "Start and end offset are not ordered: " + startOffset + " > " + endOffset;
        assert endOffset <= doc.getLength() : "End offset " + endOffset + " out of range for doc of length " + doc.getLength();
        return true;
    }

    private String parThis() {
        return "(" + this + ")";
    }

    /** Compares the start offset, then the length of a region. */
    @Override
    public int compareTo(@NonNull TextRegion o) {
        int byStart = Integer.compare(this.getStartOffset(), o.getStartOffset());
        return byStart != 0 ? byStart : Integer.compare(this.getLength(), o.getLength());
    }

    @Override
    public String toString() {
        return "Region(start=" + startOffset + ", len=" + length + ")";
    }

    @Override
    public boolean equals(Object o) {
        if (o == this) {
            return true;
        }
        if (!(o instanceof TextRegion)) {
            return false;
        }
        TextRegion other = (TextRegion) o;
        return this.startOffset == other.getStartOffset()
            && this.length == other.getLength();
    }

    @Override
    public int hashCode() {
        return 31 * startOffset + length;
    }
}
| 8,961 | 34.283465 | 129 | java |
pmd | pmd-master/pmd-core/src/main/java/net/sourceforge/pmd/lang/document/SourceCodePositioner.java | /*
* BSD-style license; for more info see http://pmd.sourceforge.net/license.html
*/
package net.sourceforge.pmd.lang.document;
import java.util.Arrays;
import net.sourceforge.pmd.util.AssertionUtil;
/**
* Wraps a piece of text, and converts absolute offsets to line/column
* coordinates, and back. This is used by the {@link TextDocument} implementation.
*
* <p>This used to be public. We don't need it anymore, {@link TextDocument}
* is a higher level abstraction.
*/
final class SourceCodePositioner {
    // Idea from:
    // http://code.google.com/p/closure-compiler/source/browse/trunk/src/com/google/javascript/jscomp/SourceFile.java
    /**
     * Each entry is the inclusive start offset of a line (zero based). Never empty.
     * The last entry has the offset of the EOF, to avoid overflows.
     */
    private final int[] lineOffsets;
    // Length of the wrapped text, equal to the last entry of lineOffsets.
    private final int sourceCodeLength;
    private SourceCodePositioner(int[] offsets, int len) {
        this.lineOffsets = offsets;
        this.sourceCodeLength = len;
    }
    // test only
    int[] getLineOffsets() {
        return lineOffsets;
    }
    /**
     * Returns the line/column coordinates of the given offset.
     *
     * @param offset Offset in the document (zero-based), in {@code [0, sourceCodeLength]}
     * @param inclusive If the offset falls exactly on the start of a line
     *                  and this is false, the position returned is the one
     *                  at the end of the previous line instead
     *
     * @throws IndexOutOfBoundsException If the offset is out of range
     */
    TextPos2d lineColFromOffset(int offset, boolean inclusive) {
        AssertionUtil.requireInInclusiveRange("offset", offset, 0, sourceCodeLength);
        int line = searchLineOffset(offset);
        int lineIdx = line - 1; // zero-based
        if (lineIdx != 0 && offset == lineOffsets[lineIdx] && !inclusive) {
            // we're precisely on the start of a line
            // not inclusive: prefer the position at the end of the previous
            // line (lineIdx is also the 1-based number of the previous line)
            // This is a subtlety that the other methods for offset -> line do not
            // handle. This is because an offset may be interpreted as the index
            // of a character, or the caret position between two characters. This
            // is relevant when building text regions, to respect inclusivity, etc.
            return TextPos2d.pos2d(lineIdx, getLastColumnOfLine(lineIdx));
        }
        return TextPos2d.pos2d(line, 1 + offset - lineOffsets[lineIdx]);
    }
    /**
     * Returns the line number of the character at the given offset.
     *
     * @param offset Offset in the document (zero-based)
     *
     * @return Line number (1-based), or -1
     *
     * @throws IndexOutOfBoundsException If the offset is invalid in this document
     */
    public int lineNumberFromOffset(final int offset) {
        AssertionUtil.requireIndexNonNegative("offset", offset);
        if (offset > sourceCodeLength) {
            return -1;
        }
        return searchLineOffset(offset);
    }
    // Binary search for the line containing the offset, ie the line with
    // the greatest start offset <= offset. The search excludes the last
    // entry (the EOF offset). When the offset starts a line, binarySearch
    // returns its index exactly; otherwise it returns the complement of
    // the insertion point. Both map to the 1-based line number as below.
    private int searchLineOffset(int offset) {
        int search = Arrays.binarySearch(lineOffsets, 0, lineOffsets.length - 1, offset);
        return search >= 0 ? search + 1 : ~search;
    }
    /**
     * Returns the column number of the character at the given offset.
     * The offset is not relative to the line (the line number is just
     * a hint). If the column number does not exist (on the given line),
     * returns -1.
     *
     * @param lineNumber Line number (1-based)
     * @param globalOffset Global offset in the document (zero-based)
     *
     * @return Column number (1-based), or -1
     *
     * @throws IndexOutOfBoundsException If the line number does not exist
     */
    public int columnFromOffset(final int lineNumber, final int globalOffset) {
        AssertionUtil.requireInPositiveRange("Line number", lineNumber, lineOffsets.length);
        int lineIndex = lineNumber - 1;
        // lineOffsets[lineNumber] is the (exclusive) end offset of the line
        if (globalOffset > lineOffsets[lineNumber]) {
            // throw new IllegalArgumentException("Column " + (col + 1) + " does not exist on line " + lineNumber);
            return -1;
        }
        return globalOffset - lineOffsets[lineIndex] + 1; // 1-based column offsets
    }
    /**
     * Finds the offset of a position given (line,column) coordinates.
     * Returns -1 if the parameters don't identify a caret position in
     * the wrapped text.
     *
     * @param line Line number (1-based)
     * @param column Column number (1-based)
     *
     * @return Text offset (zero-based), or -1
     */
    public int offsetFromLineColumn(final int line, final int column) {
        if (!isValidLine(line)) {
            // special case: the caret position just after the last line's
            // terminator is a valid position (it is the EOF position)
            if (line == lineOffsets.length && column == 1) {
                return sourceCodeLength;
            }
            return -1;
        }
        final int lineIdx = line - 1;
        int bound = offsetOfEndOfLine(line);
        int off = lineOffsets[lineIdx] + column - 1;
        return off > bound ? -1 : off;
    }
    /**
     * Returns the offset of the end of the given line. This is the caret
     * position that follows the last character on the line (which includes
     * the line terminator if any). This is the caret position at the
     * start of the next line, except if the line is the last in the document.
     *
     * @param line Line number (1-based)
     *
     * @return Text offset
     *
     * @throws IndexOutOfBoundsException If the line is invalid
     */
    public int offsetOfEndOfLine(final int line) {
        if (!isValidLine(line)) {
            throw new IndexOutOfBoundsException(
                line + " is not a valid line number, expected at most " + lineOffsets.length);
        }
        return lineOffsets[line];
    }
    // True if the 1-based line number exists in this document.
    boolean isValidLine(int line) {
        return line >= 1 && line <= getLastLine();
    }
    /**
     * Returns the number of lines, which is also the ordinal of the
     * last line.
     */
    public int getLastLine() {
        return lineOffsets.length - 1;
    }
    public int getNumLines() {
        return getLastLine();
    }
    /**
     * Returns the last column number of the last line in the document.
     */
    public int getLastLineColumn() {
        return getLastColumnOfLine(getLastLine());
    }
    // line is 1-based; lineOffsets[line] is the end offset of that line.
    private int getLastColumnOfLine(int line) {
        if (line == 0) {
            return 1 + lineOffsets[line];
        } else {
            return 1 + lineOffsets[line] - lineOffsets[line - 1];
        }
    }
    /**
     * Builds a new positioner for the given char sequence.
     * The char sequence should have its newline delimiters normalized
     * to {@link TextFileContent#NORMALIZED_LINE_TERM}.
     * The char sequence should not change state (eg a {@link StringBuilder})
     * after construction, otherwise this positioner becomes unreliable.
     *
     * @param charSeq Text to wrap
     */
    public static SourceCodePositioner create(CharSequence charSeq) {
        final int len = charSeq.length();
        Builder builder = new Builder();
        int off = 0;
        // Only LF is scanned for: the input is expected to be normalized
        // (see the javadoc above).
        while (off < len) {
            char c = charSeq.charAt(off);
            if (c == '\n') {
                builder.addLineEndAtOffset(off + 1);
            }
            off++;
        }
        return builder.build(len);
    }
    /** Accumulates line start offsets, growing its buffer as needed. */
    static final class Builder {
        private int[] buf;
        private int count = 1; // note the first element of the buffer is always 0 (the offset of the first line)
        // Last offset recorded, used to check that offsets are monotonic.
        private int lastLineOffset = 0;
        Builder(int bufSize) {
            buf = new int[Math.max(1, bufSize)];
        }
        Builder() {
            this(400);
        }
        /**
         * Record a line ending. The parameter must be monotonically increasing.
         *
         * @param offset The index of the character right after the line
         *               terminator in the source text. Eg for {@code \r\n}
         *               or {@code \n}, it's the index of the {@code \n}, plus 1.
         */
        public void addLineEndAtOffset(int offset) {
            addLineImpl(offset, false);
        }
        private void addLineImpl(int offset, boolean isEof) {
            // Offsets must be strictly increasing, except the EOF offset,
            // which may equal the last line offset (empty last line).
            if (offset < 0 || offset < lastLineOffset || offset == lastLineOffset && !isEof) {
                throw new IllegalArgumentException(
                    "Invalid offset " + offset + " (last offset " + lastLineOffset + ")"
                );
            }
            lastLineOffset = offset;
            if (count >= buf.length) {
                // grow the buffer geometrically
                buf = Arrays.copyOf(buf, buf.length * 2 + 1);
            }
            buf[count] = offset;
            count++;
        }
        public SourceCodePositioner build(int eofOffset) {
            // the EOF offset is always recorded as the last entry
            addLineImpl(eofOffset, true);
            int[] finalOffsets = Arrays.copyOf(buf, count);
            return new SourceCodePositioner(finalOffsets, eofOffset);
        }
    }
}
| 8,502 | 32.214844 | 117 | java |
pmd | pmd-master/pmd-core/src/main/java/net/sourceforge/pmd/lang/document/FragmentedTextDocument.java | /*
* BSD-style license; for more info see http://pmd.sourceforge.net/license.html
*/
package net.sourceforge.pmd.lang.document;
import org.checkerframework.checker.nullness.qual.Nullable;
import net.sourceforge.pmd.lang.LanguageVersion;
/**
* A text document built as a set of deltas over another document.
*/
final class FragmentedTextDocument extends BaseMappedDocument implements TextDocument {
    /** Full translated (output) text, built once from the fragment chain. */
    private final Chars text;
    // Cache of the fragment that served the last offset lookup. Lookups
    // tend to be clustered, so starting the search here instead of at the
    // head of the chain is critical for performance.
    private Fragment lastAccessedFragment;
    FragmentedTextDocument(TextDocument base, Fragment firstFragment, Fragment lastFragment) {
        super(base);
        assert firstFragment != lastFragment; // NOPMD
        this.text = toChars(firstFragment, lastFragment);
        this.lastAccessedFragment = firstFragment;
    }
    // Concatenates the output text of the whole fragment chain.
    // lastFragment.outEnd() is the total output length, used to presize.
    private static Chars toChars(Fragment firstFragment, Fragment lastFragment) {
        StringBuilder sb = new StringBuilder(lastFragment.outEnd());
        Fragment f = firstFragment;
        while (f != null) {
            f.getChars().appendChars(sb);
            f = f.next;
        }
        return Chars.wrap(sb);
    }
    @Override
    public Chars getText() {
        return text;
    }
    @Override
    public LanguageVersion getLanguageVersion() {
        return base.getLanguageVersion();
    }
    /**
     * Maps an offset in the translated (output) text to the corresponding
     * offset in the base document's (input) text, by locating the fragment
     * whose output range contains the offset.
     */
    @Override
    protected int localOffsetTransform(int outOffset, boolean inclusive) {
        // caching the last accessed fragment instead of doing
        // a linear search is critical for performance.
        Fragment f = this.lastAccessedFragment;
        if (f == null) {
            return outOffset;
        }
        // Whether the fragment contains the offset we're looking for.
        // Will be true most of the time.
        boolean containsOffset =
            f.outStart() <= outOffset && outOffset < f.outEnd();
        if (!containsOffset) {
            // Slow path, we must search for the fragment
            // This optimisation is important, otherwise we have
            // to search for very long times in some files
            if (f.outEnd() < outOffset) { // search forward
                while (f.next != null && f.outEnd() < outOffset) {
                    f = f.next;
                }
            } else { // search backwards
                while (f.prev != null && outOffset <= f.outStart()) {
                    f = f.prev;
                }
            }
            // remember the landing fragment for the next lookup
            lastAccessedFragment = f;
        }
        if (inclusive && f.outEnd() == outOffset && f.next != null) {
            // Inclusive means, the offset must correspond to a character in the source document.
            // Here we have to skip forward to the fragment that contains the character, because
            // it's not this one.
            do {
                f = f.next;
            } while (f.next != null && f.outLen() == 0);
        }
        return f.outToIn(outOffset);
    }
    /**
     * A delta from the original text to the translated text. This maps
     * a region of the original document to some new characters.
     *
     * <p>Fragments form a doubly-linked chain; each fragment derives its
     * input/output start offsets from its predecessor.
     */
    static final class Fragment {
        /** Replacement characters: the output text of this fragment. */
        private final Chars chars;
        final @Nullable Fragment prev;
        @Nullable Fragment next;
        /** Start offset of the replaced region in the input document. */
        private final int inStart;
        /** Length of the replaced region in the input document. */
        private final int inLength;
        /** Start offset of this fragment's text in the output document. */
        private final int outStart;
        Fragment(@Nullable Fragment prev, int inLength, Chars chars) {
            this.chars = chars;
            this.prev = prev;
            this.inLength = inLength;
            if (prev != null) {
                // link into the chain, continuing where prev ends
                prev.next = this;
                this.outStart = prev.outEnd();
                this.inStart = prev.inEnd();
            } else {
                this.outStart = 0;
                this.inStart = 0;
            }
        }
        public Chars getChars() {
            return chars;
        }
        int outStart() {
            return outStart;
        }
        int outLen() {
            return chars.length();
        }
        int outEnd() {
            return outStart() + outLen();
        }
        int inStart() {
            return inStart;
        }
        int inLen() {
            return inLength;
        }
        int inEnd() {
            return inStart() + inLen();
        }
        // Translate an output offset to an input offset (both absolute).
        int outToIn(int outOffset) {
            return inStart() + outOffset - outStart();
        }
        // Translate an input offset to an output offset (both absolute).
        int inToOut(int inOffset) {
            return inOffset - inStart() + outStart();
        }
        @Override
        public String toString() {
            return "Fragment[" + inStart() + ".." + inEnd() + " -> " + outStart() + ".." + outEnd() + "]" + chars;
        }
    }
}
| 4,606 | 27.438272 | 114 | java |
pmd | pmd-master/pmd-core/src/main/java/net/sourceforge/pmd/lang/document/StringTextFile.java | /*
* BSD-style license; for more info see http://pmd.sourceforge.net/license.html
*/
package net.sourceforge.pmd.lang.document;
import org.checkerframework.checker.nullness.qual.NonNull;
import net.sourceforge.pmd.lang.LanguageVersion;
import net.sourceforge.pmd.util.AssertionUtil;
import net.sourceforge.pmd.util.StringUtil;
/**
* Read-only view on a string.
*/
class StringTextFile implements TextFile {

    private final TextFileContent content;
    private final FileId fileId;
    private final LanguageVersion languageVersion;

    StringTextFile(CharSequence source, FileId fileId, LanguageVersion languageVersion) {
        // fail fast on nulls before doing any work
        AssertionUtil.requireParamNotNull("source text", source);
        AssertionUtil.requireParamNotNull("file name", fileId);
        AssertionUtil.requireParamNotNull("language version", languageVersion);
        this.content = TextFileContent.fromCharSeq(source);
        this.fileId = fileId;
        this.languageVersion = languageVersion;
    }

    @Override
    public FileId getFileId() {
        return fileId;
    }

    @Override
    public @NonNull LanguageVersion getLanguageVersion() {
        return languageVersion;
    }

    @Override
    public TextFileContent readContents() {
        // computed once in the constructor, never changes
        return content;
    }

    @Override
    public void close() {
        // in-memory file: nothing to release
    }

    @Override
    public String toString() {
        String preview = StringUtil.elide(content.getNormalizedText().toString(), 40, "...");
        return "ReadOnlyString[" + preview + "]";
    }
}
| 1,503 | 24.931034 | 109 | java |
pmd | pmd-master/pmd-core/src/main/java/net/sourceforge/pmd/lang/document/TextFileBuilder.java | /*
* BSD-style license; for more info see http://pmd.sourceforge.net/license.html
*/
package net.sourceforge.pmd.lang.document;
import java.io.Reader;
import java.nio.charset.Charset;
import java.nio.file.Path;
import org.checkerframework.checker.nullness.qual.Nullable;
import net.sourceforge.pmd.lang.LanguageVersion;
import net.sourceforge.pmd.util.AssertionUtil;
/**
 * A builder for a new text file. Instances are obtained through the
 * static factory methods of {@link TextFile}.
 */
@SuppressWarnings("PMD.MissingStaticMethodInNonInstantiatableClass")
public abstract class TextFileBuilder {

    protected final LanguageVersion languageVersion;
    protected FileId parentFsId;

    TextFileBuilder(LanguageVersion languageVersion) {
        this.languageVersion = AssertionUtil.requireParamNotNull("language version", languageVersion);
    }

    /**
     * Specify that the built file is read only. Some text files are
     * always read-only.
     *
     * @return This builder
     */
    public TextFileBuilder asReadOnly() {
        // no-op by default, appropriate for always-read-only file types
        return this;
    }

    public TextFileBuilder setParentFsPath(@Nullable FileId fileId) {
        parentFsId = fileId;
        return this;
    }

    /**
     * Creates and returns the new text file.
     */
    public abstract TextFile build();

    /** Builds a {@link NioTextFile} from a path on some filesystem. */
    static class ForNio extends TextFileBuilder {

        private final Path path;
        private final Charset charset;
        private boolean readOnly = false;

        ForNio(LanguageVersion languageVersion, Path path, Charset charset) {
            super(languageVersion);
            this.path = AssertionUtil.requireParamNotNull("path", path);
            this.charset = AssertionUtil.requireParamNotNull("charset", charset);
        }

        @Override
        public TextFileBuilder asReadOnly() {
            readOnly = true;
            return this;
        }

        @Override
        public TextFile build() {
            return new NioTextFile(path, parentFsId, charset, languageVersion, readOnly);
        }
    }

    /** Builds a {@link StringTextFile} from an in-memory char sequence. */
    static class ForCharSeq extends TextFileBuilder {

        private final CharSequence charSequence;
        private FileId fileId;

        ForCharSeq(CharSequence charSequence, FileId fileId, LanguageVersion languageVersion) {
            super(languageVersion);
            this.charSequence = AssertionUtil.requireParamNotNull("charseq", charSequence);
            this.fileId = AssertionUtil.requireParamNotNull("path ID", fileId);
        }

        @Override
        public TextFileBuilder setParentFsPath(@Nullable FileId parent) {
            // re-root our file id under the parent before recording it
            fileId = FileId.asChildOf(fileId, parent);
            return super.setParentFsPath(parent);
        }

        @Override
        public TextFile build() {
            return new StringTextFile(charSequence, fileId, languageVersion);
        }
    }

    /** Builds a {@link ReaderTextFile} by draining a {@link Reader}. */
    static class ForReader extends TextFileBuilder {

        private final Reader reader;
        private FileId fileId;

        ForReader(LanguageVersion languageVersion, Reader reader, FileId fileId) {
            super(languageVersion);
            this.reader = AssertionUtil.requireParamNotNull("reader", reader);
            this.fileId = AssertionUtil.requireParamNotNull("path ID", fileId);
        }

        @Override
        public TextFileBuilder setParentFsPath(@Nullable FileId parent) {
            // re-root our file id under the parent before recording it
            fileId = FileId.asChildOf(fileId, parent);
            return super.setParentFsPath(parent);
        }

        @Override
        public TextFile build() {
            return new ReaderTextFile(reader, fileId, languageVersion);
        }
    }
}
| 3,660 | 28.524194 | 102 | java |
pmd | pmd-master/pmd-core/src/main/java/net/sourceforge/pmd/lang/document/TextPos2d.java | /*
* BSD-style license; for more info see http://pmd.sourceforge.net/license.html
*/
package net.sourceforge.pmd.lang.document;
import org.checkerframework.checker.nullness.qual.NonNull;
/**
 * A place in a text document, represented as line/column information.
 * Both coordinates are 1-based.
 */
public final class TextPos2d implements Comparable<TextPos2d> {

    private final int line;
    private final int column;

    private TextPos2d(int line, int column) {
        this.line = line;
        this.column = column;
        // both coordinates are 1-based, zero or negative values are invalid
        assert line > 0 && column > 0 : "Invalid position " + toTupleString();
    }

    /**
     * Returns the (1-based) line number.
     */
    public int getLine() {
        return line;
    }

    /**
     * Returns the (1-based) column number.
     */
    public int getColumn() {
        return column;
    }

    /**
     * Builds a new position from line and column numbers (both 1-based).
     *
     * @param line Line number (1-based)
     * @param column Column number (1-based)
     *
     * @throws AssertionError If either parameter is not positive
     *                        (only when assertions are enabled)
     */
    public static TextPos2d pos2d(int line, int column) {
        return new TextPos2d(line, column);
    }

    /** Compares the line number, then the column number. */
    @Override
    public int compareTo(TextPos2d that) {
        int cmp = Integer.compare(this.getLine(), that.getLine());
        if (cmp != 0) {
            return cmp;
        }
        return Integer.compare(this.getColumn(), that.getColumn());
    }

    /**
     * Returns a string looking like {@code "(line=2, column=4)"}.
     */
    public String toTupleString() {
        return "(line=" + line + ", column=" + column + ")";
    }

    /**
     * Returns a string looking like {@code "line 2, column 4")}.
     */
    public String toDisplayStringInEnglish() {
        return "line " + line + ", column " + column;
    }

    /**
     * Returns a string looking like {@code "2:4")}.
     */
    public String toDisplayStringWithColon() {
        return line + ":" + column;
    }

    @Override
    public String toString() {
        return "!debug only! Pos2d(line=" + line + ", column=" + column + ")";
    }

    @Override
    public boolean equals(Object o) {
        if (this == o) {
            return true;
        }
        if (!(o instanceof TextPos2d)) {
            return false;
        }
        TextPos2d that = (TextPos2d) o;
        return line == that.getLine()
            && column == that.getColumn();
    }

    @Override
    public int hashCode() {
        return line * 31 + column;
    }
}
| 2,464 | 23.166667 | 79 | java |
pmd | pmd-master/pmd-core/src/main/java/net/sourceforge/pmd/lang/document/FileCollector.java | /*
* BSD-style license; for more info see http://pmd.sourceforge.net/license.html
*/
package net.sourceforge.pmd.lang.document;
import java.io.Closeable;
import java.io.IOException;
import java.net.URI;
import java.nio.charset.Charset;
import java.nio.charset.StandardCharsets;
import java.nio.file.FileSystem;
import java.nio.file.FileSystemAlreadyExistsException;
import java.nio.file.FileSystemNotFoundException;
import java.nio.file.FileSystems;
import java.nio.file.FileVisitOption;
import java.nio.file.FileVisitResult;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.ProviderNotFoundException;
import java.nio.file.SimpleFileVisitor;
import java.nio.file.attribute.BasicFileAttributes;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Comparator;
import java.util.EnumSet;
import java.util.HashSet;
import java.util.Iterator;
import java.util.LinkedHashSet;
import java.util.List;
import java.util.Objects;
import java.util.Set;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import net.sourceforge.pmd.PmdAnalysis;
import net.sourceforge.pmd.annotation.Experimental;
import net.sourceforge.pmd.annotation.InternalApi;
import net.sourceforge.pmd.internal.util.IOUtil;
import net.sourceforge.pmd.lang.Language;
import net.sourceforge.pmd.lang.LanguageVersion;
import net.sourceforge.pmd.lang.LanguageVersionDiscoverer;
import net.sourceforge.pmd.util.AssertionUtil;
import net.sourceforge.pmd.util.log.MessageReporter;
/**
* Collects files to analyse before a PMD run. This API allows opening
* zip files and makes sure they will be closed at the end of a run.
*
* @author Clément Fournier
*/
@SuppressWarnings("PMD.CloseResource")
public final class FileCollector implements AutoCloseable {
private static final Logger LOG = LoggerFactory.getLogger(FileCollector.class);
private final Set<TextFile> allFilesToProcess = new LinkedHashSet<>();
private final List<Closeable> resourcesToClose = new ArrayList<>();
private Charset charset = StandardCharsets.UTF_8;
private final LanguageVersionDiscoverer discoverer;
private final MessageReporter reporter;
private final FileId outerFsPath;
private boolean closed;
// construction
private FileCollector(LanguageVersionDiscoverer discoverer, MessageReporter reporter, FileId outerFsPath) {
this.discoverer = discoverer;
this.reporter = reporter;
this.outerFsPath = outerFsPath;
}
/**
* Internal API: please use {@link PmdAnalysis#files()} instead of
* creating a collector yourself.
*/
@InternalApi
public static FileCollector newCollector(LanguageVersionDiscoverer discoverer, MessageReporter reporter) {
return new FileCollector(discoverer, reporter, null);
}
/**
* Returns a new collector using the configuration except for the logger.
*/
@InternalApi
public FileCollector newCollector(MessageReporter logger) {
FileCollector fileCollector = new FileCollector(discoverer, logger, null);
fileCollector.charset = this.charset;
return fileCollector;
}
// public behaviour
/**
* Returns an unmodifiable list of all files that have been collected.
*
* <p>Internal: This might be unstable until PMD 7, but it's internal.
*/
@InternalApi
public List<TextFile> getCollectedFiles() {
if (closed) {
throw new IllegalStateException("Collector was closed!");
}
List<TextFile> allFilesToProcess = new ArrayList<>(this.allFilesToProcess);
allFilesToProcess.sort(Comparator.comparing(TextFile::getFileId));
return Collections.unmodifiableList(allFilesToProcess);
}
/**
* Returns the reporter for the file collection phase.
*/
@InternalApi
public MessageReporter getReporter() {
return reporter;
}
/**
* Close registered resources like zip files.
*/
@Override
public void close() {
if (closed) {
return;
}
closed = true;
Exception exception = IOUtil.closeAll(resourcesToClose);
if (exception != null) {
reporter.errorEx("Error while closing resources", exception);
}
}
// collection
/**
* Add a file, language is determined automatically from
* the extension/file patterns. The encoding is the current
* encoding ({@link #setCharset(Charset)}).
*
* @param file File to add
*
* @return True if the file has been added
*/
public boolean addFile(Path file) {
if (!Files.isRegularFile(file)) {
reporter.error("Not a regular file: {0}", file);
return false;
}
LanguageVersion languageVersion = discoverLanguage(file.toString());
return languageVersion != null
&& addFileImpl(TextFile.builderForPath(file, charset, languageVersion)
.setParentFsPath(outerFsPath)
.build());
}
/**
* Add a file with the given language (which overrides the file patterns).
* The encoding is the current encoding ({@link #setCharset(Charset)}).
*
* @param file Path to a file
* @param language A language. The language version will be taken to be the
* contextual default version.
*
* @return True if the file has been added
*/
public boolean addFile(Path file, Language language) {
AssertionUtil.requireParamNotNull("language", language);
if (!Files.isRegularFile(file)) {
reporter.error("Not a regular file: {0}", file);
return false;
}
LanguageVersion lv = discoverer.getDefaultLanguageVersion(language);
Objects.requireNonNull(lv);
return addFileImpl(TextFile.builderForPath(file, charset, lv)
.setParentFsPath(outerFsPath)
.build());
}
/**
* Add a pre-configured text file. The language version will be checked
* to match the contextual default for the language (the file cannot be added
* if it has a different version).
*
* @return True if the file has been added
*/
public boolean addFile(TextFile textFile) {
AssertionUtil.requireParamNotNull("textFile", textFile);
return checkContextualVersion(textFile) && addFileImpl(textFile);
}
/**
* Add a text file given its contents and a name. The language version
* will be determined from the name as usual.
*
* @return True if the file has been added
*/
public boolean addSourceFile(FileId fileId, String sourceContents) {
AssertionUtil.requireParamNotNull("sourceContents", sourceContents);
AssertionUtil.requireParamNotNull("pathId", fileId);
LanguageVersion version = discoverLanguage(fileId.getFileName());
return version != null
&& addFileImpl(TextFile.builderForCharSeq(sourceContents, fileId, version)
.setParentFsPath(outerFsPath)
.build());
}
private boolean addFileImpl(TextFile textFile) {
LOG.trace("Adding file {} (lang: {}) ", textFile.getFileId().getAbsolutePath(), textFile.getLanguageVersion().getTerseName());
if (allFilesToProcess.add(textFile)) {
return true;
}
LOG.trace("File was already collected, skipping");
return false;
}
private LanguageVersion discoverLanguage(String file) {
if (discoverer.getForcedVersion() != null) {
return discoverer.getForcedVersion();
}
List<Language> languages = discoverer.getLanguagesForFile(file);
if (languages.isEmpty()) {
LOG.trace("File {} matches no known language, ignoring", file);
return null;
}
Language lang = languages.get(0);
if (languages.size() > 1) {
LOG.trace("File {} matches multiple languages ({}), selecting {}", file, languages, lang);
}
return discoverer.getDefaultLanguageVersion(lang);
}
/**
* Whether the LanguageVersion of the file matches the one set in
* the {@link LanguageVersionDiscoverer}. This is required to ensure
* that all files for a given language have the same language version.
*/
private boolean checkContextualVersion(TextFile textFile) {
LanguageVersion fileVersion = textFile.getLanguageVersion();
Language language = fileVersion.getLanguage();
LanguageVersion contextVersion = discoverer.getDefaultLanguageVersion(language);
if (!fileVersion.equals(contextVersion)) {
reporter.error(
"Cannot add file {0}: version ''{1}'' does not match ''{2}''",
textFile.getFileId(),
fileVersion,
contextVersion
);
return false;
}
return true;
}
/**
* Add a directory recursively using {@link #addFile(Path)} on
* all regular files.
*
* @param dir Directory path
*
* @return True if the directory has been added
*/
public boolean addDirectory(Path dir) throws IOException {
if (!Files.isDirectory(dir)) {
reporter.error("Not a directory {0}", dir);
return false;
}
Files.walkFileTree(dir, EnumSet.of(FileVisitOption.FOLLOW_LINKS), Integer.MAX_VALUE, new SimpleFileVisitor<Path>() {
@Override
public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) throws IOException {
if (attrs.isRegularFile()) {
FileCollector.this.addFile(file);
}
return super.visitFile(file, attrs);
}
});
return true;
}
/**
* Add a file or directory recursively. Language is determined automatically
* from the extension/file patterns.
*
* @return True if the file or directory has been added
*/
public boolean addFileOrDirectory(Path file) throws IOException {
if (Files.isDirectory(file)) {
return addDirectory(file);
} else if (Files.isRegularFile(file)) {
return addFile(file);
} else {
reporter.error("Not a file or directory {0}", file);
return false;
}
}
/**
* Opens a zip file and returns a FileSystem for its contents, so
* it can be explored with the {@link Path} API. You can then call
* {@link #addFile(Path)} and such. The zip file is registered as
* a resource to close at the end of analysis.
*
* @deprecated Use {@link #addZipFileWithContent(Path)} instead.
*/
@Deprecated
public FileSystem addZipFile(Path zipFile) {
if (!Files.isRegularFile(zipFile)) {
throw new IllegalArgumentException("Not a regular file: " + zipFile);
}
URI zipUri = URI.create("jar:" + zipFile.toUri());
try {
FileSystem fs = FileSystems.newFileSystem(zipUri, Collections.<String, Object>emptyMap());
resourcesToClose.add(fs);
return fs;
} catch (FileSystemAlreadyExistsException | ProviderNotFoundException | IOException e) {
reporter.errorEx("Cannot open zip file " + zipFile, e);
return null;
}
}
/**
* Opens a zip file and adds all files of the zip file to the list
* of files to be processed.
*
* <p>The zip file is registered as a resource to close at the end of analysis.</p>
*
* @return True if the zip file including its content has been added without errors
*/
@Experimental
public boolean addZipFileWithContent(Path zipFile) throws IOException {
if (!Files.isRegularFile(zipFile)) {
throw new IllegalArgumentException("Not a regular file: " + zipFile);
}
URI zipUri = URI.create("jar:" + zipFile.toUri());
FileSystem fs;
boolean isNewFileSystem = false;
try {
// find an existing file system, may fail
fs = FileSystems.getFileSystem(zipUri);
} catch (FileSystemNotFoundException ignored) {
// if it fails, try to create it.
try {
fs = FileSystems.newFileSystem(zipUri, Collections.<String, Object>emptyMap());
isNewFileSystem = true;
} catch (ProviderNotFoundException | IOException e) {
reporter.errorEx("Cannot open zip file " + zipFile, e);
return false;
}
}
try (FileCollector zipCollector = newZipCollector(zipFile)) {
for (Path zipRoot : fs.getRootDirectories()) {
zipCollector.addFileOrDirectory(zipRoot);
}
this.absorb(zipCollector);
if (isNewFileSystem) {
resourcesToClose.add(fs);
}
} catch (IOException ioe) {
reporter.errorEx("Error reading zip file " + zipFile + ", will be skipped", ioe);
fs.close();
return false;
}
return true;
}
/** A collector that prefixes the display name of the files it will contain with the path of the zip. */
@Experimental
private FileCollector newZipCollector(Path zipFilePath) {
return new FileCollector(discoverer, reporter, FileId.fromPath(zipFilePath));
}
// configuration
/**
* Sets the charset to use for subsequent calls to {@link #addFile(Path)}
* and other overloads using a {@link Path}.
*
* @param charset A charset
*/
public void setCharset(Charset charset) {
this.charset = Objects.requireNonNull(charset);
}
// filtering
/**
* Remove all files collected by the given collector from this one.
*/
public void exclude(FileCollector excludeCollector) {
Set<TextFile> toExclude = new HashSet<>(excludeCollector.allFilesToProcess);
for (Iterator<TextFile> iterator = allFilesToProcess.iterator(); iterator.hasNext();) {
TextFile file = iterator.next();
if (toExclude.contains(file)) {
LOG.trace("Excluding file {}", file.getFileId());
iterator.remove();
}
}
}
/**
* Add all files collected in the other collector into this one.
* Transfers resources to close as well. The parameter is left empty.
*/
public void absorb(FileCollector otherCollector) {
this.allFilesToProcess.addAll(otherCollector.allFilesToProcess);
this.resourcesToClose.addAll(otherCollector.resourcesToClose);
otherCollector.allFilesToProcess.clear();
otherCollector.resourcesToClose.clear();
}
/**
* Exclude all collected files whose language is not part of the given
* collection.
*/
public void filterLanguages(Set<Language> languages) {
for (Iterator<TextFile> iterator = allFilesToProcess.iterator(); iterator.hasNext();) {
TextFile file = iterator.next();
Language lang = file.getLanguageVersion().getLanguage();
if (!languages.contains(lang)) {
LOG.trace("Filtering out {}, no rules for language {}", file.getFileId(), lang);
iterator.remove();
}
}
}
@Override
public String toString() {
return "FileCollector{filesToProcess=" + allFilesToProcess + '}';
}
}
| 15,632 | 34.691781 | 134 | java |
pmd | pmd-master/pmd-core/src/main/java/net/sourceforge/pmd/lang/document/NioTextFile.java | /*
* BSD-style license; for more info see http://pmd.sourceforge.net/license.html
*/
package net.sourceforge.pmd.lang.document;
import java.io.BufferedWriter;
import java.io.IOException;
import java.nio.charset.Charset;
import java.nio.file.FileSystem;
import java.nio.file.Files;
import java.nio.file.Path;
import org.checkerframework.checker.nullness.qual.NonNull;
import org.checkerframework.checker.nullness.qual.Nullable;
import net.sourceforge.pmd.internal.util.BaseCloseable;
import net.sourceforge.pmd.lang.LanguageVersion;
import net.sourceforge.pmd.util.AssertionUtil;
/**
 * A {@link TextFile} backed by a file in some {@link FileSystem}.
 */
class NioTextFile extends BaseCloseable implements TextFile {

    private final Path path;
    private final Charset charset;
    private final LanguageVersion languageVersion;
    private final FileId fileId;
    private boolean readOnly;

    NioTextFile(Path path,
                @Nullable FileId parentFsPath,
                Charset charset,
                LanguageVersion languageVersion,
                boolean readOnly) {
        AssertionUtil.requireParamNotNull("path", path);
        AssertionUtil.requireParamNotNull("charset", charset);
        AssertionUtil.requireParamNotNull("language version", languageVersion);

        this.path = path;
        this.charset = charset;
        this.languageVersion = languageVersion;
        this.readOnly = readOnly;
        // The id is derived from the URI, which handles files inside zip
        // archives automatically (scheme "jar:file:...!/path/inside/zip"),
        // and is normalized so that it is canonical.
        this.fileId = FileId.fromPath(path, parentFsPath);
    }

    @Override
    public @NonNull LanguageVersion getLanguageVersion() {
        return languageVersion;
    }

    @Override
    public FileId getFileId() {
        return fileId;
    }

    @Override
    public boolean isReadOnly() {
        if (readOnly) {
            return true;
        }
        return !Files.isWritable(path);
    }

    @Override
    public void writeContents(TextFileContent content) throws IOException {
        ensureOpen();
        if (isReadOnly()) {
            throw new ReadOnlyFileException(this);
        }

        try (BufferedWriter writer = Files.newBufferedWriter(path, charset)) {
            if (!TextFileContent.NORMALIZED_LINE_TERM.equals(content.getLineTerminator())) {
                // write line by line, restoring the original terminator
                for (Chars line : content.getNormalizedText().lines()) {
                    line.writeFully(writer);
                    writer.write(content.getLineTerminator());
                }
            } else {
                content.getNormalizedText().writeFully(writer);
            }
        }
    }

    @Override
    public TextFileContent readContents() throws IOException {
        ensureOpen();
        if (!Files.isRegularFile(path)) {
            throw new IOException("Not a regular file: " + path);
        }
        return TextFileContent.fromInputStream(Files.newInputStream(path), charset);
    }

    @Override
    protected void doClose() throws IOException {
        // nothing to do.
    }

    @Override
    public boolean equals(Object o) {
        if (this == o) {
            return true;
        }
        if (o == null || o.getClass() != getClass()) {
            return false;
        }
        @SuppressWarnings("PMD.CloseResource")
        NioTextFile other = (NioTextFile) o;
        return path.equals(other.path);
    }

    @Override
    public int hashCode() {
        return path.hashCode();
    }

    @Override
    public String toString() {
        return "NioTextFile[charset=" + charset + ", path=" + path + ']';
    }
}
| 3,600 | 28.276423 | 124 | java |
pmd | pmd-master/pmd-core/src/main/java/net/sourceforge/pmd/lang/document/BaseMappedDocument.java | /*
* BSD-style license; for more info see http://pmd.sourceforge.net/license.html
*/
package net.sourceforge.pmd.lang.document;
import java.io.IOException;
import org.checkerframework.checker.nullness.qual.NonNull;
/**
 * Base class for documents that apply a transform to their output offsets.
 * This includes translated documents, and slices (subdocument views).
 * Most operations delegate to the base document after mapping offsets
 * back into its coordinate system.
 */
abstract class BaseMappedDocument implements TextDocument {

    protected final TextDocument base;

    BaseMappedDocument(TextDocument base) {
        this.base = base;
    }

    @Override
    public long getCheckSum() {
        return base.getCheckSum();
    }

    @Override
    public FileId getFileId() {
        return base.getFileId();
    }

    @Override
    public Chars sliceOriginalText(TextRegion region) {
        return base.sliceOriginalText(inputRegion(region));
    }

    @Override
    public FileLocation toLocation(TextRegion region) {
        return base.toLocation(inputRegion(region));
    }

    @Override
    public TextRegion createLineRange(int startLineInclusive, int endLineInclusive) {
        // lines are not affected by the mapping, no translation needed
        return base.createLineRange(startLineInclusive, endLineInclusive);
    }

    @Override
    public TextPos2d lineColumnAtOffset(int offset, boolean inclusive) {
        return base.lineColumnAtOffset(inputOffset(offset, inclusive));
    }

    /**
     * Translate a region given in the coordinate system of this
     * document, to the coordinate system of the base document.
     * This works as if creating a new region with both start and end
     * offsets translated through {@link #inputOffset(int, boolean)}. The
     * returned region may have a different length.
     *
     * @param outputRegion Output region
     *
     * @return Input region
     */
    protected @NonNull TextRegion inputRegion(TextRegion outputRegion) {
        int mappedStart = inputOffset(outputRegion.getStartOffset(), true);
        int mappedEnd = inputOffset(outputRegion.getEndOffset(), false);
        return TextRegion.fromBothOffsets(mappedStart, mappedEnd);
    }

    /**
     * Returns the input offset for the given output offset. This maps
     * back an offset in the coordinate system of this document, to the
     * coordinate system of the base document. This includes the
     * length of any unicode escapes.
     *
     * <pre>
     * input:      "a\u00a0b"   (original document)
     * translated: "a b"        (this document)
     *
     * translateOffset(0) = 0
     * translateOffset(1) = 1
     * translateOffset(2) = 7 // includes the length of the escape
     * </pre>
     *
     * @param outOffset Output offset
     * @param inclusive Whether the offset is to be interpreted as the index of a character (true),
     *                  or the position after a character (false)
     *
     * @return Input offset
     *
     * @throws IndexOutOfBoundsException If the offset is outside of this document
     */
    protected final int inputOffset(int outOffset, boolean inclusive) {
        if (outOffset >= 0 && outOffset <= getLength()) {
            return localOffsetTransform(outOffset, inclusive);
        }
        throw new IndexOutOfBoundsException();
    }

    /**
     * Output offset to input offset.
     */
    protected abstract int localOffsetTransform(int outOffset, boolean inclusive);

    @Override
    public void close() throws IOException {
        base.close();
    }
}
| 3,349 | 29.733945 | 99 | java |
pmd | pmd-master/pmd-core/src/main/java/net/sourceforge/pmd/lang/document/TextRange2d.java | /*
* BSD-style license; for more info see http://pmd.sourceforge.net/license.html
*/
package net.sourceforge.pmd.lang.document;
import java.util.Comparator;
import java.util.Objects;
/**
 * A range in a text document, represented as line/column information
 * for both endpoints (all coordinates 1-based).
 */
public final class TextRange2d implements Comparable<TextRange2d> {

    // Order by start position first, then by end position.
    private static final Comparator<TextRange2d> COMPARATOR =
        Comparator.comparingInt(TextRange2d::getStartLine)
                  .thenComparingInt(TextRange2d::getStartColumn)
                  .thenComparingInt(TextRange2d::getEndLine)
                  .thenComparingInt(TextRange2d::getEndColumn);

    private final int startLine;
    private final int startCol;
    private final int endLine;
    private final int endCol;

    public TextRange2d(int startLine, int startCol, int endLine, int endCol) {
        this.startLine = startLine;
        this.startCol = startCol;
        this.endLine = endLine;
        this.endCol = endCol;
        assert startCol >= 1 && startLine >= 1 && endLine >= 1 && endCol >= 1
            : "Not a valid range " + toDisplayStringWithColon();
    }

    public static TextRange2d range2d(TextPos2d start, TextPos2d end) {
        return new TextRange2d(start.getLine(), start.getColumn(), end.getLine(), end.getColumn());
    }

    public static TextRange2d range2d(int bline, int bcol, int eline, int ecol) {
        return new TextRange2d(bline, bcol, eline, ecol);
    }

    public static TextRange2d fullLine(int line, int lineLength) {
        return new TextRange2d(line, 1, line, 1 + lineLength);
    }

    public int getStartLine() {
        return startLine;
    }

    public int getStartColumn() {
        return startCol;
    }

    public int getEndLine() {
        return endLine;
    }

    public int getEndColumn() {
        return endCol;
    }

    public TextPos2d getStartPos() {
        return TextPos2d.pos2d(startLine, startCol);
    }

    public TextPos2d getEndPos() {
        return TextPos2d.pos2d(endLine, endCol);
    }

    public String toDisplayStringWithColon() {
        return getStartPos().toDisplayStringWithColon() + "-"
            + getEndPos().toDisplayStringWithColon();
    }

    @Override
    public int compareTo(TextRange2d o) {
        return COMPARATOR.compare(this, o);
    }

    /** Returns true if this range entirely contains the given range (inclusive bounds). */
    public boolean contains(TextRange2d range) {
        TextPos2d start = getStartPos();
        TextPos2d end = getEndPos();
        return start.compareTo(range.getStartPos()) <= 0
            && end.compareTo(range.getEndPos()) >= 0;
    }

    /** Returns true if the given position falls within this range (inclusive bounds). */
    public boolean contains(TextPos2d pos) {
        return getStartPos().compareTo(pos) <= 0
            && getEndPos().compareTo(pos) >= 0;
    }

    @Override
    public boolean equals(Object o) {
        if (this == o) {
            return true;
        }
        if (!(o instanceof TextRange2d)) {
            // also covers null; the class is final so instanceof is exact
            return false;
        }
        TextRange2d other = (TextRange2d) o;
        return getStartPos().equals(other.getStartPos())
            && getEndPos().equals(other.getEndPos());
    }

    @Override
    public int hashCode() {
        return Objects.hash(getStartPos(), getEndPos());
    }

    @Override
    public String toString() {
        return "!debug only! [" + getStartPos().toTupleString()
            + " - " + getEndPos().toTupleString() + ']';
    }
}
| 3,262 | 27.622807 | 114 | java |
pmd | pmd-master/pmd-core/src/main/java/net/sourceforge/pmd/lang/document/FileId.java | /*
* BSD-style license; for more info see http://pmd.sourceforge.net/license.html
*/
package net.sourceforge.pmd.lang.document;
import java.io.File;
import java.net.URI;
import java.nio.file.Path;
import java.nio.file.Paths;
import org.checkerframework.checker.nullness.qual.Nullable;
import net.sourceforge.pmd.RuleViolation;
import net.sourceforge.pmd.renderers.Renderer;
/**
* An identifier for a {@link TextFile}. This is not a path, but provides
* several methods to be rendered into path-like strings in different formats
* (for use mostly by {@link Renderer} instances). File IDs are used to
* identify files e.g. in {@link RuleViolation}, {@link FileLocation}, {@link TextFile}.
*
* <p>Note that the addressed file may not be an actual file on a file system.
* For instance, you can create file ids from strings ({@link #fromPathLikeString(String)}),
* or use {@link #STDIN} to address standard input. The rendering methods
* of this interface (like {@link #getAbsolutePath()}) do not have to return
* actual paths for those exotic files, and operate on a best-effort basis.
*
* @author Clément Fournier
*/
public interface FileId extends Comparable<FileId> {
/**
 * The name used for an unknown file. This is mostly only
 * relevant for unit tests.
 */
FileId UNKNOWN = new FileId() {
    @Override
    public String getFileName() {
        return "(unknown)";
    }

    @Override
    public String getOriginalPath() {
        return "(unknown)";
    }

    @Override
    public String getAbsolutePath() {
        return "(unknown)";
    }

    @Override
    public String getUriString() {
        return "file://(unknown)";
    }

    @Override
    public @Nullable FileId getParentFsPath() {
        // not nested in any archive
        return null;
    }
};
/** The virtual file ID for standard input. */
FileId STDIN = new FileId() {
    // every representation of stdin is the same fixed name
    private final String name = "stdin";

    @Override
    public String getFileName() {
        return name;
    }

    @Override
    public String getOriginalPath() {
        return name;
    }

    @Override
    public String getAbsolutePath() {
        return name;
    }

    @Override
    public String getUriString() {
        return name;
    }

    @Override
    public @Nullable FileId getParentFsPath() {
        return null;
    }
};
/**
 * Return the simple file name, like {@link Path#getFileName()}.
 * This includes the extension.
 */
String getFileName();

/**
 * Return the path as it was input by the user. This may be a
 * relative or absolute path.
 */
String getOriginalPath();

/**
 * Return an absolute path to this file in its containing file system.
 * If the file is in a zip file, then this returns a path from the
 * zip root, and does not include the path of the zip itself.
 */
String getAbsolutePath();

/**
 * Return a string that looks like a URI pointing to this file.
 * Note that for virtual files (e.g. {@link #STDIN}) this may not
 * be an actual, resolvable URI.
 */
String getUriString();

/**
 * If this file is in a nested filesystem (eg a zip file), return
 * the file ID of the container in the outer file system. Return
 * null if this is in the root file system.
 */
@Nullable FileId getParentFsPath();

/**
 * Two file IDs are equal if they have the same {@link #getUriString()}.
 *
 * @param o Object
 *
 * @return True if the other object is a FileId with the same URI string
 */
@Override
boolean equals(Object o);

/**
 * Compare by {@link #getAbsolutePath()}.
 *
 * <p>NOTE(review): this ordering is based on the absolute path while
 * equality is specified over the URI string — presumably these never
 * disagree in practice, but this makes the ordering potentially
 * inconsistent with equals; confirm before relying on it in sorted sets.
 */
@Override
default int compareTo(FileId o) {
    return this.getAbsolutePath().compareTo(o.getAbsolutePath());
}

/**
 * This method is intentionally only meant for debugging, and its output
 * is unspecified. Code that needs a string representation should use one
 * of the named string conversion methods.
 *
 * @deprecated Do not use this method, use one of the other getters
 */
@Override
@Deprecated
String toString();
/**
 * Return a path ID for the given string. The string is interpreted
 * as a file system path, so that {@link #getAbsolutePath()} and
 * {@link #getUriString()} may work.
 *
 * @param str A string. Should be a valid file system path for the platform (see
 *            {@link Paths#get(String, String...)}.
 *
 * @return A new file id
 */
static FileId fromPathLikeString(String str) {
    Path abs = Paths.get(str).toAbsolutePath();
    // getFileName() is null for the root path
    @Nullable Path namePart = abs.getFileName();
    String simpleName = namePart == null ? "" : namePart.toString();
    String absStr = abs.toString();
    return new FileId() {
        @Override
        public String getAbsolutePath() {
            return absStr;
        }

        @Override
        public String getUriString() {
            // pretend...
            return "file://" + str;
        }

        @Override
        public String getFileName() {
            return simpleName;
        }

        @Override
        public String getOriginalPath() {
            return str;
        }

        @Override
        public @Nullable FileId getParentFsPath() {
            return null;
        }

        @Override
        public boolean equals(Object obj) {
            return obj instanceof FileId
                && ((FileId) obj).getUriString().equals(this.getUriString());
        }

        @Override
        public int hashCode() {
            return getUriString().hashCode();
        }
    };
}
/**
* Return a new path id for the given path.
*
* @param path The path
* @param fsPath The file id of the containing file system, if it is some Zip file.
*
* @return A new file id.
*/
static FileId fromPath(Path path, @Nullable FileId fsPath) {
    return new FileId() {
        // Compute these beforehand as that will fail if the path
        // is invalid (better now than later).
        // Also, not hitting the filesystem every time we want to
        // do a compareTo is good for performance.
        final Path normalized = path.normalize(); // hoisted: was computed twice
        final String absPath = normalized.toAbsolutePath().toString();
        final String uriString = normalized.toUri().toString();
        // Path#getFileName() is null for a root path; fall back to ""
        // like fromPathLikeString does instead of failing with an NPE
        // at construction time.
        final String fileName = path.getFileName() == null ? "" : path.getFileName().toString();
        final String origPath = path.toString();

        @Override
        public String getAbsolutePath() {
            return absPath;
        }

        @Override
        public String getUriString() {
            return uriString;
        }

        @Override
        public String getFileName() {
            return fileName;
        }

        @Override
        public String getOriginalPath() {
            return origPath;
        }

        @Override
        public @Nullable FileId getParentFsPath() {
            return fsPath;
        }

        @Override
        public boolean equals(Object obj) {
            return obj instanceof FileId
                && ((FileId) obj).getUriString().equals(this.getUriString());
        }

        @Override
        public int hashCode() {
            return getUriString().hashCode();
        }

        @Override
        public String toString() {
            return "PathId.forPath(" + path + ")";
        }
    };
}
/**
 * Return a file ID for the given path. This uses {@link #fromPath(Path, FileId)}
 * and defaults the second parameter to null, i.e., the path is assumed
 * to be in the root file system (not nested in a zip).
 */
static FileId fromPath(Path path) {
    return fromPath(path, null);
}
/**
* Return a file ID whose methods behave the same as the first parameter,
* and whose {@link #getParentFsPath()} returns the second parameter.
*
* @param self A file id
* @param parentFsPath Another file id for the parent.
*/
static FileId asChildOf(FileId self, FileId parentFsPath) {
    return new FileId() {
        @Override
        public @Nullable FileId getParentFsPath() {
            return parentFsPath;
        }

        @Override
        public String getUriString() {
            return self.getUriString();
        }

        @Override
        public String getFileName() {
            return self.getFileName();
        }

        @Override
        public String getOriginalPath() {
            return self.getOriginalPath();
        }

        @Override
        public String getAbsolutePath() {
            return self.getAbsolutePath();
        }

        // FileId's documented contract is equality by getUriString(). Without
        // these overrides this wrapper used identity equality, so it did not
        // even compare equal to the FileId it wraps, unlike every other
        // factory in this interface.
        @Override
        public boolean equals(Object obj) {
            return obj instanceof FileId
                && ((FileId) obj).getUriString().equals(this.getUriString());
        }

        @Override
        public int hashCode() {
            return getUriString().hashCode();
        }
    };
}
/**
* Return a file ID which interprets the first parameter as an absolute path.
* The path must be a valid path for this system ({@link Paths#get(String, String...)} should not fail).
* The URI is rebuilt using the outer file ID if it is non-null.
*
* @param absPath Absolute path for the file
* @param outer File ID of the outer file system (Zip), if it exists
*
* @return A new file id
*/
static FileId fromAbsolutePath(String absPath, @Nullable FileId outer) {
    Path fileNamePath = Paths.get(absPath).getFileName();
    // getFileName() is null for a root path; fall back to "" like
    // fromPathLikeString does, instead of deferring an NPE to the
    // first getFileName() call on the returned FileId.
    String fileName = fileNamePath == null ? "" : fileNamePath.toString();
    // we know this one uses platform specific separators (for display)
    String platformAbsPath = absPath.replace('/', File.separatorChar);
    // we know this one uses / (for URIs)
    String uriAbsPath = platformAbsPath.replace(File.separatorChar, '/');
    String uriStr = outer != null ? "jar:" + outer.getUriString() + "!" + uriAbsPath
                                  : "file://" + uriAbsPath;
    // zip file
    return new FileId() {
        @Override
        public String getFileName() {
            return fileName;
        }

        @Override
        public String getOriginalPath() {
            return absPath;
        }

        @Override
        public String getAbsolutePath() {
            return platformAbsPath;
        }

        @Override
        public String getUriString() {
            return uriStr;
        }

        @Override
        public @Nullable FileId getParentFsPath() {
            return outer;
        }

        @Override
        public boolean equals(Object obj) {
            return obj instanceof FileId && getUriString().equals(((FileId) obj).getUriString());
        }

        @Override
        public int hashCode() {
            return getUriString().hashCode();
        }
    };
}
/**
* Return a file ID for a URI.
* The URI must have scheme {@code file} or {@code jar} and be a
* valid URI (see {@link URI#create(String)}). If the scheme is {@code jar},
* then the {@link #getParentFsPath()} is populated with the path of the jar.
*
* @param uriStr A uri string
*
* @return A new file id
*/
static FileId fromURI(String uriStr) throws IllegalArgumentException {
    URI uri = URI.create(uriStr); // throws IllegalArgumentException on malformed input
    String schemeSpecificPart = uri.getSchemeSpecificPart();
    if ("jar".equals(uri.getScheme())) {
        // jar URIs look like jar:<zip uri>!<entry path>
        int split = schemeSpecificPart.lastIndexOf('!');
        if (split == -1) {
            throw new IllegalArgumentException("expected a jar specific path");
        } else {
            String zipUri = schemeSpecificPart.substring(0, split);
            String localPath = schemeSpecificPart.substring(split + 1);
            FileId outer = fromURI(zipUri);
            return fromAbsolutePath(localPath, outer);
        }
    } else if ("file".equals(uri.getScheme())) {
        Path path = Paths.get(uri);
        return fromPath(path);
    }
    // The declared `throws IllegalArgumentException` and the javadoc promise
    // IAE for unsupported input; UnsupportedOperationException was inconsistent
    // with that contract.
    throw new IllegalArgumentException("Unknown scheme " + uriStr);
}
}
| 12,172 | 29.05679 | 108 | java |
pmd | pmd-master/pmd-core/src/main/java/net/sourceforge/pmd/lang/rule/AbstractDelegateRule.java | /**
* BSD-style license; for more info see http://pmd.sourceforge.net/license.html
*/
package net.sourceforge.pmd.lang.rule;
import java.util.List;
import java.util.Map;
import net.sourceforge.pmd.Rule;
import net.sourceforge.pmd.RuleContext;
import net.sourceforge.pmd.RulePriority;
import net.sourceforge.pmd.lang.Language;
import net.sourceforge.pmd.lang.LanguageProcessor;
import net.sourceforge.pmd.lang.LanguageVersion;
import net.sourceforge.pmd.lang.ast.Node;
import net.sourceforge.pmd.properties.MultiValuePropertyDescriptor;
import net.sourceforge.pmd.properties.PropertyDescriptor;
import net.sourceforge.pmd.properties.PropertySource;
/**
* Base class for Rule implementations which delegate to another Rule instance.
*
* @deprecated This is only relevant to {@link RuleReference}, but prevents sharing the implementation
* of {@link net.sourceforge.pmd.properties.AbstractPropertySource}. Will be removed in 7.0.0
*/
@Deprecated
public abstract class AbstractDelegateRule implements Rule {
private Rule rule;
public Rule getRule() {
return rule;
}
/**
* @deprecated This will be removed in 7.0.0
* I mark it specially deprecated because it's inherited by rule reference,
* even though a RuleReference has no business setting its rule after construction
*/
@Deprecated
public void setRule(Rule rule) {
this.rule = rule;
}
@Override
public Language getLanguage() {
return rule.getLanguage();
}
@Override
public void setLanguage(Language language) {
rule.setLanguage(language);
}
@Override
public LanguageVersion getMinimumLanguageVersion() {
return rule.getMinimumLanguageVersion();
}
@Override
public void setMinimumLanguageVersion(LanguageVersion minimumlanguageVersion) {
rule.setMinimumLanguageVersion(minimumlanguageVersion);
}
@Override
public LanguageVersion getMaximumLanguageVersion() {
return rule.getMaximumLanguageVersion();
}
@Override
public void setMaximumLanguageVersion(LanguageVersion maximumlanguageVersion) {
rule.setMaximumLanguageVersion(maximumlanguageVersion);
}
@Override
public boolean isDeprecated() {
return rule.isDeprecated();
}
@Override
public void setDeprecated(boolean deprecated) {
rule.setDeprecated(deprecated);
}
/**
* @see PropertySource#dysfunctionReason()
*/
@Override
public String dysfunctionReason() {
return rule.dysfunctionReason();
}
@Override
public String getName() {
return rule.getName();
}
@Override
public void setName(String name) {
rule.setName(name);
}
@Override
public String getSince() {
return rule.getSince();
}
@Override
public void setSince(String since) {
rule.setSince(since);
}
@Override
public String getRuleClass() {
return rule.getRuleClass();
}
@Override
public void setRuleClass(String ruleClass) {
rule.setRuleClass(ruleClass);
}
@Override
public String getRuleSetName() {
return rule.getRuleSetName();
}
@Override
public void setRuleSetName(String name) {
rule.setRuleSetName(name);
}
@Override
public String getMessage() {
return rule.getMessage();
}
@Override
public void setMessage(String message) {
rule.setMessage(message);
}
@Override
public String getDescription() {
return rule.getDescription();
}
@Override
public void setDescription(String description) {
rule.setDescription(description);
}
@Override
public List<String> getExamples() {
return rule.getExamples();
}
@Override
public void addExample(String example) {
rule.addExample(example);
}
@Override
public String getExternalInfoUrl() {
return rule.getExternalInfoUrl();
}
@Override
public void setExternalInfoUrl(String url) {
rule.setExternalInfoUrl(url);
}
@Override
public RulePriority getPriority() {
return rule.getPriority();
}
@Override
public void setPriority(RulePriority priority) {
rule.setPriority(priority);
}
@Override
public void definePropertyDescriptor(PropertyDescriptor<?> propertyDescriptor) throws IllegalArgumentException {
rule.definePropertyDescriptor(propertyDescriptor);
}
@Override
public PropertyDescriptor<?> getPropertyDescriptor(String name) {
return rule.getPropertyDescriptor(name);
}
@Override
public List<PropertyDescriptor<?>> getPropertyDescriptors() {
return rule.getPropertyDescriptors();
}
@Override
public <T> T getProperty(PropertyDescriptor<T> propertyDescriptor) {
return rule.getProperty(propertyDescriptor);
}
@Override
public <T> void setProperty(PropertyDescriptor<T> propertyDescriptor, T value) {
rule.setProperty(propertyDescriptor, value);
}
@Override
public <V> void setProperty(MultiValuePropertyDescriptor<V> propertyDescriptor, V... values) {
rule.setProperty(propertyDescriptor, values);
}
@Override
public boolean isPropertyOverridden(PropertyDescriptor<?> propertyDescriptor) {
return rule.isPropertyOverridden(propertyDescriptor);
}
@Override
public Map<PropertyDescriptor<?>, Object> getPropertiesByPropertyDescriptor() {
return rule.getPropertiesByPropertyDescriptor();
}
@Override
public RuleTargetSelector getTargetSelector() {
return rule.getTargetSelector();
}
@Override
public void start(RuleContext ctx) {
rule.start(ctx);
}
@Override
public void apply(Node target, RuleContext ctx) {
rule.apply(target, ctx);
}
@Override
public void end(RuleContext ctx) {
rule.end(ctx);
}
@Override
public void initialize(LanguageProcessor languageProcessor) {
rule.initialize(languageProcessor);
}
/**
* @see Rule#hasDescriptor(PropertyDescriptor)
*/
@Override
public boolean hasDescriptor(PropertyDescriptor<?> descriptor) {
return rule.hasDescriptor(descriptor);
}
}
| 6,342 | 23.490347 | 116 | java |
pmd | pmd-master/pmd-core/src/main/java/net/sourceforge/pmd/lang/rule/RuleTargetSelector.java | /*
* BSD-style license; for more info see http://pmd.sourceforge.net/license.html
*/
package net.sourceforge.pmd.lang.rule;
import java.util.Collection;
import java.util.Collections;
import java.util.HashSet;
import java.util.Iterator;
import java.util.LinkedHashSet;
import java.util.Set;
import net.sourceforge.pmd.Rule;
import net.sourceforge.pmd.annotation.InternalApi;
import net.sourceforge.pmd.lang.ast.Node;
import net.sourceforge.pmd.lang.ast.RootNode;
import net.sourceforge.pmd.lang.rule.internal.TargetSelectorInternal;
import net.sourceforge.pmd.lang.rule.internal.TreeIndex;
import net.sourceforge.pmd.util.CollectionUtil;
/**
* A strategy for selecting nodes that will be targeted by a rule.
*
* @see Rule#getTargetSelector()
*/
public abstract class RuleTargetSelector extends TargetSelectorInternal {

    RuleTargetSelector() {
        // package private, prevents subclassing (all the API is protected and internal)
    }

    /**
     * Target nodes having one of the given XPath local name.
     *
     * @param names XPath names
     *
     * @return A selector
     *
     * @throws IllegalArgumentException If the argument is empty
     */
    public static RuleTargetSelector forXPathNames(Collection<String> names) {
        if (names.isEmpty()) {
            throw new IllegalArgumentException("Cannot visit zero nodes");
        }
        return new StringRulechainVisits(names);
    }

    /**
     * Target nodes that are subtypes of any of the given classes.
     *
     * @param types Node types
     *
     * @return A selector
     *
     * @throws IllegalArgumentException If the argument is empty
     * @throws NullPointerException If the argument is null
     * @throws NullPointerException If any of the elements is null
     */
    public static RuleTargetSelector forTypes(Collection<Class<? extends Node>> types) {
        if (types.isEmpty()) {
            throw new IllegalArgumentException("Cannot visit zero types");
        }
        return new ClassRulechainVisits(types);
    }

    /**
     * Target nodes that are subtypes of any of the given classes.
     *
     * @param types Node types
     *
     * @return A selector
     *
     * @throws NullPointerException if any of the arguments is null
     */
    @SafeVarargs
    public static RuleTargetSelector forTypes(Class<? extends Node> first, Class<? extends Node>... types) {
        return forTypes(CollectionUtil.listOf(first, types));
    }

    /**
     * Target only the root of the tree.
     */
    public static RuleTargetSelector forRootOnly() {
        return ClassRulechainVisits.ROOT_ONLY;
    }

    /**
     * Returns true if this selector dispatches through the rule chain,
     * i.e. it is anything other than the root-only selector.
     * Note: the identity comparison is deliberate — ROOT_ONLY is the single
     * shared instance that opts out of rule chain dispatch.
     */
    @InternalApi
    public boolean isRuleChain() {
        return this != ClassRulechainVisits.ROOT_ONLY; // NOPMD #3205
    }

    /** Selector backed by a set of XPath element names. */
    private static final class StringRulechainVisits extends RuleTargetSelector {

        private final Set<String> visits;

        StringRulechainVisits(Collection<String> visits) {
            this.visits = new HashSet<>(visits);
        }

        @Override
        protected void prepare(ApplicatorBuilder builder) {
            builder.registerXPathNames(visits);
        }

        @Override
        protected Iterator<? extends Node> getVisitedNodes(TreeIndex index) {
            return index.getByName(visits);
        }

        @Override
        public String toString() {
            return "XPathNameVisits" + visits;
        }
    }

    /** Selector backed by a set of node classes (insertion-ordered). */
    private static final class ClassRulechainVisits extends RuleTargetSelector {

        public static final RuleTargetSelector ROOT_ONLY = new ClassRulechainVisits(Collections.singleton(RootNode.class));

        private final Set<Class<? extends Node>> visits;

        ClassRulechainVisits(Collection<Class<? extends Node>> visits) {
            // HashSet.contains(null) is safe; reject null elements eagerly.
            if (visits.contains(null)) {
                throw new NullPointerException("Null element in class visits " + visits);
            }
            this.visits = new LinkedHashSet<>(visits);
        }

        @Override
        protected void prepare(ApplicatorBuilder builder) {
            builder.registerClasses(visits);
        }

        @Override
        protected Iterator<? extends Node> getVisitedNodes(TreeIndex index) {
            return index.getByClass(visits);
        }

        @Override
        public String toString() {
            return "ClassVisits" + visits;
        }
    }
}
| 4,363 | 28.687075 | 123 | java |
pmd | pmd-master/pmd-core/src/main/java/net/sourceforge/pmd/lang/rule/RuleReference.java | /**
* BSD-style license; for more info see http://pmd.sourceforge.net/license.html
*/
package net.sourceforge.pmd.lang.rule;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import net.sourceforge.pmd.Rule;
import net.sourceforge.pmd.RulePriority;
import net.sourceforge.pmd.RuleSetReference;
import net.sourceforge.pmd.lang.LanguageVersion;
import net.sourceforge.pmd.properties.PropertyDescriptor;
import net.sourceforge.pmd.util.StringUtil;
/**
* This class represents a Rule which is a reference to Rule defined in another
* RuleSet. All details of the Rule are delegated to the underlying referenced
* Rule, but those operations which modify overridden aspects of the rule are
* explicitly tracked. Modification operations which set a value to the current
* underlying value do not override.
*/
public class RuleReference extends AbstractDelegateRule {

    // Each field below is non-null only when that attribute has been
    // explicitly overridden by this reference; null means "inherit from
    // the referenced rule". The setters therefore follow a strict pattern:
    // compare against the current underlying value first, then record the
    // override and forward to super. Do not reorder those steps.
    private LanguageVersion minimumLanguageVersion;
    private LanguageVersion maximumLanguageVersion;
    private Boolean deprecated;
    private String name;
    private List<PropertyDescriptor<?>> propertyDescriptors;
    private Map<PropertyDescriptor<?>, Object> propertyValues;
    private String message;
    private String description;
    private List<String> examples;
    private String externalInfoUrl;
    private RulePriority priority;
    private RuleSetReference ruleSetReference;

    /**
     * Create a new reference to the given rule.
     *
     * @param theRule the referenced rule
     * @param theRuleSetReference the rule set, where the rule is defined
     */
    public RuleReference(Rule theRule, RuleSetReference theRuleSetReference) {
        setRule(theRule);
        ruleSetReference = theRuleSetReference;
    }

    /** copy constructor; deep-copies the mutable override state and the underlying rule */
    private RuleReference(RuleReference ref) {
        this.minimumLanguageVersion = ref.minimumLanguageVersion;
        this.maximumLanguageVersion = ref.maximumLanguageVersion;
        this.deprecated = ref.deprecated;
        this.name = ref.name;
        this.propertyDescriptors = ref.propertyDescriptors;
        this.propertyValues = ref.propertyValues == null ? null : new HashMap<>(ref.propertyValues);
        this.message = ref.message;
        this.description = ref.description;
        this.examples = ref.examples == null ? null : new ArrayList<>(ref.examples);
        this.externalInfoUrl = ref.externalInfoUrl;
        this.priority = ref.priority;
        this.ruleSetReference = ref.ruleSetReference;
        this.setRule(ref.getRule().deepCopy());
    }

    /** Returns the overridden minimum language version, or null if not overridden. */
    public LanguageVersion getOverriddenMinimumLanguageVersion() {
        return minimumLanguageVersion;
    }

    @Override
    public void setMinimumLanguageVersion(LanguageVersion minimumLanguageVersion) {
        // Only override if different than current value, or if already
        // overridden.
        if (!Objects.equals(minimumLanguageVersion, super.getMinimumLanguageVersion()) || this.minimumLanguageVersion != null) {
            super.setMinimumLanguageVersion(minimumLanguageVersion); // might throw
            this.minimumLanguageVersion = minimumLanguageVersion;
        }
    }

    /** Returns the overridden maximum language version, or null if not overridden. */
    public LanguageVersion getOverriddenMaximumLanguageVersion() {
        return maximumLanguageVersion;
    }

    @Override
    public void setMaximumLanguageVersion(LanguageVersion maximumLanguageVersion) {
        // Only override if different than current value, or if already
        // overridden.
        if (!Objects.equals(maximumLanguageVersion, super.getMaximumLanguageVersion()) || this.maximumLanguageVersion != null) {
            super.setMaximumLanguageVersion(maximumLanguageVersion); // might throw
            this.maximumLanguageVersion = maximumLanguageVersion;
        }
    }

    /** Returns the overridden deprecation flag, or null if not overridden. */
    public Boolean isOverriddenDeprecated() {
        return deprecated;
    }

    @Override
    public boolean isDeprecated() {
        return deprecated != null && deprecated;
    }

    @Override
    public void setDeprecated(boolean deprecated) {
        // Deprecation does not propagate to the underlying Rule. It is the
        // Rule reference itself which is being deprecated.
        this.deprecated = deprecated ? deprecated : null;
    }

    /** Returns the overridden name, or null if not overridden. */
    public String getOverriddenName() {
        return name;
    }

    /** Returns the name of the referenced rule, ignoring any override. */
    public String getOriginalName() {
        return super.getName();
    }

    @Override
    public void setName(String name) {
        // Only override if different than current value, or if already
        // overridden. Note: unlike other setters, this does not forward to
        // super — the underlying rule keeps its original name.
        if (!isSame(name, super.getName()) || this.name != null) {
            this.name = name;
        }
    }

    @Override
    public String getName() {
        if (this.name != null) {
            return this.name;
        }
        return super.getName();
    }

    /** Returns the overridden message, or null if not overridden. */
    public String getOverriddenMessage() {
        return message;
    }

    @Override
    public void setMessage(String message) {
        // Only override if different than current value, or if already
        // overridden.
        if (!isSame(message, super.getMessage()) || this.message != null) {
            this.message = message;
            super.setMessage(message);
        }
    }

    /** Returns the overridden description, or null if not overridden. */
    public String getOverriddenDescription() {
        return description;
    }

    @Override
    public void setDescription(String description) {
        // Only override if different than current value, or if already
        // overridden.
        if (!isSame(description, super.getDescription()) || this.description != null) {
            this.description = description;
            super.setDescription(description);
        }
    }

    /** Returns the overridden examples, or null if not overridden. */
    public List<String> getOverriddenExamples() {
        return examples;
    }

    @Override
    public void addExample(String example) {
        // TODO Intuitively, if some examples are overridden (even with empty value), then
        // I think we should discard the previous ones. If the rule needs new examples,
        // then the previous ones are not relevant.
        // TODO Meaningful override of examples is hard, because they are merely
        // a list of strings. How does one indicate override of a particular
        // value? Via index? Rule.setExample(int, String)? But the XML format
        // does not provide a means of overriding by index, not unless you took
        // the position in the XML file to indicate corresponding index to
        // override. But that means you have to override starting from index 0.
        // This would be so much easier if examples had to have names, like
        // properties.
        // Only override if different than current values.
        if (!contains(super.getExamples(), example)) {
            if (examples == null) {
                examples = new ArrayList<>(1);
            }
            // TODO Fix later. To keep example overrides from being unbounded,
            // we're only going to keep track of the last one.
            examples.clear();
            examples.add(example);
            super.addExample(example);
        }
    }

    /** Returns the overridden external info URL, or null if not overridden. */
    public String getOverriddenExternalInfoUrl() {
        return externalInfoUrl;
    }

    @Override
    public void setExternalInfoUrl(String externalInfoUrl) {
        // Only override if different than current value, or if already
        // overridden.
        if (!isSame(externalInfoUrl, super.getExternalInfoUrl()) || this.externalInfoUrl != null) {
            this.externalInfoUrl = externalInfoUrl;
            super.setExternalInfoUrl(externalInfoUrl);
        }
    }

    /** Returns the overridden priority, or null if not overridden. */
    public RulePriority getOverriddenPriority() {
        return priority;
    }

    @Override
    public void setPriority(RulePriority priority) {
        // Only override if different than current value, or if already
        // overridden. (Reference comparison is fine: RulePriority is an enum.)
        if (priority != super.getPriority() || this.priority != null) {
            this.priority = priority;
            super.setPriority(priority);
        }
    }

    @Override
    public List<PropertyDescriptor<?>> getOverriddenPropertyDescriptors() {
        return propertyDescriptors == null ? Collections.<PropertyDescriptor<?>>emptyList()
                                           : new ArrayList<>(propertyDescriptors);
    }

    @Override
    public void definePropertyDescriptor(PropertyDescriptor<?> propertyDescriptor) throws IllegalArgumentException {
        // Define on the underlying Rule, where it is impossible to have two
        // property descriptors with the same name. Therefore, there is no need
        // to check if the property is already overridden at this level.
        super.definePropertyDescriptor(propertyDescriptor);
        if (propertyDescriptors == null) {
            propertyDescriptors = new ArrayList<>();
        }
        propertyDescriptors.add(propertyDescriptor);
    }

    @Override
    public Map<PropertyDescriptor<?>, Object> getOverriddenPropertiesByPropertyDescriptor() {
        // Defensive copy: callers must not be able to mutate the override map.
        return propertyValues == null ? new HashMap<>() : new HashMap<>(propertyValues);
    }

    @Override
    public <T> void setProperty(PropertyDescriptor<T> propertyDescriptor, T value) {
        // Only override if different than current value.
        if (!Objects.equals(super.getProperty(propertyDescriptor), value)) {
            if (propertyValues == null) {
                propertyValues = new HashMap<>();
            }
            propertyValues.put(propertyDescriptor, value);
            super.setProperty(propertyDescriptor, value);
        }
    }

    public RuleSetReference getRuleSetReference() {
        return ruleSetReference;
    }

    /**
     * @deprecated There's no use in setting the ruleset reference after construction
     */
    @Deprecated
    public void setRuleSetReference(RuleSetReference ruleSetReference) {
        this.ruleSetReference = ruleSetReference;
    }

    // String comparison used for override detection; per StringUtil.isSame
    // flags: trims whitespace, case-sensitive, treats null and "" the same.
    // NOTE(review): confirm flag order against StringUtil.isSame's signature.
    private static boolean isSame(String s1, String s2) {
        return StringUtil.isSame(s1, s2, true, false, true);
    }

    private static boolean contains(Collection<String> collection, String s1) {
        for (String s2 : collection) {
            if (isSame(s1, s2)) {
                return true;
            }
        }
        return false;
    }

    @Override
    public boolean hasDescriptor(PropertyDescriptor<?> descriptor) {
        return propertyDescriptors != null && propertyDescriptors.contains(descriptor)
            || super.hasDescriptor(descriptor);
    }

    /**
     * @deprecated Use {@link #isPropertyOverridden(PropertyDescriptor)} instead
     */
    @Deprecated
    public boolean hasOverriddenProperty(PropertyDescriptor<?> descriptor) {
        return isPropertyOverridden(descriptor);
    }

    @Override
    public boolean isPropertyOverridden(PropertyDescriptor<?> descriptor) {
        return propertyValues != null && propertyValues.containsKey(descriptor);
    }

    @Override
    public Rule deepCopy() {
        return new RuleReference(this);
    }

    /**
     * Checks whether this rule reference explicitly overrides any of the possible
     * attributes of the referenced rule.
     * @return <code>true</code> if there is at least one attribute overridden. <code>false</code> if
     *     the referenced rule is referenced without any change.
     */
    public boolean hasOverriddenAttributes() {
        return deprecated != null || description != null || examples != null || externalInfoUrl != null
            || maximumLanguageVersion != null || minimumLanguageVersion != null
            || message != null || name != null || priority != null
            || propertyDescriptors != null || propertyValues != null;
    }
}
| 11,712 | 34.068862 | 128 | java |
pmd | pmd-master/pmd-core/src/main/java/net/sourceforge/pmd/lang/rule/AbstractVisitorRule.java | /*
* BSD-style license; for more info see http://pmd.sourceforge.net/license.html
*/
package net.sourceforge.pmd.lang.rule;
import net.sourceforge.pmd.RuleContext;
import net.sourceforge.pmd.lang.ast.AstVisitor;
import net.sourceforge.pmd.lang.ast.Node;
public abstract class AbstractVisitorRule extends AbstractRule {

    /**
     * Returns a rule visitor that can visit nodes for the given rule context.
     * This visitor should explore the nodes it's interested in and report
     * violations on the given rule context.
     * <p>
     * Language specific subclasses should redefine the return type to use
     * a language specific visitor interface.
     * </p>
     *
     * @return A visitor bound to the given rule context
     */
    public abstract AstVisitor<RuleContext, ?> buildVisitor();

    @Override
    public void apply(Node target, RuleContext ctx) {
        final AstVisitor<RuleContext, ?> ruleVisitor = buildVisitor();
        assert ruleVisitor != null : "Rule should provide a non-null visitor";
        target.acceptVisitor(ruleVisitor, ctx);
    }
}
| 1,076 | 31.636364 | 79 | java |
pmd | pmd-master/pmd-core/src/main/java/net/sourceforge/pmd/lang/rule/ParametricRuleViolation.java | /**
* BSD-style license; for more info see http://pmd.sourceforge.net/license.html
*/
package net.sourceforge.pmd.lang.rule;
import java.util.Collections;
import java.util.Map;
import net.sourceforge.pmd.Rule;
import net.sourceforge.pmd.RuleViolation;
import net.sourceforge.pmd.annotation.InternalApi;
import net.sourceforge.pmd.lang.document.FileLocation;
import net.sourceforge.pmd.reporting.Reportable;
import net.sourceforge.pmd.util.AssertionUtil;
/**
* @deprecated This is internal. Clients should exclusively use {@link RuleViolation}.
*/
@Deprecated
@InternalApi
public class ParametricRuleViolation implements RuleViolation {
    // todo move to package reporting

    /** The violated rule (visible to subclasses). */
    protected final Rule rule;
    /** The violation message (visible to subclasses). */
    protected final String description;
    private final FileLocation fileLocation;
    private final Map<String, String> extraInfo;

    // todo add factory methods on the interface and hide the class.

    /**
     * @deprecated Update tests that use this not to call the ctor directly.
     */
    @Deprecated
    public ParametricRuleViolation(Rule theRule, Reportable node, String message) {
        this(theRule, node.getReportLocation(), message, Collections.emptyMap());
    }

    public ParametricRuleViolation(Rule theRule, FileLocation location, String message) {
        this(theRule, location, message, Collections.emptyMap());
    }

    public ParametricRuleViolation(Rule theRule, Reportable node, String message, Map<String, String> additionalInfo) {
        this(theRule, node.getReportLocation(), message, additionalInfo);
    }

    public ParametricRuleViolation(Rule theRule, FileLocation location, String message, Map<String, String> additionalInfo) {
        this.rule = AssertionUtil.requireParamNotNull("rule", theRule);
        this.description = AssertionUtil.requireParamNotNull("message", message);
        this.fileLocation = location;
        // Wrap non-empty maps so callers cannot mutate our state.
        this.extraInfo = additionalInfo.isEmpty()
                         ? Collections.emptyMap()
                         : Collections.unmodifiableMap(additionalInfo);
    }

    @Override
    public Map<String, String> getAdditionalInfo() {
        return extraInfo;
    }

    @Override
    public Rule getRule() {
        return rule;
    }

    @Override
    public String getDescription() {
        return description;
    }

    @Override
    public FileLocation getLocation() {
        return fileLocation;
    }

    @Override
    public String toString() {
        return getLocation().startPosToStringWithFile() + ':' + getRule() + ':' + getDescription();
    }
}
| 2,595 | 28.83908 | 125 | java |
pmd | pmd-master/pmd-core/src/main/java/net/sourceforge/pmd/lang/rule/XPathRule.java | /**
* BSD-style license; for more info see http://pmd.sourceforge.net/license.html
*/
package net.sourceforge.pmd.lang.rule;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import org.apache.commons.lang3.StringUtils;
import org.apache.commons.lang3.exception.ContextedRuntimeException;
import org.checkerframework.checker.nullness.qual.NonNull;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import net.sourceforge.pmd.Rule;
import net.sourceforge.pmd.RuleContext;
import net.sourceforge.pmd.annotation.DeprecatedUntil700;
import net.sourceforge.pmd.lang.LanguageProcessor;
import net.sourceforge.pmd.lang.ast.Node;
import net.sourceforge.pmd.lang.rule.xpath.PmdXPathException;
import net.sourceforge.pmd.lang.rule.xpath.XPathVersion;
import net.sourceforge.pmd.lang.rule.xpath.internal.DeprecatedAttrLogger;
import net.sourceforge.pmd.lang.rule.xpath.internal.SaxonXPathRuleQuery;
import net.sourceforge.pmd.properties.PropertyDescriptor;
import net.sourceforge.pmd.properties.PropertyFactory;
/**
* Rule that tries to match an XPath expression against a DOM view of an AST.
*/
public final class XPathRule extends AbstractRule {
private static final Logger LOG = LoggerFactory.getLogger(XPathRule.class);
// TODO move to XPath subpackage
/**
* @deprecated Use {@link #XPathRule(XPathVersion, String)}
*/
@Deprecated
public static final PropertyDescriptor<String> XPATH_DESCRIPTOR =
PropertyFactory.stringProperty("xpath")
.desc("XPath expression")
.defaultValue("")
.build();
/**
* @deprecated Use {@link #XPathRule(XPathVersion, String)}
*/
@Deprecated
@DeprecatedUntil700
public static final PropertyDescriptor<XPathVersion> VERSION_DESCRIPTOR =
PropertyFactory.enumProperty("version", getXPathVersions())
.desc("XPath specification version")
.defaultValue(XPathVersion.DEFAULT)
.build();
/**
* This is initialized only once when calling {@link #apply(Node, RuleContext)} or {@link #getTargetSelector()}.
*/
private SaxonXPathRuleQuery xpathRuleQuery;
// this is shared with rules forked by deepCopy, used by the XPathRuleQuery
private DeprecatedAttrLogger attrLogger = DeprecatedAttrLogger.create(this);
/**
* @deprecated This is now only used by the ruleset loader. When
* we have syntactic sugar for XPath rules in the XML, we won't
* need this anymore.
*/
@Deprecated
public XPathRule() {
definePropertyDescriptor(XPATH_DESCRIPTOR);
definePropertyDescriptor(VERSION_DESCRIPTOR);
}
/**
* Make a new XPath rule with the given version + expression
*
* @param version Version of the XPath language
* @param expression XPath expression
*
* @throws NullPointerException If any of the arguments is null
*/
public XPathRule(XPathVersion version, String expression) {
this();
Objects.requireNonNull(version, "XPath version is null");
Objects.requireNonNull(expression, "XPath expression is null");
setProperty(XPathRule.XPATH_DESCRIPTOR, expression);
setProperty(XPathRule.VERSION_DESCRIPTOR, XPathVersion.ofId(version.getXmlName()));
}
@Override
public Rule deepCopy() {
XPathRule rule = (XPathRule) super.deepCopy();
rule.attrLogger = this.attrLogger;
return rule;
}
/**
* Returns the version for this rule. Returns null if this is not
* set or invalid.
*/
public XPathVersion getVersion() {
return getProperty(VERSION_DESCRIPTOR);
}
/**
* Returns the XPath expression that implements this rule.
*/
public String getXPathExpression() {
return getProperty(XPATH_DESCRIPTOR);
}
/**
 * Evaluates the compiled XPath query against the given node and
 * reports one violation per matched node, using the node's image
 * as the message argument.
 */
@Override
public void apply(Node target, RuleContext ctx) {
    final SaxonXPathRuleQuery query = getQueryMaybeInitialize();

    List<Node> matches;
    try {
        matches = query.evaluate(target);
    } catch (PmdXPathException e) {
        throw addExceptionContext(e);
    }

    for (Node match : matches) {
        addViolation(ctx, match, match.getImage());
    }
}
/** Tags the exception with this rule's name for better error reporting. */
private ContextedRuntimeException addExceptionContext(PmdXPathException e) {
    return e.addRuleName(getName());
}
/**
 * Compiles the XPath query once per language processor. Called by
 * the framework before {@link #apply(Node, RuleContext)}.
 *
 * @throws IllegalStateException if the version property is invalid,
 *         which should normally be reported via dysfunctionReason first
 */
@Override
public void initialize(LanguageProcessor languageProcessor) {
    String expression = getXPathExpression();
    XPathVersion xpathVersion = getVersion();
    if (xpathVersion == null) {
        throw new IllegalStateException("Invalid XPath version, should have been caught by Rule::dysfunctionReason");
    }

    try {
        xpathRuleQuery = new SaxonXPathRuleQuery(
            expression,
            xpathVersion,
            getPropertiesByPropertyDescriptor(),
            languageProcessor.services().getXPathHandler(),
            attrLogger);
    } catch (PmdXPathException e) {
        throw addExceptionContext(e);
    }
}
/** Returns the compiled query, failing fast if initialize was never called. */
private SaxonXPathRuleQuery getQueryMaybeInitialize() throws PmdXPathException {
    SaxonXPathRuleQuery query = xpathRuleQuery;
    if (query == null) {
        throw new IllegalStateException("Not initialized");
    }
    return query;
}
/**
 * Builds the targeting strategy from the node names the compiled
 * query can be rule-chained on; falls back to root-only traversal
 * when the query cannot use the rule chain.
 */
@Override
protected @NonNull RuleTargetSelector buildTargetSelector() {
    List<String> visitedNames = getQueryMaybeInitialize().getRuleChainVisits();
    boolean useRuleChain = !visitedNames.isEmpty();
    logXPathRuleChainUsage(useRuleChain);
    if (useRuleChain) {
        return RuleTargetSelector.forXPathNames(visitedNames);
    }
    return RuleTargetSelector.forRootOnly();
}
/** Logs (at debug level) whether the rule chain could be used for this rule. */
private void logXPathRuleChainUsage(boolean usesRuleChain) {
    String prefix = usesRuleChain ? "Using" : "no";
    LOG.debug("{} rule chain for XPath {} rule: {} ({})",
              prefix,
              getProperty(XPathRule.VERSION_DESCRIPTOR),
              getName(),
              getRuleSetName());
}
/**
 * Reports why this rule cannot run: an unparsable version property
 * or a missing XPath expression. Returns null when the rule is usable.
 */
@Override
public String dysfunctionReason() {
    if (getVersion() == null) {
        return "Invalid XPath version '" + getProperty(VERSION_DESCRIPTOR) + "'";
    }
    if (StringUtils.isBlank(getXPathExpression())) {
        return "Missing XPath expression";
    }
    return null;
}
/** Maps each XPath version's XML name to the version, for the enum property. */
private static Map<String, XPathVersion> getXPathVersions() {
    Map<String, XPathVersion> byXmlName = new HashMap<>();
    for (XPathVersion candidate : XPathVersion.values()) {
        byXmlName.put(candidate.getXmlName(), candidate);
    }
    return Collections.unmodifiableMap(byXmlName);
}
}
| 6,943 | 31.754717 | 121 | java |
pmd | pmd-master/pmd-core/src/main/java/net/sourceforge/pmd/lang/rule/AbstractRule.java | /**
* BSD-style license; for more info see http://pmd.sourceforge.net/license.html
*/
package net.sourceforge.pmd.lang.rule;
import java.lang.reflect.Constructor;
import java.lang.reflect.InvocationTargetException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.LinkedHashSet;
import java.util.List;
import java.util.Objects;
import java.util.Set;
import org.checkerframework.checker.nullness.qual.NonNull;
import net.sourceforge.pmd.Rule;
import net.sourceforge.pmd.RuleContext;
import net.sourceforge.pmd.RulePriority;
import net.sourceforge.pmd.lang.Language;
import net.sourceforge.pmd.lang.LanguageVersion;
import net.sourceforge.pmd.lang.ast.Node;
import net.sourceforge.pmd.lang.ast.RootNode;
import net.sourceforge.pmd.properties.AbstractPropertySource;
import net.sourceforge.pmd.properties.PropertyDescriptor;
/**
 * Basic abstract implementation of all parser-independent methods of the Rule
 * interface.
 *
 * @author pieter_van_raemdonck - Application Engineers NV/SA - www.ae.be
 */
public abstract class AbstractRule extends AbstractPropertySource implements Rule {

    private Language language;
    private LanguageVersion minimumLanguageVersion;
    private LanguageVersion maximumLanguageVersion;
    private boolean deprecated;
    private String name = getClass().getName();
    private String since;
    private String ruleClass = getClass().getName();
    private String ruleSetName;
    private String message;
    private String description;
    private List<String> examples = new ArrayList<>();
    private String externalInfoUrl;
    private RulePriority priority = RulePriority.LOW;
    private Set<String> ruleChainVisits = new LinkedHashSet<>();
    private Set<Class<? extends Node>> classRuleChainVisits = new LinkedHashSet<>();
    // Lazily built by getTargetSelector(), never reset afterwards.
    private RuleTargetSelector myStrategy;

    public AbstractRule() {
        definePropertyDescriptor(Rule.VIOLATION_SUPPRESS_REGEX_DESCRIPTOR);
        definePropertyDescriptor(Rule.VIOLATION_SUPPRESS_XPATH_DESCRIPTOR);
    }

    @Override
    protected String getPropertySourceType() {
        return "rule";
    }

    /**
     * Copies all configuration values of this rule into the given rule.
     *
     * @deprecated Use {@link #deepCopy()} to create verbatim copies of rules.
     */
    @Deprecated
    public void deepCopyValuesTo(AbstractRule otherRule) {
        otherRule.language = language;
        otherRule.minimumLanguageVersion = minimumLanguageVersion;
        otherRule.maximumLanguageVersion = maximumLanguageVersion;
        otherRule.deprecated = deprecated;
        otherRule.name = name;
        otherRule.since = since;
        otherRule.ruleClass = ruleClass;
        otherRule.ruleSetName = ruleSetName;
        otherRule.message = message;
        otherRule.description = description;
        otherRule.examples = copyExamples();
        otherRule.externalInfoUrl = externalInfoUrl;
        otherRule.priority = priority;
        otherRule.propertyDescriptors = new ArrayList<>(getPropertyDescriptors());
        otherRule.propertyValuesByDescriptor = copyPropertyValues();
        otherRule.ruleChainVisits = new LinkedHashSet<>(ruleChainVisits);
        otherRule.classRuleChainVisits = new LinkedHashSet<>(classRuleChainVisits);
    }

    /** Returns a fresh mutable copy of the example list. */
    private List<String> copyExamples() {
        return new ArrayList<>(examples);
    }

    @Override
    public Language getLanguage() {
        return language;
    }

    @Override
    public void setLanguage(Language language) {
        // The language is write-once: once assigned it may not change.
        if (this.language != null && !this.language.equals(language)) {
            throw new UnsupportedOperationException("The Language for Rule class " + this.getClass().getName()
                + " is immutable and cannot be changed.");
        }
        this.language = language;
    }

    @Override
    public LanguageVersion getMinimumLanguageVersion() {
        return minimumLanguageVersion;
    }

    @Override
    public void setMinimumLanguageVersion(LanguageVersion minimumLanguageVersion) {
        if (minimumLanguageVersion != null && !minimumLanguageVersion.getLanguage().equals(getLanguage())) {
            throw new IllegalArgumentException("Version " + minimumLanguageVersion + " does not belong to language " + getLanguage());
        }
        this.minimumLanguageVersion = minimumLanguageVersion;
    }

    @Override
    public LanguageVersion getMaximumLanguageVersion() {
        return maximumLanguageVersion;
    }

    @Override
    public void setMaximumLanguageVersion(LanguageVersion maximumLanguageVersion) {
        if (maximumLanguageVersion != null && !maximumLanguageVersion.getLanguage().equals(getLanguage())) {
            throw new IllegalArgumentException("Version " + maximumLanguageVersion + " does not belong to language " + getLanguage());
        }
        this.maximumLanguageVersion = maximumLanguageVersion;
    }

    @Override
    public boolean isDeprecated() {
        return deprecated;
    }

    @Override
    public void setDeprecated(boolean deprecated) {
        this.deprecated = deprecated;
    }

    @Override
    public String getName() {
        return name;
    }

    @Override
    public void setName(String name) {
        this.name = name;
    }

    @Override
    public String getSince() {
        return since;
    }

    @Override
    public void setSince(String since) {
        this.since = since;
    }

    @Override
    public String getRuleClass() {
        return ruleClass;
    }

    @Override
    public void setRuleClass(String ruleClass) {
        this.ruleClass = ruleClass;
    }

    @Override
    public String getRuleSetName() {
        return ruleSetName;
    }

    @Override
    public void setRuleSetName(String ruleSetName) {
        this.ruleSetName = ruleSetName;
    }

    @Override
    public String getMessage() {
        return message;
    }

    @Override
    public void setMessage(String message) {
        this.message = message;
    }

    @Override
    public String getDescription() {
        return description;
    }

    @Override
    public void setDescription(String description) {
        this.description = description;
    }

    @Override
    public List<String> getExamples() {
        // Return a read-only view so callers cannot mutate the rule's
        // internal list; use addExample(String) to add examples.
        return Collections.unmodifiableList(examples);
    }

    @Override
    public void addExample(String example) {
        examples.add(example);
    }

    @Override
    public String getExternalInfoUrl() {
        return externalInfoUrl;
    }

    @Override
    public void setExternalInfoUrl(String externalInfoUrl) {
        this.externalInfoUrl = externalInfoUrl;
    }

    @Override
    public RulePriority getPriority() {
        return priority;
    }

    @Override
    public void setPriority(RulePriority priority) {
        this.priority = priority;
    }

    /**
     * Returns the node classes registered for rule chain visits, or a
     * singleton {@link RootNode} set when none were registered at all.
     */
    private Set<Class<? extends Node>> getClassRuleChainVisits() {
        if (classRuleChainVisits.isEmpty() && ruleChainVisits.isEmpty()) {
            return Collections.singleton(RootNode.class);
        }
        return classRuleChainVisits;
    }

    /**
     * @deprecated Override {@link #buildTargetSelector()}, this is
     * provided for legacy compatibility
     */
    @Deprecated
    protected void addRuleChainVisit(Class<? extends Node> nodeClass) {
        classRuleChainVisits.add(nodeClass);
    }

    @Override
    public final RuleTargetSelector getTargetSelector() {
        if (myStrategy == null) {
            myStrategy = buildTargetSelector();
        }
        return myStrategy;
    }

    /**
     * Create the targeting strategy for this rule. Please override
     * this instead of using {@link #addRuleChainVisit(Class)}.
     * Use the factory methods of {@link RuleTargetSelector}.
     */
    @NonNull
    protected RuleTargetSelector buildTargetSelector() {
        Set<Class<? extends Node>> crvs = getClassRuleChainVisits();
        return crvs.isEmpty() ? RuleTargetSelector.forRootOnly()
                              : RuleTargetSelector.forTypes(crvs);
    }

    @Override
    public void start(RuleContext ctx) {
        // Override as needed
    }

    @Override
    public void end(RuleContext ctx) {
        // Override as needed
    }

    // TODO remove those methods, make Rules have type-safe access to a RuleContext

    /**
     * Cast the argument to a {@link RuleContext}. Use it to report violations:
     * <pre>{@code
     * asCtx(data).addViolation(node);
     * asCtx(data).addViolationWithMessage(node, "Some message");
     * }</pre>
     *
     * In PMD 7, rules will have type-safe access to a RuleContext, and
     * this will be deprecated as useless. In PMD 6, you can use this to
     * stop using the deprecated {@link #addViolation(Object, Node)} overloads
     * of this class.
     */
    protected final RuleContext asCtx(Object ctx) {
        if (ctx instanceof RuleContext) {
            assert isThisRule(((RuleContext) ctx).getRule())
                : "not an appropriate rule context!";
            return (RuleContext) ctx;
        } else {
            throw new ClassCastException("Unexpected context object! " + ctx);
        }
    }

    /** True if the given rule is this rule, possibly wrapped in delegate rules. */
    private boolean isThisRule(Rule rule) {
        return rule == this // NOPMD CompareObjectsWithEquals
            || rule instanceof AbstractDelegateRule && this.isThisRule(((AbstractDelegateRule) rule).getRule());
    }

    /**
     * @see RuleContext#addViolation(Node)
     * @deprecated Replace with {@code asCtx(data).addViolation(node)}.
     */
    public void addViolation(Object data, Node node) {
        asCtx(data).addViolation(node);
    }

    /**
     * @see RuleContext#addViolation(Node, Object[])
     *
     * @deprecated Replace with {@code asCtx(data).addViolation(node, arg)}.
     */
    public void addViolation(Object data, Node node, String arg) {
        asCtx(data).addViolation(node, arg);
    }

    /**
     * @see RuleContext#addViolation(Node, Object[])
     *
     * @deprecated Replace with {@code asCtx(data).addViolation(node, arg1, arg2)}.
     */
    public void addViolation(Object data, Node node, Object... args) {
        asCtx(data).addViolation(node, args);
    }

    /**
     * @see RuleContext#addViolationWithMessage(Node, String)
     * @deprecated Replace with {@code asCtx(data).addViolationWithMessage(node, message)}.
     */
    public void addViolationWithMessage(Object data, Node node, String message) {
        asCtx(data).addViolationWithMessage(node, message);
    }

    /**
     * @see RuleContext#addViolationWithPosition(Node, int, int, String, Object...)
     * @deprecated Replace with {@code asCtx(data).addViolationWithPosition(node, beginLine, endLine, message)}.
     */
    public void addViolationWithMessage(Object data, Node node, String message, int beginLine, int endLine) {
        asCtx(data).addViolationWithPosition(node, beginLine, endLine, message);
    }

    /**
     * @see RuleContext#addViolationWithMessage(Node, String, Object...)
     * @deprecated Replace with {@code asCtx(data).addViolationWithMessage(node, message, args)}.
     */
    public void addViolationWithMessage(Object data, Node node, String message, Object[] args) {
        asCtx(data).addViolationWithMessage(node, message, args);
    }

    /**
     * Rules are equal if:
     * <ol>
     * <li>They have the same implementation class.</li>
     * <li>They have the same name.</li>
     * <li>They have the same priority.</li>
     * <li>They share the same properties.</li>
     * </ol>
     */
    @Override
    public boolean equals(Object o) {
        if (this == o) {
            return true; // trivial
        }
        if (o == null || getClass() != o.getClass()) {
            return false;
        }
        AbstractRule that = (AbstractRule) o;
        return Objects.equals(getName(), that.getName())
            && Objects.equals(getPriority(), that.getPriority())
            && super.equals(o);
    }

    @Override
    public int hashCode() {
        return Objects.hash(getName(), getPriority(), super.hashCode());
    }

    @SuppressWarnings("unchecked")
    @Override
    public Rule deepCopy() {
        Rule rule;
        try {
            // Reflectively invoke the no-arg constructor; it may be non-public.
            Constructor<? extends AbstractRule> declaredConstructor = getClass().getDeclaredConstructor();
            declaredConstructor.setAccessible(true);
            rule = declaredConstructor.newInstance();
        } catch (ReflectiveOperationException e) {
            // Can't happen... we already have an instance
            throw new RuntimeException(e); // in case it happens anyway, something is really wrong...
        }
        rule.setName(getName());
        rule.setLanguage(getLanguage());
        rule.setMinimumLanguageVersion(getMinimumLanguageVersion());
        rule.setMaximumLanguageVersion(getMaximumLanguageVersion());
        rule.setSince(getSince());
        rule.setMessage(getMessage());
        rule.setRuleSetName(getRuleSetName());
        rule.setExternalInfoUrl(getExternalInfoUrl());
        rule.setDescription(getDescription());
        for (final String example : getExamples()) {
            rule.addExample(example);
        }
        rule.setPriority(getPriority());
        for (final PropertyDescriptor<?> prop : getPropertyDescriptors()) {
            // define the descriptor only if it doesn't yet exist
            if (rule.getPropertyDescriptor(prop.name()) == null) {
                rule.definePropertyDescriptor(prop); // Property descriptors are immutable, and can be freely shared
            }
            if (isPropertyOverridden(prop)) {
                rule.setProperty((PropertyDescriptor<Object>) prop, getProperty((PropertyDescriptor<Object>) prop));
            }
        }
        return rule;
    }
}
| 13,770 | 31.250585 | 134 | java |
pmd | pmd-master/pmd-core/src/main/java/net/sourceforge/pmd/lang/rule/xpath/NoAttribute.java | /*
* BSD-style license; for more info see http://pmd.sourceforge.net/license.html
*/
package net.sourceforge.pmd.lang.rule.xpath;
import java.lang.annotation.ElementType;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.lang.annotation.Target;
import net.sourceforge.pmd.lang.ast.Node;
import net.sourceforge.pmd.lang.ast.impl.AbstractNode;
/**
 * Filters out some methods from the XPath attributes of a node.
 * Place it on an attribute accessor to suppress that single attribute,
 * or on a node class to suppress a whole {@linkplain NoAttrScope scope}
 * of attributes.
 *
 * @author Clément Fournier
 */
@Retention(RetentionPolicy.RUNTIME)
@Target({ElementType.METHOD, ElementType.TYPE})
public @interface NoAttribute {

    /**
     * When applied to a type declaration, this value determines which
     * XPath attributes are filtered out. When applied to an attribute
     * accessor this value has no effect and the annotation suppresses
     * the attribute in any case.
     */
    NoAttrScope scope() default NoAttrScope.ALL;

    /** Scope of attribute suppression when the annotation is placed on a type. */
    enum NoAttrScope {
        /**
         * Only attributes inherited from superclasses or superinterfaces
         * are filtered out (except those from {@link Node} and {@link AbstractNode}).
         * Attributes defined here, or overridden, are kept. Attributes can
         * be suppressed individually with a {@link NoAttribute} on their
         * accessor.
         */
        INHERITED,

        /**
         * All attributes are suppressed, except those from {@link Node}
         * and {@link AbstractNode}. This extends {@link #INHERITED} to
         * the attributes defined in this class. Note that subclasses of
         * the current class also will see those attributes suppressed
         * unless they override them.
         */
        ALL
    }
}
| 1,698 | 31.673077 | 86 | java |
pmd | pmd-master/pmd-core/src/main/java/net/sourceforge/pmd/lang/rule/xpath/XPathVersion.java | /*
* BSD-style license; for more info see http://pmd.sourceforge.net/license.html
*/
package net.sourceforge.pmd.lang.rule.xpath;
import java.util.HashMap;
import java.util.Map;
/**
 * Constants for XPath language version used in XPath queries.
 * Each constant is identified in XML rulesets by its
 * {@linkplain #getXmlName() XML name}.
 */
public enum XPathVersion {
    /**
     * XPath 1.0.
     *
     * @deprecated not supported anymore
     */
    @Deprecated
    XPATH_1_0("1.0"),
    /**
     * XPath 1.0 compatibility mode.
     *
     * @deprecated Not supported any more.
     */
    @Deprecated
    XPATH_1_0_COMPATIBILITY("1.0 compatibility"),
    /**
     * XPath 2.0.
     *
     * @deprecated Technically still supported, use 3.1 instead. There
     * are no known incompatibilities.
     */
    @Deprecated
    XPATH_2_0("2.0"),
    /** XPath 3.1. */
    XPATH_3_1("3.1");

    /**
     * The default XPath version for XPath queries.
     */
    public static final XPathVersion DEFAULT = XPATH_3_1;

    // Reverse index from XML name to constant, used by ofId.
    private static final Map<String, XPathVersion> BY_NAME = new HashMap<>();
    // The string used in XML rulesets to identify this version.
    private final String version;

    static {
        // Populate the reverse index once, after all constants exist.
        for (XPathVersion value : values()) {
            BY_NAME.put(value.getXmlName(), value);
        }
    }

    XPathVersion(String version) {
        this.version = version;
    }

    /**
     * Returns the string used to represent the version in the XML.
     *
     * @return A string representation
     */
    public String getXmlName() {
        return version;
    }

    @Override
    public String toString() {
        return getXmlName();
    }

    /**
     * Gets an XPath version from the string used to represent
     * it in the XML.
     *
     * @param version A version string
     *
     * @return An XPath version, or null if the argument is not a valid version
     */
    public static XPathVersion ofId(String version) {
        return BY_NAME.get(version);
    }
}
| 1,863 | 20.181818 | 79 | java |
pmd | pmd-master/pmd-core/src/main/java/net/sourceforge/pmd/lang/rule/xpath/PmdXPathException.java | /*
* BSD-style license; for more info see http://pmd.sourceforge.net/license.html
*/
package net.sourceforge.pmd.lang.rule.xpath;
import org.apache.commons.lang3.exception.ContextedRuntimeException;
import org.checkerframework.checker.nullness.qual.Nullable;
import net.sf.saxon.trans.XPathException;
/**
 * Unchecked exception wrapper for {@link XPathException}.
 */
public class PmdXPathException extends ContextedRuntimeException {

    // Context keys; their insertion order determines rendering order in the message.
    private static final String ERROR_KIND = "Kind";
    private static final String ERROR_PHASE = "Phase";
    private static final String EXPR = "Expression";
    private static final String VERSION = "Version";
    private static final String RULE_NAME = "Rule";
    private static final String LOCATION = "Location in expr";

    /**
     * Wraps the given Saxon exception, recording the kind of error,
     * the lifecycle phase in which it occurred, and the offending
     * expression, version and location.
     */
    public PmdXPathException(XPathException e, Phase phase, String expression, XPathVersion version) {
        super(e);
        setContextValue(ERROR_KIND, getErrorKind(e));
        setContextValue(ERROR_PHASE, phase);
        setContextValue(EXPR, expression);
        setContextValue(VERSION, version);
        setContextValue(LOCATION, e.getLocationAsString());
    }

    /** Returns the lifecycle phase during which the error occurred. */
    public Phase getPhase() {
        return (Phase) getFirstContextValue(ERROR_PHASE);
    }

    /** Records the name of the rule whose query failed, then returns this. */
    public PmdXPathException addRuleName(String ruleName) {
        setContextValue(RULE_NAME, ruleName);
        return this;
    }

    /** Returns the recorded rule name, or null if none was set. */
    public @Nullable String getRuleName() {
        return (String) getFirstContextValue(RULE_NAME);
    }

    /** Maps the Saxon error classification to a human-readable label. */
    private String getErrorKind(XPathException exception) {
        if (exception.isSyntaxError()) {
            return "Syntax error";
        }
        if (exception.isTypeError()) {
            return "Type error";
        }
        if (exception.isStaticError()) {
            return "Static error";
        }
        return "Unknown error";
    }

    /** The rule lifecycle phases in which an XPath error can occur. */
    public enum Phase {
        INITIALIZATION,
        EVALUATION
    }
}
| 1,873 | 28.746032 | 102 | java |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.